Make arena use hugepage if possible

Summary:
Arena doesn't use huge pages by default. This change makes it do so when
possible. A new parameter is added to Arena(); if it is set, Arena will
always try to back its blocks with huge pages. If huge page allocation
fails, Arena falls back to malloc().
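
For illustration, a minimal usage sketch of the new constructor (the helper
name and the 2MB huge page size are examples for this note, not part of the
change itself):

  #include "util/arena.h"

  // Hypothetical example: back Arena blocks with 2MB huge pages when the
  // kernel can supply them; otherwise Arena silently falls back to
  // malloc()-backed blocks.
  void UseHugePageArena() {
    rocksdb::Arena arena(rocksdb::Arena::kMinBlockSize /* block_size */,
                         2 * 1024 * 1024 /* huge_page_size */);
    char* buf = arena.AllocateAligned(4096);
    (void)buf;  // memory is owned by the arena and released in ~Arena()
  }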

Test Plan:
Changed util/arena_test to cover huge page allocation.
Run the tests below:
1. Normal regression test:
  make check
2. Check that huge page allocation works:
  echo 50 > /proc/sys/vm/nr_hugepages
  make check

Reviewers: sdong

Reviewed By: sdong

Subscribers: dhruba

Differential Revision: https://reviews.facebook.net/D28647
Branch: main
Author: Shaohua Li, 10 years ago
Parent: 3a40c427b9
Commit: 1410180167

Changed files (lines changed):
  util/arena.cc      (55)
  util/arena.h       (7)
  util/arena_test.cc (65)

@@ -32,13 +32,20 @@ size_t OptimizeBlockSize(size_t block_size) {
return block_size;
}
Arena::Arena(size_t block_size) : kBlockSize(OptimizeBlockSize(block_size)) {
Arena::Arena(size_t block_size, size_t huge_page_size)
: kBlockSize(OptimizeBlockSize(block_size)) {
assert(kBlockSize >= kMinBlockSize && kBlockSize <= kMaxBlockSize &&
kBlockSize % kAlignUnit == 0);
alloc_bytes_remaining_ = sizeof(inline_block_);
blocks_memory_ += alloc_bytes_remaining_;
aligned_alloc_ptr_ = inline_block_;
unaligned_alloc_ptr_ = inline_block_ + alloc_bytes_remaining_;
#ifdef MAP_HUGETLB
hugetlb_size_ = huge_page_size;
if (hugetlb_size_ && kBlockSize > hugetlb_size_) {
hugetlb_size_ = ((kBlockSize - 1U) / hugetlb_size_ + 1U) * hugetlb_size_;
}
#endif
}
Arena::~Arena() {
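
The rounding in the constructor above rounds the effective block size up to a
whole number of huge pages when kBlockSize exceeds the huge page size; a
hypothetical helper, shown here purely to spell out the arithmetic:

  // e.g. block_size = 3MB with 2MB huge pages:
  //   ((3MB - 1) / 2MB + 1) * 2MB = (1 + 1) * 2MB = 4MB,
  // so a hugepage-backed block is never smaller than the configured size.
  size_t RoundUpToMultipleOf(size_t block_size, size_t huge_page_size) {
    return ((block_size - 1) / huge_page_size + 1) * huge_page_size;
  }
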
@@ -62,20 +69,49 @@ char* Arena::AllocateFallback(size_t bytes, bool aligned) {
}
// We waste the remaining space in the current block.
auto block_head = AllocateNewBlock(kBlockSize);
alloc_bytes_remaining_ = kBlockSize - bytes;
size_t size;
char* block_head = nullptr;
if (hugetlb_size_) {
size = hugetlb_size_;
block_head = AllocateFromHugePage(size);
}
if (!block_head) {
size = kBlockSize;
block_head = AllocateNewBlock(size);
}
alloc_bytes_remaining_ = size - bytes;
if (aligned) {
aligned_alloc_ptr_ = block_head + bytes;
unaligned_alloc_ptr_ = block_head + kBlockSize;
unaligned_alloc_ptr_ = block_head + size;
return block_head;
} else {
aligned_alloc_ptr_ = block_head;
unaligned_alloc_ptr_ = block_head + kBlockSize - bytes;
unaligned_alloc_ptr_ = block_head + size - bytes;
return unaligned_alloc_ptr_;
}
}
char* Arena::AllocateFromHugePage(size_t bytes) {
#ifdef MAP_HUGETLB
if (hugetlb_size_ == 0) {
return nullptr;
}
void* addr = mmap(nullptr, bytes, (PROT_READ | PROT_WRITE),
(MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB), 0, 0);
if (addr == MAP_FAILED) {
return nullptr;
}
huge_blocks_.push_back(MmapInfo(addr, bytes));
blocks_memory_ += bytes;
return reinterpret_cast<char*>(addr);
#else
return nullptr;
#endif
}
char* Arena::AllocateAligned(size_t bytes, size_t huge_page_size,
Logger* logger) {
assert((kAlignUnit & (kAlignUnit - 1)) ==
@@ -88,17 +124,14 @@ char* Arena::AllocateAligned(size_t bytes, size_t huge_page_size,
size_t reserved_size =
((bytes - 1U) / huge_page_size + 1U) * huge_page_size;
assert(reserved_size >= bytes);
void* addr = mmap(nullptr, reserved_size, (PROT_READ | PROT_WRITE),
(MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB), 0, 0);
if (addr == MAP_FAILED) {
char* addr = AllocateFromHugePage(reserved_size);
if (addr == nullptr) {
Warn(logger, "AllocateAligned fail to allocate huge TLB pages: %s",
strerror(errno));
// fail back to malloc
} else {
blocks_memory_ += reserved_size;
huge_blocks_.push_back(MmapInfo(addr, reserved_size));
return reinterpret_cast<char*>(addr);
return addr;
}
}
#endif

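For readers unfamiliar with MAP_HUGETLB, here is a self-contained sketch of
the allocate-or-return-null pattern the new code relies on (Linux-specific;
the function name is illustrative, not RocksDB code):

  #include <sys/mman.h>
  #include <cstddef>

  // Try to map `bytes` from the kernel's huge page pool. Returning nullptr
  // lets the caller fall back to a regular allocation, mirroring
  // Arena::AllocateFromHugePage(). The mapping must eventually be released
  // with munmap(addr, bytes).
  static void* TryHugePageMap(std::size_t bytes) {
  #ifdef MAP_HUGETLB
    void* addr = mmap(nullptr, bytes, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
                      -1 /* fd, ignored for anonymous mappings */, 0);
    return addr == MAP_FAILED ? nullptr : addr;
  #else
    (void)bytes;
    return nullptr;
  #endif
  }
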
@@ -35,7 +35,10 @@ class Arena {
static const size_t kMinBlockSize;
static const size_t kMaxBlockSize;
explicit Arena(size_t block_size = kMinBlockSize);
// huge_page_size: if 0, don't use huge page TLB. If > 0 (should set to the
// supported hugepage size of the system), block allocation will try huge
// page TLB first. If allocation fails, will fall back to normal case.
explicit Arena(size_t block_size = kMinBlockSize, size_t huge_page_size = 0);
~Arena();
char* Allocate(size_t bytes);
@@ -100,6 +103,8 @@ class Arena {
// How many bytes left in currently active block?
size_t alloc_bytes_remaining_ = 0;
size_t hugetlb_size_ = 0;
char* AllocateFromHugePage(size_t bytes);
char* AllocateFallback(size_t bytes, bool aligned);
char* AllocateNewBlock(size_t block_bytes);

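Besides the new constructor, AllocateAligned() already exposes a
per-allocation huge page path; a hedged usage sketch (the wrapper name and
the sizes are made up for illustration):

  #include "util/arena.h"

  // Ask for one large aligned allocation backed by 2MB huge pages. If the
  // huge page mmap fails, Arena warns through `logger` and serves the
  // request from a normal block instead.
  char* BigAlignedBuffer(rocksdb::Arena* arena, rocksdb::Logger* logger) {
    return arena->AllocateAligned(16 * 1024 * 1024 /* bytes */,
                                  2 * 1024 * 1024 /* huge_page_size */,
                                  logger);
  }
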
@@ -13,17 +13,21 @@
namespace rocksdb {
namespace {
const size_t kHugePageSize = 2 * 1024 * 1024;
} // namespace
class ArenaTest {};
TEST(ArenaTest, Empty) { Arena arena0; }
TEST(ArenaTest, MemoryAllocatedBytes) {
namespace {
void MemoryAllocatedBytesTest(size_t huge_page_size) {
const int N = 17;
size_t req_sz; // requested size
size_t bsz = 8192; // block size
size_t expected_memory_allocated;
Arena arena(bsz);
Arena arena(bsz, huge_page_size);
// requested size > quarter of a block:
// allocate requested size separately
@@ -44,8 +48,15 @@ TEST(ArenaTest, MemoryAllocatedBytes) {
for (int i = 0; i < N; i++) {
arena.Allocate(req_sz);
}
expected_memory_allocated += bsz;
ASSERT_EQ(arena.MemoryAllocatedBytes(), expected_memory_allocated);
if (huge_page_size) {
ASSERT_TRUE(arena.MemoryAllocatedBytes() ==
expected_memory_allocated + bsz ||
arena.MemoryAllocatedBytes() ==
expected_memory_allocated + huge_page_size);
} else {
expected_memory_allocated += bsz;
ASSERT_EQ(arena.MemoryAllocatedBytes(), expected_memory_allocated);
}
// requested size > quarter of a block:
// allocate requested size separately
@@ -54,16 +65,23 @@ TEST(ArenaTest, MemoryAllocatedBytes) {
arena.Allocate(req_sz);
}
expected_memory_allocated += req_sz * N;
ASSERT_EQ(arena.MemoryAllocatedBytes(), expected_memory_allocated);
if (huge_page_size) {
ASSERT_TRUE(arena.MemoryAllocatedBytes() ==
expected_memory_allocated + bsz ||
arena.MemoryAllocatedBytes() ==
expected_memory_allocated + huge_page_size);
} else {
ASSERT_EQ(arena.MemoryAllocatedBytes(), expected_memory_allocated);
}
}
// Make sure we didn't count the allocate but not used memory space in
// Arena::ApproximateMemoryUsage()
TEST(ArenaTest, ApproximateMemoryUsageTest) {
static void ApproximateMemoryUsageTest(size_t huge_page_size) {
const size_t kBlockSize = 4096;
const size_t kEntrySize = kBlockSize / 8;
const size_t kZero = 0;
Arena arena(kBlockSize);
Arena arena(kBlockSize, huge_page_size);
ASSERT_EQ(kZero, arena.ApproximateMemoryUsage());
// allocate inline bytes
@@ -78,7 +96,12 @@ TEST(ArenaTest, ApproximateMemoryUsageTest) {
// first allocation
arena.AllocateAligned(kEntrySize);
auto mem_usage = arena.MemoryAllocatedBytes();
ASSERT_EQ(mem_usage, kBlockSize + Arena::kInlineSize);
if (huge_page_size) {
ASSERT_TRUE(mem_usage == kBlockSize + Arena::kInlineSize ||
mem_usage == huge_page_size + Arena::kInlineSize);
} else {
ASSERT_EQ(mem_usage, kBlockSize + Arena::kInlineSize);
}
auto usage = arena.ApproximateMemoryUsage();
ASSERT_LT(usage, mem_usage);
for (size_t i = 1; i < num_blocks; ++i) {
@@ -87,12 +110,17 @@ TEST(ArenaTest, ApproximateMemoryUsageTest) {
ASSERT_EQ(arena.ApproximateMemoryUsage(), usage + kEntrySize);
usage = arena.ApproximateMemoryUsage();
}
ASSERT_GT(usage, mem_usage);
if (huge_page_size) {
ASSERT_TRUE(usage > mem_usage ||
usage + huge_page_size - kBlockSize == mem_usage);
} else {
ASSERT_GT(usage, mem_usage);
}
}
TEST(ArenaTest, Simple) {
static void SimpleTest(size_t huge_page_size) {
std::vector<std::pair<size_t, char*>> allocated;
Arena arena;
Arena arena(Arena::kMinBlockSize, huge_page_size);
const int N = 100000;
size_t bytes = 0;
Random rnd(301);
@@ -136,7 +164,22 @@ TEST(ArenaTest, Simple) {
}
}
}
} // namespace
TEST(ArenaTest, MemoryAllocatedBytes) {
MemoryAllocatedBytesTest(0);
MemoryAllocatedBytesTest(kHugePageSize);
}
TEST(ArenaTest, ApproximateMemoryUsage) {
ApproximateMemoryUsageTest(0);
ApproximateMemoryUsageTest(kHugePageSize);
}
TEST(ArenaTest, Simple) {
SimpleTest(0);
SimpleTest(kHugePageSize);
}
} // namespace rocksdb
int main(int argc, char** argv) { return rocksdb::test::RunAllTests(); }
