From 3e4a9ec2413aa39ae550dded6fa75d3a601d748b Mon Sep 17 00:00:00 2001
From: sdong
Date: Fri, 9 May 2014 11:01:54 -0700
Subject: [PATCH] Arena to inline 2KB of data in it.

Summary: In order to use Arena for use cases where the total allocation size
might be small (LogBuffer is already such a case), inline 2KB of data in it,
so that the arena can live mostly on the stack or inline in another class.
If always inlining 2KB is a concern, I could make the inline size a template
parameter; however, all dependents would need to change, so I'm not going
with that for now. A minimal sketch of the inlining idea follows the diff.

Test Plan: make all check.

Reviewers: haobo, igor, yhchiang, dhruba

Reviewed By: haobo

CC: leveldb

Differential Revision: https://reviews.facebook.net/D18609
---
 db/db_test.cc      |  4 ++--
 util/arena.cc      | 10 +++++++---
 util/arena.h       | 15 +++++++++------
 util/arena_test.cc | 13 +++++++++++--
 4 files changed, 29 insertions(+), 13 deletions(-)

diff --git a/db/db_test.cc b/db/db_test.cc
index 693e56008..05403fc07 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -2365,9 +2365,9 @@ TEST(DBTest, NumImmutableMemTable) {
     ASSERT_EQ(num, "0");
     ASSERT_TRUE(dbfull()->GetProperty(
         handles_[1], "rocksdb.cur-size-active-mem-table", &num));
-    // "208" is the size of the metadata of an empty skiplist, this would
+    // "200" is the size of the metadata of an empty skiplist; this would
     // break if we change the default skiplist implementation
-    ASSERT_EQ(num, "208");
+    ASSERT_EQ(num, "200");
     SetPerfLevel(kDisable);
   } while (ChangeCompactOptions());
 }
diff --git a/util/arena.cc b/util/arena.cc
index 1775addcd..51b56da7d 100644
--- a/util/arena.cc
+++ b/util/arena.cc
@@ -34,6 +34,10 @@ size_t OptimizeBlockSize(size_t block_size) {
 Arena::Arena(size_t block_size) : kBlockSize(OptimizeBlockSize(block_size)) {
   assert(kBlockSize >= kMinBlockSize && kBlockSize <= kMaxBlockSize &&
          kBlockSize % kAlignUnit == 0);
+  alloc_bytes_remaining_ = sizeof(inline_block_);
+  blocks_memory_ += alloc_bytes_remaining_;
+  aligned_alloc_ptr_ = inline_block_;
+  unaligned_alloc_ptr_ = inline_block_ + alloc_bytes_remaining_;
 }
 
 Arena::~Arena() {
@@ -71,17 +75,17 @@ char* Arena::AllocateFallback(size_t bytes, bool aligned) {
   }
 }
 
-char* Arena::AllocateAligned(size_t bytes, size_t huge_page_tlb_size,
+char* Arena::AllocateAligned(size_t bytes, size_t huge_page_size,
                              Logger* logger) {
   assert((kAlignUnit & (kAlignUnit - 1)) ==
          0);  // Pointer size should be a power of 2
 
 #ifdef MAP_HUGETLB
-  if (huge_page_tlb_size > 0 && bytes > 0) {
+  if (huge_page_size > 0 && bytes > 0) {
     // Allocate from a huge page TBL table.
     assert(logger != nullptr);  // logger need to be passed in.
     size_t reserved_size =
-        ((bytes - 1U) / huge_page_tlb_size + 1U) * huge_page_tlb_size;
+        ((bytes - 1U) / huge_page_size + 1U) * huge_page_size;
     assert(reserved_size >= bytes);
     void* addr = mmap(nullptr, reserved_size, (PROT_READ | PROT_WRITE),
                       (MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB), 0, 0);
diff --git a/util/arena.h b/util/arena.h
index 161a253aa..0c9aa9738 100644
--- a/util/arena.h
+++ b/util/arena.h
@@ -28,6 +28,7 @@ class Arena {
   Arena(const Arena&) = delete;
   void operator=(const Arena&) = delete;
 
+  static const size_t kInlineSize = 2048;
   static const size_t kMinBlockSize;
   static const size_t kMaxBlockSize;
 
@@ -36,18 +37,19 @@ class Arena {
 
   char* Allocate(size_t bytes);
 
-  // huge_page_tlb_size: if >0, allocate bytes from huge page TLB and the size
-  // of the huge page TLB. Bytes will be rounded up to multiple and 2MB and
-  // allocate huge pages through mmap anonymous option with huge page on.
-  // The extra space allocated will be wasted. To enable it, need to reserve
-  // huge pages for it to be allocated, like:
+  // huge_page_size: if >0, will try to allocate from huge page TLB.
+  // The argument is the huge page size to use. Bytes will be rounded up
+  // to a multiple of the page size and allocated through mmap with the
+  // anonymous and huge page options on. The extra space allocated will be
+  // wasted. If the allocation fails, falls back to the normal case. To
+  // enable it, huge pages need to be reserved for it to be allocated, like:
   //     sysctl -w vm.nr_hugepages=20
   // See linux doc Documentation/vm/hugetlbpage.txt for details.
   // huge page allocation can fail. In this case it will fail back to
   // normal cases. The messages will be logged to logger. So when calling with
   // huge_page_tlb_size > 0, we highly recommend a logger is passed in.
   // Otherwise, the error message will be printed out to stderr directly.
-  char* AllocateAligned(size_t bytes, size_t huge_page_tlb_size = 0,
+  char* AllocateAligned(size_t bytes, size_t huge_page_size = 0,
                         Logger* logger = nullptr);
 
   // Returns an estimate of the total memory usage of data allocated
@@ -69,6 +71,7 @@ class Arena {
   size_t BlockSize() const { return kBlockSize; }
 
  private:
+  char inline_block_[kInlineSize];
   // Number of bytes allocated in one block
   const size_t kBlockSize;
   // Array of new[] allocated memory blocks
diff --git a/util/arena_test.cc b/util/arena_test.cc
index 1b2b53175..7b6cfd0af 100644
--- a/util/arena_test.cc
+++ b/util/arena_test.cc
@@ -31,9 +31,11 @@ TEST(ArenaTest, MemoryAllocatedBytes) {
   for (int i = 0; i < N; i++) {
     arena.Allocate(req_sz);
   }
-  expected_memory_allocated = req_sz * N;
+  expected_memory_allocated = req_sz * N + Arena::kInlineSize;
   ASSERT_EQ(arena.MemoryAllocatedBytes(), expected_memory_allocated);
 
+  arena.Allocate(Arena::kInlineSize - 1);
+
   // requested size < quarter of a block:
   // allocate a block with the default size, then try to use unused part
   // of the block. So one new block will be allocated for the first
@@ -64,12 +66,19 @@ TEST(ArenaTest, ApproximateMemoryUsageTest) {
   Arena arena(kBlockSize);
   ASSERT_EQ(kZero, arena.ApproximateMemoryUsage());
 
+  // allocations small enough to be served from the inline block
+  arena.AllocateAligned(8);
+  arena.AllocateAligned(Arena::kInlineSize / 2 - 16);
+  arena.AllocateAligned(Arena::kInlineSize / 2);
+  ASSERT_EQ(arena.ApproximateMemoryUsage(), Arena::kInlineSize - 8);
+  ASSERT_EQ(arena.MemoryAllocatedBytes(), Arena::kInlineSize);
+
   auto num_blocks = kBlockSize / kEntrySize;
 
   // first allocation
   arena.AllocateAligned(kEntrySize);
   auto mem_usage = arena.MemoryAllocatedBytes();
-  ASSERT_EQ(mem_usage, kBlockSize);
+  ASSERT_EQ(mem_usage, kBlockSize + Arena::kInlineSize);
   auto usage = arena.ApproximateMemoryUsage();
   ASSERT_LT(usage, mem_usage);
   for (size_t i = 1; i < num_blocks; ++i) {
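
Note for reviewers less familiar with the technique: below is a minimal
standalone sketch of the inline-block idea referenced in the summary. It is
a hypothetical illustration, not RocksDB's actual Arena (names such as
MiniArena, kBlockSize = 8192, and the single Allocate method are assumptions
for the sketch; the real Arena also handles alignment and huge pages). The
point it shows: small allocations are served from a buffer embedded in the
object itself, so an arena placed on the stack performs no heap allocation
until the inline space is exhausted.

    // Hypothetical MiniArena sketch -- not RocksDB's Arena.
    #include <cassert>
    #include <cstddef>
    #include <vector>

    class MiniArena {
     public:
      static const size_t kInlineSize = 2048;  // mirrors Arena::kInlineSize

      // Point the bump pointer at the embedded buffer; no heap touched yet.
      MiniArena() : alloc_ptr_(inline_block_), bytes_remaining_(kInlineSize) {}

      ~MiniArena() {
        for (char* block : blocks_) {
          delete[] block;
        }
      }

      char* Allocate(size_t bytes) {
        assert(bytes > 0);
        if (bytes <= bytes_remaining_) {
          // Fast path: bump-allocate from the current block. Right after
          // construction the current block is the inline buffer.
          char* result = alloc_ptr_;
          alloc_ptr_ += bytes;
          bytes_remaining_ -= bytes;
          return result;
        }
        // Slow path: abandon the tail of the current block and start a new
        // heap block (oversized requests get a block of their own size).
        size_t block_size = bytes > kBlockSize ? bytes : kBlockSize;
        char* block = new char[block_size];
        blocks_.push_back(block);
        alloc_ptr_ = block + bytes;
        bytes_remaining_ = block_size - bytes;
        return block;
      }

     private:
      static const size_t kBlockSize = 8192;  // fallback heap block size
      char inline_block_[kInlineSize];  // storage embedded in the object
      char* alloc_ptr_;                 // next free byte in current block
      size_t bytes_remaining_;          // free bytes left in current block
      std::vector<char*> blocks_;       // heap blocks owned by the arena
    };

    int main() {
      MiniArena arena;                // 2KB inline buffer, zero heap traffic
      char* p = arena.Allocate(64);   // served from inline_block_
      char* q = arena.Allocate(4096); // exceeds inline space; heap fallback
      assert(p != nullptr && q != nullptr);
      return 0;
    }

The tradeoff is the one the summary names: every instance is ~2KB larger
even when the inline space goes unused, which is why making the inline size
a template parameter was considered and deferred.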