Arena to inline 2KB of data in it.

Summary:
To make Arena useful for cases where the total allocation size might be small (LogBuffer is already such a case), inline 2KB of data in it, so that the whole arena can live mostly on the stack or inline in another class.

If always inlining 2KB is a concern, I could make the inline size a template parameter. However, dependents would need to change, so I'm not going with that for now.
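
For intuition, here is a minimal sketch of the technique (not the RocksDB code; the class name, the fallback policy, and the toy bookkeeping are illustrative only): an arena that owns a small fixed buffer and serves requests from it before touching the heap, so an instance placed on the stack, or embedded in another object such as a log buffer, handles small workloads with zero heap allocations.

    // Toy sketch only; RocksDB's Arena has different bookkeeping and alignment.
    #include <cstddef>
    #include <memory>
    #include <vector>

    class TinyInlineArena {
     public:
      char* Allocate(size_t bytes) {
        if (bytes <= remaining_) {   // serve from the inline buffer first
          char* p = cursor_;
          cursor_ += bytes;
          remaining_ -= bytes;
          return p;
        }
        // Fall back to a heap block owned by the arena (freed when it dies).
        heap_blocks_.emplace_back(new char[bytes]);
        return heap_blocks_.back().get();
      }

     private:
      char inline_block_[2048];      // lives wherever the arena object lives
      char* cursor_ = inline_block_;
      size_t remaining_ = sizeof(inline_block_);
      std::vector<std::unique_ptr<char[]>> heap_blocks_;
    };

    int main() {
      TinyInlineArena a;             // on the stack, or a member of another class
      char* p = a.Allocate(100);     // comes out of inline_block_, no heap use
      char* q = a.Allocate(4096);    // too big for the buffer, heap fallback
      return (p != nullptr && q != nullptr) ? 0 : 1;
    }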

Test Plan: make all check.

Reviewers: haobo, igor, yhchiang, dhruba

Reviewed By: haobo

CC: leveldb

Differential Revision: https://reviews.facebook.net/D18609
main
sdong 11 years ago
parent 1ef31b6ca1
commit 3e4a9ec241
  1. db/db_test.cc (4 lines changed)
  2. util/arena.cc (10 lines changed)
  3. util/arena.h (15 lines changed)
  4. util/arena_test.cc (13 lines changed)

db/db_test.cc
@@ -2365,9 +2365,9 @@ TEST(DBTest, NumImmutableMemTable) {
     ASSERT_EQ(num, "0");
     ASSERT_TRUE(dbfull()->GetProperty(
         handles_[1], "rocksdb.cur-size-active-mem-table", &num));
-    // "208" is the size of the metadata of an empty skiplist, this would
+    // "200" is the size of the metadata of an empty skiplist, this would
     // break if we change the default skiplist implementation
-    ASSERT_EQ(num, "208");
+    ASSERT_EQ(num, "200");
     SetPerfLevel(kDisable);
   } while (ChangeCompactOptions());
 }

util/arena.cc
@@ -34,6 +34,10 @@ size_t OptimizeBlockSize(size_t block_size) {
 Arena::Arena(size_t block_size) : kBlockSize(OptimizeBlockSize(block_size)) {
   assert(kBlockSize >= kMinBlockSize && kBlockSize <= kMaxBlockSize &&
          kBlockSize % kAlignUnit == 0);
+  alloc_bytes_remaining_ = sizeof(inline_block_);
+  blocks_memory_ += alloc_bytes_remaining_;
+  aligned_alloc_ptr_ = inline_block_;
+  unaligned_alloc_ptr_ = inline_block_ + alloc_bytes_remaining_;
 }

 Arena::~Arena() {
@@ -71,17 +75,17 @@ char* Arena::AllocateFallback(size_t bytes, bool aligned) {
   }
 }

-char* Arena::AllocateAligned(size_t bytes, size_t huge_page_tlb_size,
+char* Arena::AllocateAligned(size_t bytes, size_t huge_page_size,
                              Logger* logger) {
   assert((kAlignUnit & (kAlignUnit - 1)) ==
          0);  // Pointer size should be a power of 2

 #ifdef MAP_HUGETLB
-  if (huge_page_tlb_size > 0 && bytes > 0) {
+  if (huge_page_size > 0 && bytes > 0) {
     // Allocate from a huge page TBL table.
     assert(logger != nullptr);  // logger need to be passed in.
     size_t reserved_size =
-        ((bytes - 1U) / huge_page_tlb_size + 1U) * huge_page_tlb_size;
+        ((bytes - 1U) / huge_page_size + 1U) * huge_page_size;
     assert(reserved_size >= bytes);
     void* addr = mmap(nullptr, reserved_size, (PROT_READ | PROT_WRITE),
                       (MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB), 0, 0);
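
As an aside for readers unfamiliar with the MAP_HUGETLB path above, the pattern is: round the request up to whole huge pages, try an anonymous huge-page mapping, and fall back to a normal allocation when the kernel has no huge pages reserved. Below is a standalone, hedged sketch (Linux only; this is not the RocksDB code, and it passes fd = -1 where the diff passes 0, both of which Linux accepts for anonymous mappings):

    // Illustration only. Reserve pages first (sysctl -w vm.nr_hugepages=20),
    // otherwise the mmap call fails and the fallback branch runs.
    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    int main() {
      const size_t huge_page_size = 2 * 1024 * 1024;  // assume 2MB huge pages
      const size_t bytes = 3 * 1024 * 1024;
      // Round up to a whole number of huge pages, as the diff above does.
      size_t reserved = ((bytes - 1) / huge_page_size + 1) * huge_page_size;

      void* addr = MAP_FAILED;
    #ifdef MAP_HUGETLB
      addr = mmap(nullptr, reserved, PROT_READ | PROT_WRITE,
                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
    #endif
      if (addr == MAP_FAILED) {
        // Huge pages unavailable: fall back to a normal allocation.
        std::fprintf(stderr, "huge page mmap failed, using malloc instead\n");
        addr = std::malloc(bytes);
        return addr != nullptr ? 0 : 1;
      }
      munmap(addr, reserved);
      return 0;
    }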

util/arena.h
@@ -28,6 +28,7 @@ class Arena {
   Arena(const Arena&) = delete;
   void operator=(const Arena&) = delete;

+  static const size_t kInlineSize = 2048;
   static const size_t kMinBlockSize;
   static const size_t kMaxBlockSize;
@@ -36,18 +37,19 @@ class Arena {
   char* Allocate(size_t bytes);

-  // huge_page_tlb_size: if >0, allocate bytes from huge page TLB and the size
-  // of the huge page TLB. Bytes will be rounded up to multiple and 2MB and
-  // allocate huge pages through mmap anonymous option with huge page on.
-  // The extra space allocated will be wasted. To enable it, need to reserve
-  // huge pages for it to be allocated, like:
+  // huge_page_size: if >0, will try to allocate from huge page TLB.
+  // The argument will be the page size of the huge page TLB. Bytes
+  // will be rounded up to a multiple of the page size to allocate through mmap
+  // anonymous option with huge page on. The extra space allocated will be
+  // wasted. If allocation fails, will fall back to normal case. To enable it,
+  // need to reserve huge pages for it to be allocated, like:
   // sysctl -w vm.nr_hugepages=20
   // See linux doc Documentation/vm/hugetlbpage.txt for details.
   // huge page allocation can fail. In this case it will fail back to
   // normal cases. The messages will be logged to logger. So when calling with
   // huge_page_tlb_size > 0, we highly recommend a logger is passed in.
   // Otherwise, the error message will be printed out to stderr directly.
-  char* AllocateAligned(size_t bytes, size_t huge_page_tlb_size = 0,
+  char* AllocateAligned(size_t bytes, size_t huge_page_size = 0,
                         Logger* logger = nullptr);

   // Returns an estimate of the total memory usage of data allocated
@@ -69,6 +71,7 @@ class Arena {
   size_t BlockSize() const { return kBlockSize; }

  private:
+  char inline_block_[kInlineSize];
   // Number of bytes allocated in one block
   const size_t kBlockSize;
   // Array of new[] allocated memory blocks
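
A small usage sketch of the interface declared above, assuming a RocksDB source tree of this vintage (namespace rocksdb, in-tree include path "util/arena.h"); the block size and request sizes are arbitrary:

    #include "util/arena.h"
    #include <cassert>

    int main() {
      rocksdb::Arena arena(4096);  // the 2KB inline_block_ sits inside the object
      // A small request is carved out of the inline block; no heap block yet.
      char* small = arena.AllocateAligned(64);
      assert(small != nullptr);
      // huge_page_size > 0 asks for a 2MB-huge-page-backed allocation; with no
      // Logger passed, a failure falls back to a normal block and warns on stderr.
      char* big = arena.AllocateAligned(1 << 20, 2 * 1024 * 1024);
      assert(big != nullptr);
      return 0;
    }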

util/arena_test.cc
@@ -31,9 +31,11 @@ TEST(ArenaTest, MemoryAllocatedBytes) {
   for (int i = 0; i < N; i++) {
     arena.Allocate(req_sz);
   }
-  expected_memory_allocated = req_sz * N;
+  expected_memory_allocated = req_sz * N + Arena::kInlineSize;
   ASSERT_EQ(arena.MemoryAllocatedBytes(), expected_memory_allocated);

+  arena.Allocate(Arena::kInlineSize - 1);
+
   // requested size < quarter of a block:
   // allocate a block with the default size, then try to use unused part
   // of the block. So one new block will be allocated for the first
@@ -64,12 +66,19 @@ TEST(ArenaTest, ApproximateMemoryUsageTest) {
   Arena arena(kBlockSize);
   ASSERT_EQ(kZero, arena.ApproximateMemoryUsage());

+  // allocate inline bytes
+  arena.AllocateAligned(8);
+  arena.AllocateAligned(Arena::kInlineSize / 2 - 16);
+  arena.AllocateAligned(Arena::kInlineSize / 2);
+  ASSERT_EQ(arena.ApproximateMemoryUsage(), Arena::kInlineSize - 8);
+  ASSERT_EQ(arena.MemoryAllocatedBytes(), Arena::kInlineSize);
+
   auto num_blocks = kBlockSize / kEntrySize;

   // first allocation
   arena.AllocateAligned(kEntrySize);
   auto mem_usage = arena.MemoryAllocatedBytes();
-  ASSERT_EQ(mem_usage, kBlockSize);
+  ASSERT_EQ(mem_usage, kBlockSize + Arena::kInlineSize);
   auto usage = arena.ApproximateMemoryUsage();
   ASSERT_LT(usage, mem_usage);
   for (size_t i = 1; i < num_blocks; ++i) {
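
The inline-usage assertions added above are plain arithmetic on the three aligned requests; a self-contained check of that bookkeeping, using the kInlineSize value this commit adds to arena.h:

    // Standalone arithmetic check mirroring the test's expectations.
    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t kInlineSize = 2048;  // value added to arena.h in this commit
      // 8 + (1024 - 16) + 1024 = 2040 bytes are served from the inline block,
      // leaving 8 bytes free, so ApproximateMemoryUsage() is kInlineSize - 8
      // while MemoryAllocatedBytes() is still exactly kInlineSize.
      size_t used = 8 + (kInlineSize / 2 - 16) + kInlineSize / 2;
      assert(used == kInlineSize - 8);
      return 0;
    }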
