Arena to inline 2KB of data in it.

Summary:
In order to use an Arena for use cases where the total allocation size might be small (LogBuffer is already such a case), inline 2KB of data in it, so that the arena can mostly live on the stack or be inlined in another class.

If always inlining 2KB is a concern, I could make it a template parameter that determines how much to inline. However, dependents would need to change, so I'm not going with that for now.
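
To make the idea concrete, here is a minimal, hypothetical sketch (a made-up MiniArena, not the RocksDB class): the first kInlineSize bytes live inside the object itself, so an arena placed on the stack, or embedded in a class such as LogBuffer, can serve small workloads without touching the heap.

#include <cstddef>
#include <vector>

class MiniArena {
 public:
  static const std::size_t kInlineSize = 2048;

  char* Allocate(std::size_t bytes) {
    if (bytes <= remaining_) {
      // Small requests are carved out of the buffer embedded in the object.
      char* result = ptr_;
      ptr_ += bytes;
      remaining_ -= bytes;
      return result;
    }
    // Inline buffer exhausted (or request too large): fall back to a
    // heap-allocated block, as the real Arena does with its block list.
    heap_blocks_.emplace_back(bytes);
    return heap_blocks_.back().data();
  }

 private:
  char inline_block_[kInlineSize];
  char* ptr_ = inline_block_;
  std::size_t remaining_ = kInlineSize;
  std::vector<std::vector<char>> heap_blocks_;
};

The real Arena also keeps alignment bookkeeping and block-size tuning; this sketch only shows the inline-first-block fallback behavior.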

Test Plan: make all check.

Reviewers: haobo, igor, yhchiang, dhruba

Reviewed By: haobo

CC: leveldb

Differential Revision: https://reviews.facebook.net/D18609
Branch: main
Author: sdong, 11 years ago
Parent: 1ef31b6ca1
Commit: 3e4a9ec241

Changed files:
  1. db/db_test.cc       (4 changes)
  2. util/arena.cc       (10 changes)
  3. util/arena.h        (15 changes)
  4. util/arena_test.cc  (13 changes)

@@ -2365,9 +2365,9 @@ TEST(DBTest, NumImmutableMemTable) {
     ASSERT_EQ(num, "0");
     ASSERT_TRUE(dbfull()->GetProperty(
         handles_[1], "rocksdb.cur-size-active-mem-table", &num));
-    // "208" is the size of the metadata of an empty skiplist, this would
+    // "200" is the size of the metadata of an empty skiplist, this would
     // break if we change the default skiplist implementation
-    ASSERT_EQ(num, "208");
+    ASSERT_EQ(num, "200");
     SetPerfLevel(kDisable);
   } while (ChangeCompactOptions());
 }

@@ -34,6 +34,10 @@ size_t OptimizeBlockSize(size_t block_size) {
 Arena::Arena(size_t block_size) : kBlockSize(OptimizeBlockSize(block_size)) {
   assert(kBlockSize >= kMinBlockSize && kBlockSize <= kMaxBlockSize &&
          kBlockSize % kAlignUnit == 0);
+  alloc_bytes_remaining_ = sizeof(inline_block_);
+  blocks_memory_ += alloc_bytes_remaining_;
+  aligned_alloc_ptr_ = inline_block_;
+  unaligned_alloc_ptr_ = inline_block_ + alloc_bytes_remaining_;
 }
 
 Arena::~Arena() {
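
One observable effect of seeding the allocation pointers from inline_block_ is that the inline buffer counts as allocated memory from the start. A rough check, assuming the public Arena::kInlineSize constant added in arena.h; the block size and helper name are illustrative only:

#include <cassert>

#include "util/arena.h"

void InlineBlockIsCountedUpFront() {
  rocksdb::Arena arena(8192);  // any block size within [kMinBlockSize, kMaxBlockSize]
  // No Allocate() call yet, but the 2KB inline buffer is already reported.
  assert(arena.MemoryAllocatedBytes() == rocksdb::Arena::kInlineSize);
}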
@@ -71,17 +75,17 @@ char* Arena::AllocateFallback(size_t bytes, bool aligned) {
   }
 }
 
-char* Arena::AllocateAligned(size_t bytes, size_t huge_page_tlb_size,
+char* Arena::AllocateAligned(size_t bytes, size_t huge_page_size,
                              Logger* logger) {
   assert((kAlignUnit & (kAlignUnit - 1)) ==
          0);  // Pointer size should be a power of 2
 #ifdef MAP_HUGETLB
-  if (huge_page_tlb_size > 0 && bytes > 0) {
+  if (huge_page_size > 0 && bytes > 0) {
     // Allocate from a huge page TBL table.
     assert(logger != nullptr);  // logger need to be passed in.
     size_t reserved_size =
-        ((bytes - 1U) / huge_page_tlb_size + 1U) * huge_page_tlb_size;
+        ((bytes - 1U) / huge_page_size + 1U) * huge_page_size;
     assert(reserved_size >= bytes);
     void* addr = mmap(nullptr, reserved_size, (PROT_READ | PROT_WRITE),
                       (MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB), 0, 0);
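
For reference, the huge-page path above follows the usual mmap(MAP_HUGETLB) pattern. A standalone sketch of that pattern (not the RocksDB code; the function name and the malloc fallback are illustrative only):

#include <sys/mman.h>

#include <cstddef>
#include <cstdlib>

// Returns huge-page-backed memory when possible, heap memory otherwise.
void* AllocRoundedToHugePage(std::size_t bytes, std::size_t huge_page_size) {
  // Round the request up to a whole number of huge pages.
  std::size_t reserved = ((bytes - 1) / huge_page_size + 1) * huge_page_size;
#ifdef MAP_HUGETLB
  void* addr = mmap(nullptr, reserved, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
  if (addr != MAP_FAILED) {
    return addr;
  }
#endif
  // No huge pages reserved (or not Linux): fall back to the normal path.
  return std::malloc(bytes);
}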

@@ -28,6 +28,7 @@ class Arena {
   Arena(const Arena&) = delete;
   void operator=(const Arena&) = delete;
 
+  static const size_t kInlineSize = 2048;
   static const size_t kMinBlockSize;
   static const size_t kMaxBlockSize;
@@ -36,18 +37,19 @@ class Arena {
   char* Allocate(size_t bytes);
 
-  // huge_page_tlb_size: if >0, allocate bytes from huge page TLB and the size
-  // of the huge page TLB. Bytes will be rounded up to multiple and 2MB and
-  // allocate huge pages through mmap anonymous option with huge page on.
-  // The extra space allocated will be wasted. To enable it, need to reserve
-  // huge pages for it to be allocated, like:
+  // huge_page_size: if >0, will try to allocate from huge page TLB.
+  // The argument will be the size of the page size for huge page TLB. Bytes
+  // will be rounded up to multiple of the page size to allocate through mmap
+  // anonymous option with huge page on. The extra space allocated will be
+  // wasted. If allocation fails, will fall back to normal case. To enable it,
+  // need to reserve huge pages for it to be allocated, like:
   //     sysctl -w vm.nr_hugepages=20
   // See linux doc Documentation/vm/hugetlbpage.txt for details.
   // huge page allocation can fail. In this case it will fail back to
   // normal cases. The messages will be logged to logger. So when calling with
   // huge_page_tlb_size > 0, we highly recommend a logger is passed in.
   // Otherwise, the error message will be printed out to stderr directly.
-  char* AllocateAligned(size_t bytes, size_t huge_page_tlb_size = 0,
+  char* AllocateAligned(size_t bytes, size_t huge_page_size = 0,
                         Logger* logger = nullptr);
 
   // Returns an estimate of the total memory usage of data allocated
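
A hedged usage sketch for the renamed parameter (the helper name is hypothetical; 2MB is assumed as the typical x86-64 huge page size, and passing a logger follows the recommendation in the comment above):

#include <cstddef>

#include "util/arena.h"

char* AllocateWithHugePages(rocksdb::Arena* arena, std::size_t bytes,
                            rocksdb::Logger* info_log) {
  // If mmap(MAP_HUGETLB) fails, the arena logs through info_log and falls
  // back to its regular block allocation, per the documented behavior.
  return arena->AllocateAligned(bytes, 2 * 1024 * 1024, info_log);
}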
@@ -69,6 +71,7 @@ class Arena {
   size_t BlockSize() const { return kBlockSize; }
 
  private:
+  char inline_block_[kInlineSize];
   // Number of bytes allocated in one block
   const size_t kBlockSize;
   // Array of new[] allocated memory blocks
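
A side effect worth noting, and the reason the commit message mentions possibly making the inline size a template parameter: every Arena object now carries the 2KB buffer, so embedding or stack-allocating one costs at least that much. A tiny compile-time check, assuming the public kInlineSize constant:

#include "util/arena.h"

// The inline buffer alone makes the object at least 2KB.
static_assert(sizeof(rocksdb::Arena) >= rocksdb::Arena::kInlineSize,
              "Arena embeds its 2KB inline block");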

@@ -31,9 +31,11 @@ TEST(ArenaTest, MemoryAllocatedBytes) {
   for (int i = 0; i < N; i++) {
     arena.Allocate(req_sz);
   }
-  expected_memory_allocated = req_sz * N;
+  expected_memory_allocated = req_sz * N + Arena::kInlineSize;
   ASSERT_EQ(arena.MemoryAllocatedBytes(), expected_memory_allocated);
 
+  arena.Allocate(Arena::kInlineSize - 1);
+
   // requested size < quarter of a block:
   // allocate a block with the default size, then try to use unused part
   // of the block. So one new block will be allocated for the first
@@ -64,12 +66,19 @@ TEST(ArenaTest, ApproximateMemoryUsageTest) {
   Arena arena(kBlockSize);
   ASSERT_EQ(kZero, arena.ApproximateMemoryUsage());
 
+  // allocate inline bytes
+  arena.AllocateAligned(8);
+  arena.AllocateAligned(Arena::kInlineSize / 2 - 16);
+  arena.AllocateAligned(Arena::kInlineSize / 2);
+  ASSERT_EQ(arena.ApproximateMemoryUsage(), Arena::kInlineSize - 8);
+  ASSERT_EQ(arena.MemoryAllocatedBytes(), Arena::kInlineSize);
 
   auto num_blocks = kBlockSize / kEntrySize;
 
+  // first allocation
   arena.AllocateAligned(kEntrySize);
   auto mem_usage = arena.MemoryAllocatedBytes();
-  ASSERT_EQ(mem_usage, kBlockSize);
+  ASSERT_EQ(mem_usage, kBlockSize + Arena::kInlineSize);
   auto usage = arena.ApproximateMemoryUsage();
   ASSERT_LT(usage, mem_usage);
   for (size_t i = 1; i < num_blocks; ++i) {
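
As a worked check of the inline-bytes arithmetic above, assuming kInlineSize == 2048 as declared in arena.h:

// 8 + (2048/2 - 16) + 2048/2 = 8 + 1008 + 1024 = 2040 = kInlineSize - 8,
// so ApproximateMemoryUsage() reports 2040 bytes in use while
// MemoryAllocatedBytes() is still only the 2048-byte inline buffer,
// because no heap block has been needed yet.
static_assert(8 + (2048 / 2 - 16) + 2048 / 2 == 2048 - 8,
              "the three inline allocations sum to kInlineSize - 8");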
