//  Copyright (c) 2013, Facebook, Inc.  All rights reserved.
//  This source code is licensed under the BSD-style license found in the
//  LICENSE file in the root directory of this source tree. An additional grant
//  of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "util/arena.h"
#include <sys/mman.h>
#include <algorithm>
#include "rocksdb/env.h"

namespace rocksdb {

const size_t Arena::kMinBlockSize = 4096;
const size_t Arena::kMaxBlockSize = 2 << 30;
static const int kAlignUnit = sizeof(void*);

size_t OptimizeBlockSize(size_t block_size) {
  // Make sure block_size is in the optimal range.
  block_size = std::max(Arena::kMinBlockSize, block_size);
  block_size = std::min(Arena::kMaxBlockSize, block_size);

  // Make sure block_size is a multiple of kAlignUnit.
  if (block_size % kAlignUnit != 0) {
    block_size = (1 + block_size / kAlignUnit) * kAlignUnit;
  }

  return block_size;
}

Arena::Arena(size_t block_size) : kBlockSize(OptimizeBlockSize(block_size)) {
  assert(kBlockSize >= kMinBlockSize && kBlockSize <= kMaxBlockSize &&
         kBlockSize % kAlignUnit == 0);
}

Arena::~Arena() {
  for (const auto& block : blocks_) {
    delete[] block;
  }
  for (const auto& mmap_info : huge_blocks_) {
    auto ret = munmap(mmap_info.addr_, mmap_info.length_);
    if (ret != 0) {
      // TODO(sdong): Better handling
    }
  }
}

char* Arena::AllocateFallback(size_t bytes, bool aligned) {
  if (bytes > kBlockSize / 4) {
    ++irregular_block_num;
    // Object is more than a quarter of our block size. Allocate it separately
    // to avoid wasting too much space in leftover bytes.
    return AllocateNewBlock(bytes);
  }

  // We waste the remaining space in the current block.
  auto block_head = AllocateNewBlock(kBlockSize);
  alloc_bytes_remaining_ = kBlockSize - bytes;

  if (aligned) {
    aligned_alloc_ptr_ = block_head + bytes;
    unaligned_alloc_ptr_ = block_head + kBlockSize;
    return block_head;
  } else {
    aligned_alloc_ptr_ = block_head;
    unaligned_alloc_ptr_ = block_head + kBlockSize - bytes;
    return unaligned_alloc_ptr_;
  }
}
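
// AllocateAligned hands out memory aligned to kAlignUnit (sizeof(void*)).
// It rounds aligned_alloc_ptr_ up to the next kAlignUnit boundary: for
// example, with kAlignUnit == 8 and a pointer ending in 0x...c, current_mod
// is 4, so 4 bytes of slop are skipped and the returned pointer ends in
// 0x...10. On Linux, a non-zero huge_page_tlb_size instead requests the
// block from mmap() with MAP_HUGETLB (when available), falling back to the
// normal block allocation path if the mapping fails.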
char* Arena::AllocateAligned(size_t bytes, size_t huge_page_tlb_size,
                             Logger* logger) {
  assert((kAlignUnit & (kAlignUnit - 1)) ==
         0);  // Pointer size should be a power of 2

#ifdef OS_LINUX
  if (huge_page_tlb_size > 0 && bytes > 0) {
    // Allocate from huge page TLB.
    assert(logger != nullptr);  // logger needs to be passed in.
    size_t reserved_size =
        ((bytes - 1U) / huge_page_tlb_size + 1U) * huge_page_tlb_size;
    assert(reserved_size >= bytes);
#ifdef MAP_HUGETLB
    void* addr = mmap(nullptr, reserved_size, (PROT_READ | PROT_WRITE),
                      (MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB), 0, 0);
#else
    void* addr = mmap(nullptr, reserved_size, (PROT_READ | PROT_WRITE),
                      (MAP_PRIVATE | MAP_ANONYMOUS), 0, 0);
#endif

    if (addr == MAP_FAILED) {
      Warn(logger, "AllocateAligned fail to allocate huge TLB pages: %s",
           strerror(errno));
      // Fall back to malloc
    } else {
      blocks_memory_ += reserved_size;
      huge_blocks_.push_back(MmapInfo(addr, reserved_size));
      return reinterpret_cast<char*>(addr);
    }
  }
#endif

  size_t current_mod =
      reinterpret_cast<uintptr_t>(aligned_alloc_ptr_) & (kAlignUnit - 1);
  size_t slop = (current_mod == 0 ? 0 : kAlignUnit - current_mod);
  size_t needed = bytes + slop;
  char* result;
  if (needed <= alloc_bytes_remaining_) {
    result = aligned_alloc_ptr_ + slop;
    aligned_alloc_ptr_ += needed;
    alloc_bytes_remaining_ -= needed;
  } else {
    // AllocateFallback always returns aligned memory.
    result = AllocateFallback(bytes, true /* aligned */);
  }
  assert((reinterpret_cast<uintptr_t>(result) & (kAlignUnit - 1)) == 0);
  return result;
}

char* Arena::AllocateNewBlock(size_t block_bytes) {
  char* block = new char[block_bytes];
  blocks_memory_ += block_bytes;
  blocks_.push_back(block);
  return block;
}

}  // namespace rocksdb
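
// A minimal usage sketch (hypothetical caller, assuming only the Arena
// interface defined above and declared in util/arena.h):
//
//   rocksdb::Arena arena(rocksdb::Arena::kMinBlockSize);
//   char* buf = arena.AllocateAligned(128, 0 /* huge_page_tlb_size */,
//                                     nullptr /* logger */);
//   // ... use buf; all blocks are released when `arena` is destroyed.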