WriteBufferManager doesn't cost to cache if no limit is set (#4695)

Summary:
WriteBufferManager is not invoked when allocating memory for a memtable if the limit is not set, even if a cache is passed. This is inconsistent with what the comment says. Fix it.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/4695

Differential Revision: D13112722

Pulled By: siying

fbshipit-source-id: 0b27eef63867f679cd06033ea56907c0569597f4
main
Siying Dong 6 years ago committed by Facebook Github Bot
parent 9d6d4867ab
commit 13579e8c5a
  1. 16
      db/db_test2.cc
  2. 7
      db/memtable.cc
  3. 2
      include/rocksdb/write_buffer_manager.h
  4. 9
      memtable/alloc_tracker.cc

@ -454,6 +454,22 @@ TEST_F(DBTest2, SharedWriteBufferLimitAcrossDB) {
rocksdb::SyncPoint::GetInstance()->DisableProcessing();
}
// Verify that when WriteBufferManager is constructed with no size limit
// (buffer_size == 0) but with a cache, memtable allocations are still
// charged ("costed") to that cache.
TEST_F(DBTest2, TestWriteBufferNoLimitWithCache) {
  Options options = CurrentOptions();
  options.arena_block_size = 4096;
  std::shared_ptr<Cache> cache =
      NewLRUCache(LRUCacheOptions(10000000, 1, false, 0.0));
  options.write_buffer_size = 50000;  // this is never hit
  // No write buffer total size limit (first argument 0), but a cache is
  // passed, so memtable memory should still be reserved against the cache.
  options.write_buffer_manager.reset(new WriteBufferManager(0, cache));
  Reopen(options);
  ASSERT_OK(Put("foo", "bar"));
  // One dummy entry is 1MB.
  ASSERT_GT(cache->GetUsage(), 500000);
}
namespace {
void ValidateKeyExistence(DB* db, const std::vector<Slice>& keys_must_exist,
const std::vector<Slice>& keys_must_not_exist) {

@ -69,9 +69,10 @@ MemTable::MemTable(const InternalKeyComparator& cmp,
refs_(0),
kArenaBlockSize(OptimizeBlockSize(moptions_.arena_block_size)),
mem_tracker_(write_buffer_manager),
arena_(
moptions_.arena_block_size,
(write_buffer_manager != nullptr && write_buffer_manager->enabled())
arena_(moptions_.arena_block_size,
(write_buffer_manager != nullptr &&
(write_buffer_manager->enabled() ||
write_buffer_manager->cost_to_cache()))
? &mem_tracker_
: nullptr,
mutable_cf_options.memtable_huge_page_size),

@ -30,6 +30,8 @@ class WriteBufferManager {
// True iff a total write-buffer size limit was configured (non-zero
// buffer_size_).
bool enabled() const { return buffer_size_ != 0; }
// True iff a Cache was supplied at construction; memtable memory is then
// charged to that cache even when no size limit is set (enabled() is false).
bool cost_to_cache() const { return cache_rep_ != nullptr; }
// Only valid if enabled()
size_t memory_usage() const {
return memory_used_.load(std::memory_order_relaxed);

@ -24,7 +24,8 @@ AllocTracker::~AllocTracker() { FreeMem(); }
// Charge `bytes` of memtable allocation to the WriteBufferManager.
// Accounting is performed when either a total size limit is configured
// (enabled()) or a cache was supplied (cost_to_cache()), so memory is
// costed to the cache even when no limit is set.
// NOTE: the diff rendering had left the pre-patch condition line
// (`if (write_buffer_manager_->enabled()) {`) in front of its
// replacement, producing a doubled `if` with unbalanced braces; this is
// the clean post-patch function.
void AllocTracker::Allocate(size_t bytes) {
  assert(write_buffer_manager_ != nullptr);
  if (write_buffer_manager_->enabled() ||
      write_buffer_manager_->cost_to_cache()) {
    bytes_allocated_.fetch_add(bytes, std::memory_order_relaxed);
    write_buffer_manager_->ReserveMem(bytes);
  }
}
@ -32,7 +33,8 @@ void AllocTracker::Allocate(size_t bytes) {
void AllocTracker::DoneAllocating() {
if (write_buffer_manager_ != nullptr && !done_allocating_) {
if (write_buffer_manager_->enabled()) {
if (write_buffer_manager_->enabled() ||
write_buffer_manager_->cost_to_cache()) {
write_buffer_manager_->ScheduleFreeMem(
bytes_allocated_.load(std::memory_order_relaxed));
} else {
@ -47,7 +49,8 @@ void AllocTracker::FreeMem() {
DoneAllocating();
}
if (write_buffer_manager_ != nullptr && !freed_) {
if (write_buffer_manager_->enabled()) {
if (write_buffer_manager_->enabled() ||
write_buffer_manager_->cost_to_cache()) {
write_buffer_manager_->FreeMem(
bytes_allocated_.load(std::memory_order_relaxed));
} else {

Loading…
Cancel
Save