diff --git a/db/db_test2.cc b/db/db_test2.cc
index c03d89ca8..afde6d153 100644
--- a/db/db_test2.cc
+++ b/db/db_test2.cc
@@ -454,6 +454,22 @@ TEST_F(DBTest2, SharedWriteBufferLimitAcrossDB) {
   rocksdb::SyncPoint::GetInstance()->DisableProcessing();
 }
 
+TEST_F(DBTest2, TestWriteBufferNoLimitWithCache) {
+  Options options = CurrentOptions();
+  options.arena_block_size = 4096;
+  std::shared_ptr<Cache> cache =
+      NewLRUCache(LRUCacheOptions(10000000, 1, false, 0.0));
+  options.write_buffer_size = 50000;  // this is never hit
+  // Use a write buffer total size so that the soft limit is about
+  // 105000.
+  options.write_buffer_manager.reset(new WriteBufferManager(0, cache));
+  Reopen(options);
+
+  ASSERT_OK(Put("foo", "bar"));
+  // One dummy entry is 1MB.
+  ASSERT_GT(cache->GetUsage(), 500000);
+}
+
 namespace {
 void ValidateKeyExistence(DB* db, const std::vector<Slice>& keys_must_exist,
                           const std::vector<Slice>& keys_must_not_exist) {
diff --git a/db/memtable.cc b/db/memtable.cc
index e9f4d06a6..5bf55473f 100644
--- a/db/memtable.cc
+++ b/db/memtable.cc
@@ -69,12 +69,13 @@ MemTable::MemTable(const InternalKeyComparator& cmp,
       refs_(0),
       kArenaBlockSize(OptimizeBlockSize(moptions_.arena_block_size)),
       mem_tracker_(write_buffer_manager),
-      arena_(
-          moptions_.arena_block_size,
-          (write_buffer_manager != nullptr && write_buffer_manager->enabled())
-              ? &mem_tracker_
-              : nullptr,
-          mutable_cf_options.memtable_huge_page_size),
+      arena_(moptions_.arena_block_size,
+             (write_buffer_manager != nullptr &&
+              (write_buffer_manager->enabled() ||
+               write_buffer_manager->cost_to_cache()))
+                 ? &mem_tracker_
+                 : nullptr,
+             mutable_cf_options.memtable_huge_page_size),
       table_(ioptions.memtable_factory->CreateMemTableRep(
           comparator_, &arena_, mutable_cf_options.prefix_extractor.get(),
           ioptions.info_log, column_family_id)),
diff --git a/include/rocksdb/write_buffer_manager.h b/include/rocksdb/write_buffer_manager.h
index 856cf4b24..dea904c18 100644
--- a/include/rocksdb/write_buffer_manager.h
+++ b/include/rocksdb/write_buffer_manager.h
@@ -30,6 +30,8 @@ class WriteBufferManager {
 
   bool enabled() const { return buffer_size_ != 0; }
 
+  bool cost_to_cache() const { return cache_rep_ != nullptr; }
+
   // Only valid if enabled()
   size_t memory_usage() const {
     return memory_used_.load(std::memory_order_relaxed);
diff --git a/memtable/alloc_tracker.cc b/memtable/alloc_tracker.cc
index 9889cc423..a1fa4938c 100644
--- a/memtable/alloc_tracker.cc
+++ b/memtable/alloc_tracker.cc
@@ -24,7 +24,8 @@ AllocTracker::~AllocTracker() { FreeMem(); }
 
 void AllocTracker::Allocate(size_t bytes) {
   assert(write_buffer_manager_ != nullptr);
-  if (write_buffer_manager_->enabled()) {
+  if (write_buffer_manager_->enabled() ||
+      write_buffer_manager_->cost_to_cache()) {
     bytes_allocated_.fetch_add(bytes, std::memory_order_relaxed);
     write_buffer_manager_->ReserveMem(bytes);
   }
@@ -32,7 +33,8 @@ void AllocTracker::DoneAllocating() {
   if (write_buffer_manager_ != nullptr && !done_allocating_) {
-    if (write_buffer_manager_->enabled()) {
+    if (write_buffer_manager_->enabled() ||
+        write_buffer_manager_->cost_to_cache()) {
       write_buffer_manager_->ScheduleFreeMem(
           bytes_allocated_.load(std::memory_order_relaxed));
     } else {
@@ -47,7 +49,8 @@ void AllocTracker::FreeMem() {
     DoneAllocating();
   }
   if (write_buffer_manager_ != nullptr && !freed_) {
-    if (write_buffer_manager_->enabled()) {
+    if (write_buffer_manager_->enabled() ||
+        write_buffer_manager_->cost_to_cache()) {
       write_buffer_manager_->FreeMem(
          bytes_allocated_.load(std::memory_order_relaxed));
     } else {