From badbff65b7caf255e093cd313ffd862f7651adcd Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Tue, 23 Aug 2016 13:53:49 -0700 Subject: [PATCH] Do not insert into block cache if cache is full and not holding handle Summary: We used to allow inserting into a full block cache as long as `strict_capacity_limit=false`. This diff further restricts inserting into a full cache if the caller doesn't intend to hold a handle to the cache entry after the insert. Hopefully this diff fixes the assertion failure with db_stress: https://our.intern.facebook.com/intern/sandcastle/log/?instance_id=211853102&step_id=2475070014 db_stress: util/lru_cache.cc:278: virtual void rocksdb::LRUCacheShard::Release(rocksdb::Cache::Handle*): Assertion `lru_.next == &lru_' failed. The assertion at lru_cache.cc:278 can fail when an entry is inserted into a full cache and stays in the LRU list. Test Plan: make all check Reviewers: IslamAbdelRahman, lightmark, sdong Reviewed By: sdong Subscribers: andrewkr, dhruba, leveldb Differential Revision: https://reviews.facebook.net/D62325 --- db/db_block_cache_test.cc | 5 +---- util/cache_test.cc | 8 +++++--- util/clock_cache.cc | 28 +++++++++++++++++++--------- util/lru_cache.cc | 7 +++++-- 4 files changed, 30 insertions(+), 18 deletions(-) diff --git a/db/db_block_cache_test.cc b/db/db_block_cache_test.cc index f54764982..8869cb3b1 100644 --- a/db/db_block_cache_test.cc +++ b/db/db_block_cache_test.cc @@ -172,7 +172,7 @@ TEST_F(DBBlockCacheTest, TestWithCompressedBlockCache) { InitTable(options); std::shared_ptr cache = NewLRUCache(0, 0, false); - std::shared_ptr compressed_cache = NewLRUCache(0, 0, false); + std::shared_ptr compressed_cache = NewLRUCache(1 << 25, 0, false); table_options.block_cache = cache; table_options.block_cache_compressed = compressed_cache; options.table_factory.reset(new BlockBasedTableFactory(table_options)); @@ -204,9 +204,6 @@ TEST_F(DBBlockCacheTest, TestWithCompressedBlockCache) { cache->SetCapacity(usage); cache->SetStrictCapacityLimit(true); ASSERT_EQ(usage, 
cache->GetPinnedUsage()); - // compressed_cache->SetCapacity(compressed_usage); - compressed_cache->SetCapacity(0); - // compressed_cache->SetStrictCapacityLimit(true); iter = db_->NewIterator(read_options); iter->Seek(ToString(kNumBlocks - 1)); ASSERT_TRUE(iter->status().IsIncomplete()); diff --git a/util/cache_test.cc b/util/cache_test.cc index 18eda8852..3ea8c763c 100644 --- a/util/cache_test.cc +++ b/util/cache_test.cc @@ -480,7 +480,7 @@ TEST_P(CacheTest, SetStrictCapacityLimit) { for (size_t i = 0; i < 10; i++) { std::string key = ToString(i + 1); s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]); - ASSERT_TRUE(s.ok()); + ASSERT_OK(s); ASSERT_NE(nullptr, handles[i]); } @@ -502,7 +502,7 @@ TEST_P(CacheTest, SetStrictCapacityLimit) { for (size_t i = 0; i < 5; i++) { std::string key = ToString(i + 1); s = cache2->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]); - ASSERT_TRUE(s.ok()); + ASSERT_OK(s); ASSERT_NE(nullptr, handles[i]); } s = cache2->Insert(extra_key, extra_value, 1, &deleter, &handle); @@ -510,8 +510,10 @@ ASSERT_EQ(nullptr, handle); // test insert without handle s = cache2->Insert(extra_key, extra_value, 1, &deleter); - ASSERT_TRUE(s.IsIncomplete()); + // As if the key had been inserted into the cache but got evicted immediately. + ASSERT_OK(s); ASSERT_EQ(5, cache->GetUsage()); + ASSERT_EQ(nullptr, cache2->Lookup(extra_key)); for (size_t i = 0; i < 5; i++) { cache2->Release(handles[i]); diff --git a/util/clock_cache.cc b/util/clock_cache.cc index efee820d9..6e0ee1514 100644 --- a/util/clock_cache.cc +++ b/util/clock_cache.cc @@ -187,6 +187,10 @@ struct CacheHandle { CacheHandle(const CacheHandle& a) { *this = a; } + CacheHandle(const Slice& k, void* v, + void (*del)(const Slice& key, void* value)) + : key(k), value(v), deleter(del) {} + CacheHandle& operator=(const CacheHandle& a) { // Only copy members needed for deletion. 
key = a.key; @@ -526,7 +530,11 @@ CacheHandle* ClockCacheShard::Insert( MutexLock l(&mutex_); bool success = EvictFromCache(charge, context); bool strict = strict_capacity_limit_.load(std::memory_order_relaxed); - if (!success && strict) { + if (!success && (strict || !hold_reference)) { + context->to_delete_key.push_back(key.data()); + if (!hold_reference) { + context->to_delete_value.emplace_back(key, value, deleter); + } return nullptr; } // Grab available handle from recycle bin. If recycle bin is empty, create @@ -571,20 +579,22 @@ CacheHandle* ClockCacheShard::Insert( Status ClockCacheShard::Insert(const Slice& key, uint32_t hash, void* value, size_t charge, void (*deleter)(const Slice& key, void* value), - Cache::Handle** h, Cache::Priority priority) { + Cache::Handle** out_handle, + Cache::Priority priority) { CleanupContext context; HashTable::accessor accessor; char* key_data = new char[key.size()]; memcpy(key_data, key.data(), key.size()); Slice key_copy(key_data, key.size()); - CacheHandle* handle = - Insert(key_copy, hash, value, charge, deleter, h != nullptr, &context); + CacheHandle* handle = Insert(key_copy, hash, value, charge, deleter, + out_handle != nullptr, &context); Status s; - if (h != nullptr) { - *h = reinterpret_cast(handle); - } - if (handle == nullptr) { - s = Status::Incomplete("Insert failed due to LRU cache being full."); + if (out_handle != nullptr) { + if (handle == nullptr) { + s = Status::Incomplete("Insert failed due to LRU cache being full."); + } else { + *out_handle = reinterpret_cast(handle); + } } Cleanup(context); return s; diff --git a/util/lru_cache.cc b/util/lru_cache.cc index dd8fd5a9d..cac9a0459 100644 --- a/util/lru_cache.cc +++ b/util/lru_cache.cc @@ -327,14 +327,17 @@ Status LRUCacheShard::Insert(const Slice& key, uint32_t hash, void* value, // is freed or the lru list is empty EvictFromLRU(charge, &last_reference_list); - if (strict_capacity_limit_ && usage_ - lru_usage_ + charge > capacity_) { + if (usage_ - 
lru_usage_ + charge > capacity_ && + (strict_capacity_limit_ || handle == nullptr)) { if (handle == nullptr) { + // Don't insert the entry but still return ok, as if the entry was inserted + // into the cache and got evicted immediately. last_reference_list.push_back(e); } else { delete[] reinterpret_cast(e); *handle = nullptr; + s = Status::Incomplete("Insert failed due to LRU cache being full."); } - s = Status::Incomplete("Insert failed due to LRU cache being full."); } else { // insert into the cache // note that the cache might get larger than its capacity if not enough