diff --git a/include/rocksdb/cache.h b/include/rocksdb/cache.h
index e42d4a350..b58f6b0b1 100644
--- a/include/rocksdb/cache.h
+++ b/include/rocksdb/cache.h
@@ -102,6 +102,12 @@ class Cache {
   // function.
   virtual Handle* Lookup(const Slice& key, Statistics* stats = nullptr) = 0;
 
+  // Increments the reference count for the handle if it refers to an entry in
+  // the cache. Returns true if refcount was incremented; otherwise, returns
+  // false.
+  // REQUIRES: handle must have been returned by a method on *this.
+  virtual bool Ref(Handle* handle) = 0;
+
   // Release a mapping returned by a previous Lookup().
   // REQUIRES: handle must not have been released yet.
   // REQUIRES: handle must have been returned by a method on *this.
diff --git a/util/cache_test.cc b/util/cache_test.cc
index 345ef869d..0ec320584 100644
--- a/util/cache_test.cc
+++ b/util/cache_test.cc
@@ -303,6 +303,32 @@ TEST_P(CacheTest, EvictionPolicy) {
   ASSERT_EQ(-1, Lookup(200));
 }
 
+TEST_P(CacheTest, ExternalRefPinsEntries) {
+  Insert(100, 101);
+  Cache::Handle* h = cache_->Lookup(EncodeKey(100));
+  ASSERT_TRUE(cache_->Ref(h));
+  ASSERT_EQ(101, DecodeValue(cache_->Value(h)));
+  ASSERT_EQ(1U, cache_->GetUsage());
+
+  for (int i = 0; i < 3; ++i) {
+    if (i > 0) {
+      // First release (i == 1) corresponds to Ref(), second release (i == 2)
+      // corresponds to Lookup(). Then, since all external refs are released,
+      // the below insertions should push out the cache entry.
+      cache_->Release(h);
+    }
+    // double cache size because the usage bit in block cache prevents 100 from
+    // being evicted in the first kCacheSize iterations
+    for (int j = 0; j < 2 * kCacheSize + 100; j++) {
+      Insert(1000 + j, 2000 + j);
+    }
+    if (i < 2) {
+      ASSERT_EQ(101, Lookup(100));
+    }
+  }
+  ASSERT_EQ(-1, Lookup(100));
+}
+
 TEST_P(CacheTest, EvictionPolicyRef) {
   Insert(100, 101);
   Insert(101, 102);
diff --git a/util/clock_cache.cc b/util/clock_cache.cc
index 9a48ab9b1..15c72afcb 100644
--- a/util/clock_cache.cc
+++ b/util/clock_cache.cc
@@ -247,6 +247,11 @@ class ClockCacheShard : public CacheShard {
                         Cache::Handle** handle,
                         Cache::Priority priority) override;
   virtual Cache::Handle* Lookup(const Slice& key, uint32_t hash) override;
+  // If the entry is in cache, increase reference count and return true.
+  // Return false otherwise.
+  //
+  // Not necessary to hold mutex_ before being called.
+  virtual bool Ref(Cache::Handle* handle) override;
   virtual void Release(Cache::Handle* handle) override;
   virtual void Erase(const Slice& key, uint32_t hash) override;
   virtual size_t GetUsage() const override;
@@ -266,12 +271,6 @@
   static bool HasUsage(uint32_t flags) { return flags & kUsageBit; }
   static uint32_t CountRefs(uint32_t flags) { return flags >> kRefsOffset; }
 
-  // If the entry in in cache, increase reference count and return true.
-  // Return false otherwise.
-  //
-  // Not necessary to hold mutex_ before being called.
-  bool Ref(CacheHandle* handle);
-
   // Decrease reference count of the entry. If this decreases the count to 0,
   // recycle the entry. If set_usage is true, also set the usage bit.
   //
@@ -413,7 +412,8 @@ void ClockCacheShard::Cleanup(const CleanupContext& context) {
   }
 }
 
-bool ClockCacheShard::Ref(CacheHandle* handle) {
+bool ClockCacheShard::Ref(Cache::Handle* h) {
+  auto handle = reinterpret_cast<CacheHandle*>(h);
   // CAS loop to increase reference count.
   uint32_t flags = handle->flags.load(std::memory_order_relaxed);
   while (InCache(flags)) {
@@ -602,7 +602,7 @@ Cache::Handle* ClockCacheShard::Lookup(const Slice& key, uint32_t hash) {
   accessor.release();
   // Ref() could fail if another thread sneak in and evict/erase the cache
   // entry before we are able to hold reference.
-  if (!Ref(handle)) {
+  if (!Ref(reinterpret_cast<Cache::Handle*>(handle))) {
     return nullptr;
   }
   // Double check the key since the handle may now representing another key
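Reviewer note on the clock-cache change above: ClockCacheShard::Ref() takes a reference without holding mutex_ by looping on a compare-and-swap that only succeeds while the entry's in-cache bit is still set, which is why Lookup() must tolerate Ref() failing. The standalone sketch below shows that pattern in isolation; it is not RocksDB code, and the bit-layout constants are illustrative rather than the ones defined in clock_cache.cc.

// Sketch of the lock-free "ref while still in cache" pattern (illustrative only).
#include <atomic>
#include <cstdint>

constexpr uint32_t kInCacheBit = 1u;              // illustrative flag bit
constexpr uint32_t kRefsOffset = 2u;              // illustrative refcount position
constexpr uint32_t kOneRef = 1u << kRefsOffset;   // one reference, in flag units

bool TryRef(std::atomic<uint32_t>& flags_word) {
  uint32_t flags = flags_word.load(std::memory_order_relaxed);
  while (flags & kInCacheBit) {
    // On failure, compare_exchange_weak reloads `flags`, so the in-cache bit
    // is re-checked before every retry.
    if (flags_word.compare_exchange_weak(flags, flags + kOneRef)) {
      return true;  // reference acquired while the entry was still in cache
    }
  }
  return false;  // entry was evicted or erased before we could take a reference
}

int main() {
  std::atomic<uint32_t> flags{kInCacheBit};  // in cache, zero external refs
  bool ok1 = TryRef(flags);                  // succeeds, refcount becomes 1
  flags.fetch_and(~kInCacheBit);             // simulate eviction/erase
  bool ok2 = TryRef(flags);                  // fails: no longer in cache
  return (ok1 && !ok2) ? 0 : 1;
}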
diff --git a/util/lru_cache.cc b/util/lru_cache.cc
index 3d3de5336..83094266c 100644
--- a/util/lru_cache.cc
+++ b/util/lru_cache.cc
@@ -256,6 +256,19 @@ Cache::Handle* LRUCacheShard::Lookup(const Slice& key, uint32_t hash) {
   return reinterpret_cast<Cache::Handle*>(e);
 }
 
+bool LRUCacheShard::Ref(Cache::Handle* h) {
+  LRUHandle* handle = reinterpret_cast<LRUHandle*>(h);
+  MutexLock l(&mutex_);
+  if (handle->InCache()) {
+    if (handle->refs == 1) {
+      LRU_Remove(handle);
+    }
+    handle->refs++;
+    return true;
+  }
+  return false;
+}
+
 void LRUCacheShard::SetHighPriorityPoolRatio(double high_pri_pool_ratio) {
   MutexLock l(&mutex_);
   high_pri_pool_ratio_ = high_pri_pool_ratio;
diff --git a/util/lru_cache.h b/util/lru_cache.h
index 368f4bf97..800edac04 100644
--- a/util/lru_cache.h
+++ b/util/lru_cache.h
@@ -177,6 +177,7 @@ class LRUCacheShard : public CacheShard {
                         Cache::Handle** handle,
                         Cache::Priority priority) override;
   virtual Cache::Handle* Lookup(const Slice& key, uint32_t hash) override;
+  virtual bool Ref(Cache::Handle* handle) override;
   virtual void Release(Cache::Handle* handle) override;
   virtual void Erase(const Slice& key, uint32_t hash) override;
 
diff --git a/util/sharded_cache.cc b/util/sharded_cache.cc
index b529299a7..b8ed023c3 100644
--- a/util/sharded_cache.cc
+++ b/util/sharded_cache.cc
@@ -58,6 +58,11 @@ Cache::Handle* ShardedCache::Lookup(const Slice& key, Statistics* stats) {
   return GetShard(Shard(hash))->Lookup(key, hash);
 }
 
+bool ShardedCache::Ref(Handle* handle) {
+  uint32_t hash = GetHash(handle);
+  return GetShard(Shard(hash))->Ref(handle);
+}
+
 void ShardedCache::Release(Handle* handle) {
   uint32_t hash = GetHash(handle);
   GetShard(Shard(hash))->Release(handle);
diff --git a/util/sharded_cache.h b/util/sharded_cache.h
index bdb4e77c6..d60bb4382 100644
--- a/util/sharded_cache.h
+++ b/util/sharded_cache.h
@@ -29,6 +29,7 @@ class CacheShard {
                         void (*deleter)(const Slice& key, void* value),
                         Cache::Handle** handle, Cache::Priority priority) = 0;
   virtual Cache::Handle* Lookup(const Slice& key, uint32_t hash) = 0;
+  virtual bool Ref(Cache::Handle* handle) = 0;
   virtual void Release(Cache::Handle* handle) = 0;
   virtual void Erase(const Slice& key, uint32_t hash) = 0;
   virtual void SetCapacity(size_t capacity) = 0;
@@ -63,6 +64,7 @@ class ShardedCache : public Cache {
                         void (*deleter)(const Slice& key, void* value),
                         Handle** handle, Priority priority) override;
   virtual Handle* Lookup(const Slice& key, Statistics* stats) override;
+  virtual bool Ref(Handle* handle) override;
   virtual void Release(Handle* handle) override;
   virtual void Erase(const Slice& key) override;
   virtual uint64_t NewId() override;
diff --git a/utilities/simulator_cache/sim_cache.cc b/utilities/simulator_cache/sim_cache.cc
index dce2fd0cf..0e97a6aa2 100644
--- a/utilities/simulator_cache/sim_cache.cc
+++ b/utilities/simulator_cache/sim_cache.cc
@@ -64,6 +64,8 @@ class SimCacheImpl : public SimCache {
     return cache_->Lookup(key, stats);
   }
 
+  virtual bool Ref(Handle* handle) override { return cache_->Ref(handle); }
+
   virtual void Release(Handle* handle) override { cache_->Release(handle); }
 
   virtual void Erase(const Slice& key) override {
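For reviewers, a minimal sketch of how a caller might use the new Cache::Ref() to pin an entry beyond its Lookup() reference, mirroring the ExternalRefPinsEntries test above. This is illustrative only and not part of the patch; it assumes a cache built with NewLRUCache() and elides error handling. Each successful Lookup() or Ref() must be paired with its own Release().

// Illustrative use of Cache::Ref(); not part of the patch.
#include <memory>
#include <string>
#include "rocksdb/cache.h"

namespace {
void DeleteString(const rocksdb::Slice& /*key*/, void* value) {
  delete static_cast<std::string*>(value);
}
}  // namespace

int main() {
  std::shared_ptr<rocksdb::Cache> cache =
      rocksdb::NewLRUCache(1024 /* capacity */);
  cache->Insert("key", new std::string("value"), 1 /* charge */, &DeleteString);

  rocksdb::Cache::Handle* h = cache->Lookup("key");
  if (h == nullptr) {
    return 1;
  }
  // Take an extra reference; while it is held, the entry cannot be evicted
  // even under capacity pressure. Ref() returns false if the entry was
  // already evicted or erased between Lookup() and Ref().
  if (!cache->Ref(h)) {
    cache->Release(h);
    return 1;
  }

  cache->Release(h);  // drops the Lookup() reference; still pinned by Ref()
  cache->Release(h);  // drops the Ref() reference; entry is evictable again
  return 0;
}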