Allow incrementing refcount on cache handles

Summary:
Previously, the only way to increment a handle's refcount was to invoke Lookup(), which (1) did a hash table lookup to get the cache handle and (2) incremented that handle's refcount. For a future DeleteRange optimization, I added a function, Ref(), for when the caller already has a cache handle and only needs to do (2).
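
For illustration, a minimal sketch of the intended usage, assuming an LRU cache created via NewLRUCache(); the key, value, deleter, and sizes below are made up, and only Ref() is new in this change:

    #include <memory>
    #include "rocksdb/cache.h"

    void Example() {
      std::shared_ptr<rocksdb::Cache> cache = rocksdb::NewLRUCache(8 << 20);
      cache->Insert("k", new int(42), sizeof(int),
                    [](const rocksdb::Slice& /*key*/, void* v) {
                      delete static_cast<int*>(v);
                    });
      rocksdb::Cache::Handle* h = cache->Lookup("k");  // external ref #1
      if (h != nullptr && cache->Ref(h)) {             // external ref #2, no second hash lookup
        // ... hand the extra reference to other code ...
        cache->Release(h);                             // balances Ref()
      }
      if (h != nullptr) {
        cache->Release(h);                             // balances Lookup()
      }
    }
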
Closes https://github.com/facebook/rocksdb/pull/1761

Differential Revision: D4397114

Pulled By: ajkr

fbshipit-source-id: 9addbe5
Branch: main
Author: Andrew Kryczka (committed by Facebook GitHub Bot)
Parent: 2172b660eb
Commit: fe395fb63d
Changed files (lines changed):
  1. include/rocksdb/cache.h (6)
  2. util/cache_test.cc (26)
  3. util/clock_cache.cc (16)
  4. util/lru_cache.cc (13)
  5. util/lru_cache.h (1)
  6. util/sharded_cache.cc (5)
  7. util/sharded_cache.h (2)
  8. utilities/simulator_cache/sim_cache.cc (2)

@@ -102,6 +102,12 @@ class Cache {
// function.
virtual Handle* Lookup(const Slice& key, Statistics* stats = nullptr) = 0;
// Increments the reference count for the handle if it refers to an entry in
// the cache. Returns true if refcount was incremented; otherwise, returns
// false.
// REQUIRES: handle must have been returned by a method on *this.
virtual bool Ref(Handle* handle) = 0;
// Release a mapping returned by a previous Lookup().
// REQUIRES: handle must not have been released yet.
// REQUIRES: handle must have been returned by a method on *this.

@@ -303,6 +303,32 @@ TEST_P(CacheTest, EvictionPolicy) {
ASSERT_EQ(-1, Lookup(200));
}
TEST_P(CacheTest, ExternalRefPinsEntries) {
Insert(100, 101);
Cache::Handle* h = cache_->Lookup(EncodeKey(100));
ASSERT_TRUE(cache_->Ref(h));
ASSERT_EQ(101, DecodeValue(cache_->Value(h)));
ASSERT_EQ(1U, cache_->GetUsage());
for (int i = 0; i < 3; ++i) {
if (i > 0) {
// First release (i == 1) corresponds to Ref(), second release (i == 2)
// corresponds to Lookup(). Then, since all external refs are released,
// the below insertions should push out the cache entry.
cache_->Release(h);
}
// double cache size because the usage bit in clock cache prevents 100 from
// being evicted in the first kCacheSize iterations
for (int j = 0; j < 2 * kCacheSize + 100; j++) {
Insert(1000 + j, 2000 + j);
}
if (i < 2) {
ASSERT_EQ(101, Lookup(100));
}
}
ASSERT_EQ(-1, Lookup(100));
}
TEST_P(CacheTest, EvictionPolicyRef) {
Insert(100, 101);
Insert(101, 102);

@@ -247,6 +247,11 @@ class ClockCacheShard : public CacheShard {
Cache::Handle** handle,
Cache::Priority priority) override;
virtual Cache::Handle* Lookup(const Slice& key, uint32_t hash) override;
// If the entry is in cache, increase reference count and return true.
// Return false otherwise.
//
// Not necessary to hold mutex_ before being called.
virtual bool Ref(Cache::Handle* handle) override;
virtual void Release(Cache::Handle* handle) override;
virtual void Erase(const Slice& key, uint32_t hash) override;
virtual size_t GetUsage() const override;
@@ -266,12 +271,6 @@ class ClockCacheShard : public CacheShard {
static bool HasUsage(uint32_t flags) { return flags & kUsageBit; }
static uint32_t CountRefs(uint32_t flags) { return flags >> kRefsOffset; }
- // If the entry in in cache, increase reference count and return true.
- // Return false otherwise.
- //
- // Not necessary to hold mutex_ before being called.
- bool Ref(CacheHandle* handle);
// Decrease reference count of the entry. If this decreases the count to 0,
// recycle the entry. If set_usage is true, also set the usage bit.
//
@@ -413,7 +412,8 @@ void ClockCacheShard::Cleanup(const CleanupContext& context) {
}
}
- bool ClockCacheShard::Ref(CacheHandle* handle) {
+ bool ClockCacheShard::Ref(Cache::Handle* h) {
+ auto handle = reinterpret_cast<CacheHandle*>(h);
// CAS loop to increase reference count.
uint32_t flags = handle->flags.load(std::memory_order_relaxed);
while (InCache(flags)) {
@@ -602,7 +602,7 @@ Cache::Handle* ClockCacheShard::Lookup(const Slice& key, uint32_t hash) {
accessor.release();
// Ref() could fail if another thread sneaks in and evicts/erases the cache
// entry before we are able to hold a reference.
- if (!Ref(handle)) {
+ if (!Ref(reinterpret_cast<Cache::Handle*>(handle))) {
return nullptr;
}
// Double check the key since the handle may now be representing another key
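
Only the start of the CAS loop appears in the hunk above. For illustration, a minimal model of the pattern, assuming a flags word that packs an in-cache bit and a reference count; the constants, bit layout, and names below are hypothetical and not copied from clock_cache.cc:

    #include <atomic>
    #include <cstdint>

    constexpr uint32_t kToyInCacheBit = 1u;    // assumed flag layout
    constexpr uint32_t kToyOneRef = 1u << 16;  // assumed refcount position

    struct ToyClockHandle {
      std::atomic<uint32_t> flags{kToyInCacheBit};
    };

    // Returns true only if a reference was taken while the entry was still in
    // cache; returns false if a concurrent evict/erase cleared the in-cache bit.
    bool ToyRef(ToyClockHandle* h) {
      uint32_t flags = h->flags.load(std::memory_order_relaxed);
      while (flags & kToyInCacheBit) {
        // On failure, compare_exchange_weak reloads `flags`, so the in-cache
        // bit is re-checked before every retry.
        if (h->flags.compare_exchange_weak(flags, flags + kToyOneRef)) {
          return true;
        }
      }
      return false;
    }
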

@@ -256,6 +256,19 @@ Cache::Handle* LRUCacheShard::Lookup(const Slice& key, uint32_t hash) {
return reinterpret_cast<Cache::Handle*>(e);
}
bool LRUCacheShard::Ref(Cache::Handle* h) {
LRUHandle* handle = reinterpret_cast<LRUHandle*>(h);
MutexLock l(&mutex_);
if (handle->InCache()) {
if (handle->refs == 1) {
LRU_Remove(handle);
}
handle->refs++;
return true;
}
return false;
}
void LRUCacheShard::SetHighPriorityPoolRatio(double high_pri_pool_ratio) {
MutexLock l(&mutex_);
high_pri_pool_ratio_ = high_pri_pool_ratio;
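
For illustration, a toy model of the invariant the refs == 1 branch above relies on, using hypothetical names rather than the real lru_cache.cc types: refs counts the cache's own reference plus all external ones, so an in-cache entry with refs == 1 is unpinned and sits on the LRU list, the first external reference pulls it off the list, and dropping the last external reference puts it back:

    #include <list>
    #include <mutex>

    struct ToyLRUEntry {
      int refs = 1;          // the cache's own reference
      bool in_cache = true;
    };

    struct ToyLRUShard {
      std::mutex mu;
      std::list<ToyLRUEntry*> lru;  // only unpinned (evictable) entries live here

      bool Ref(ToyLRUEntry* e) {
        std::lock_guard<std::mutex> lock(mu);
        if (!e->in_cache) return false;
        if (e->refs == 1) lru.remove(e);  // first external ref pins the entry
        e->refs++;
        return true;
      }

      void Release(ToyLRUEntry* e) {
        std::lock_guard<std::mutex> lock(mu);
        e->refs--;
        if (e->in_cache && e->refs == 1) lru.push_back(e);  // evictable again
      }
    };
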

@@ -177,6 +177,7 @@ class LRUCacheShard : public CacheShard {
Cache::Handle** handle,
Cache::Priority priority) override;
virtual Cache::Handle* Lookup(const Slice& key, uint32_t hash) override;
virtual bool Ref(Cache::Handle* handle) override;
virtual void Release(Cache::Handle* handle) override;
virtual void Erase(const Slice& key, uint32_t hash) override;

@@ -58,6 +58,11 @@ Cache::Handle* ShardedCache::Lookup(const Slice& key, Statistics* stats) {
return GetShard(Shard(hash))->Lookup(key, hash);
}
bool ShardedCache::Ref(Handle* handle) {
uint32_t hash = GetHash(handle);
return GetShard(Shard(hash))->Ref(handle);
}
void ShardedCache::Release(Handle* handle) {
uint32_t hash = GetHash(handle);
GetShard(Shard(hash))->Release(handle);
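
For illustration, the dispatch shape the Ref() added above follows; the names and the modulo shard selector below are hypothetical stand-ins, not the actual formulas in sharded_cache.cc:

    #include <cstdint>
    #include <vector>

    struct ToyShardedHandle {
      uint32_t hash = 0;  // assume the key's hash is recoverable from the handle
      int refs = 1;
    };

    struct ToyCacheShard {
      bool Ref(ToyShardedHandle* h) {
        h->refs++;  // stands in for the per-shard Ref() implementations above
        return true;
      }
    };

    class ToyShardedCache {
     public:
      explicit ToyShardedCache(size_t num_shards) : shards_(num_shards) {}

      bool Ref(ToyShardedHandle* h) {
        // Same shape as ShardedCache::Ref(): hash -> shard -> per-shard Ref().
        return shards_[h->hash % shards_.size()].Ref(h);
      }

     private:
      std::vector<ToyCacheShard> shards_;
    };
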

@@ -29,6 +29,7 @@ class CacheShard {
void (*deleter)(const Slice& key, void* value),
Cache::Handle** handle, Cache::Priority priority) = 0;
virtual Cache::Handle* Lookup(const Slice& key, uint32_t hash) = 0;
virtual bool Ref(Cache::Handle* handle) = 0;
virtual void Release(Cache::Handle* handle) = 0;
virtual void Erase(const Slice& key, uint32_t hash) = 0;
virtual void SetCapacity(size_t capacity) = 0;
@@ -63,6 +64,7 @@ class ShardedCache : public Cache {
void (*deleter)(const Slice& key, void* value),
Handle** handle, Priority priority) override;
virtual Handle* Lookup(const Slice& key, Statistics* stats) override;
virtual bool Ref(Handle* handle) override;
virtual void Release(Handle* handle) override;
virtual void Erase(const Slice& key) override;
virtual uint64_t NewId() override;

@@ -64,6 +64,8 @@ class SimCacheImpl : public SimCache {
return cache_->Lookup(key, stats);
}
virtual bool Ref(Handle* handle) override { return cache_->Ref(handle); }
virtual void Release(Handle* handle) override { cache_->Release(handle); }
virtual void Erase(const Slice& key) override {
