Add erase option to release cache

Summary:
This is useful when we put entries in the block cache only for accounting
purposes and do not expect them to be used after they are released. If the
cache does not erase the item in such cases, not only is the cache's
performance negatively affected, but the item's destructor not being called
at release time might also violate assumptions about the object's lifetime.

The change adds a force_erase option to the Release method and returns a
boolean indicating whether the item was successfully deleted.
Closes https://github.com/facebook/rocksdb/pull/2180

Differential Revision: D4916032

Pulled By: maysamyabandeh

fbshipit-source-id: 94409a346069923cac9de8e57adc313b4ed46f28
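
For illustration, here is a minimal sketch of the accounting scenario described in the summary, written against the public cache API after this change; the Accounting struct, its deleter, and the key used here are hypothetical and exist only for the example:

#include <cstddef>
#include <memory>

#include "rocksdb/cache.h"

namespace {
// Hypothetical payload inserted only to charge the cache for accounting.
struct Accounting {
  size_t charge;
};

void DeleteAccounting(const rocksdb::Slice& /*key*/, void* value) {
  delete static_cast<Accounting*>(value);
}
}  // namespace

int main() {
  std::shared_ptr<rocksdb::Cache> cache =
      rocksdb::NewLRUCache(1024 /* capacity */);

  // Insert an entry that exists purely for accounting purposes.
  Accounting* value = new Accounting{128};
  rocksdb::Cache::Handle* handle = nullptr;
  rocksdb::Status s = cache->Insert("accounting-key", value, value->charge,
                                    &DeleteAccounting, &handle);
  if (!s.ok()) {
    delete value;  // free the value ourselves if the insert was rejected
    return 1;
  }

  // With force_erase=true the entry is erased as soon as the last reference
  // is dropped, so the deleter runs now instead of whenever the entry would
  // otherwise be evicted.
  bool erased = cache->Release(handle, true /* force_erase */);
  return erased ? 0 : 1;  // erased == true: the entry was removed from the cache
}

Without force_erase (the default), the same Release call would normally return false and leave the entry cached, which is what the new ReleaseWithoutErase test below exercises.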
Branch: main
Author: Maysam Yabandeh (committed by Facebook Github Bot)
Parent: 04d58970cb
Commit: 4c9447d889
Changed files (lines changed):
  cache/cache_test.cc (31)
  cache/clock_cache.cc (51)
  cache/lru_cache.cc (9)
  cache/lru_cache.h (3)
  cache/sharded_cache.cc (4)
  cache/sharded_cache.h (4)
  include/rocksdb/cache.h (13)
  utilities/simulator_cache/sim_cache.cc (4)

@@ -475,6 +475,37 @@ void deleter(const Slice& key, void* value) {
}
} // namespace
TEST_P(CacheTest, ReleaseAndErase) {
std::shared_ptr<Cache> cache = NewCache(5, 0, false);
Cache::Handle* handle;
Status s = cache->Insert(EncodeKey(100), EncodeValue(100), 1,
&CacheTest::Deleter, &handle);
ASSERT_TRUE(s.ok());
ASSERT_EQ(5U, cache->GetCapacity());
ASSERT_EQ(1U, cache->GetUsage());
ASSERT_EQ(0U, deleted_keys_.size());
auto erased = cache->Release(handle, true);
ASSERT_TRUE(erased);
// This tests that deleter has been called
ASSERT_EQ(1U, deleted_keys_.size());
}
TEST_P(CacheTest, ReleaseWithoutErase) {
std::shared_ptr<Cache> cache = NewCache(5, 0, false);
Cache::Handle* handle;
Status s = cache->Insert(EncodeKey(100), EncodeValue(100), 1,
&CacheTest::Deleter, &handle);
ASSERT_TRUE(s.ok());
ASSERT_EQ(5U, cache->GetCapacity());
ASSERT_EQ(1U, cache->GetUsage());
ASSERT_EQ(0U, deleted_keys_.size());
auto erased = cache->Release(handle);
ASSERT_FALSE(erased);
// This tests that deleter is not called. When cache has free capacity it is
// not expected to immediately erase the released items.
ASSERT_EQ(0U, deleted_keys_.size());
}
TEST_P(CacheTest, SetCapacity) {
// test1: increase capacity
// let's create a cache with capacity 5,

@@ -26,7 +26,6 @@ std::shared_ptr<Cache> NewClockCache(size_t capacity, int num_shard_bits,
#include <assert.h>
#include <atomic>
#include <deque>
#include <limits>
#include "tbb/concurrent_hash_map.h"
@@ -252,8 +251,11 @@ class ClockCacheShard : public CacheShard {
//
// Not necessary to hold mutex_ before being called.
virtual bool Ref(Cache::Handle* handle) override;
virtual void Release(Cache::Handle* handle) override;
virtual bool Release(Cache::Handle* handle,
bool force_erase = false) override;
virtual void Erase(const Slice& key, uint32_t hash) override;
bool EraseAndConfirm(const Slice& key, uint32_t hash,
CleanupContext* context);
virtual size_t GetUsage() const override;
virtual size_t GetPinnedUsage() const override;
virtual void EraseUnRefEntries() override;
@@ -274,13 +276,17 @@ class ClockCacheShard : public CacheShard {
// Decrease reference count of the entry. If this decreases the count to 0,
// recycle the entry. If set_usage is true, also set the usage bit.
//
// returns true if a value is erased.
//
// Not necessary to hold mutex_ before being called.
void Unref(CacheHandle* handle, bool set_usage, CleanupContext* context);
bool Unref(CacheHandle* handle, bool set_usage, CleanupContext* context);
// Unset in-cache bit of the entry. Recycle the handle if necessary.
//
// returns true if a value is erased.
//
// Has to hold mutex_ before being called.
void UnsetInCache(CacheHandle* handle, CleanupContext* context);
bool UnsetInCache(CacheHandle* handle, CleanupContext* context);
// Put the handle back to recycle_ list, and put the value associated with
// it into the to-be-deleted list. It doesn't clean up the key as it might be
@@ -432,7 +438,7 @@ bool ClockCacheShard::Ref(Cache::Handle* h) {
return false;
}
void ClockCacheShard::Unref(CacheHandle* handle, bool set_usage,
bool ClockCacheShard::Unref(CacheHandle* handle, bool set_usage,
CleanupContext* context) {
if (set_usage) {
handle->flags.fetch_or(kUsageBit, std::memory_order_relaxed);
@@ -451,9 +457,10 @@ void ClockCacheShard::Unref(CacheHandle* handle, bool set_usage,
RecycleHandle(handle, context);
}
}
return context->to_delete_value.size();
}
void ClockCacheShard::UnsetInCache(CacheHandle* handle,
bool ClockCacheShard::UnsetInCache(CacheHandle* handle,
CleanupContext* context) {
mutex_.AssertHeld();
// Use acquire-release semantics as previous operations on the cache entry
@@ -465,6 +472,7 @@ void ClockCacheShard::UnsetInCache(CacheHandle* handle,
if (InCache(flags) && CountRefs(flags) == 0) {
RecycleHandle(handle, context);
}
return context->to_delete_value.size();
}
bool ClockCacheShard::TryEvict(CacheHandle* handle, CleanupContext* context) {
@@ -618,27 +626,36 @@ Cache::Handle* ClockCacheShard::Lookup(const Slice& key, uint32_t hash) {
return reinterpret_cast<Cache::Handle*>(handle);
}
void ClockCacheShard::Release(Cache::Handle* h) {
bool ClockCacheShard::Release(Cache::Handle* h, bool force_erase) {
CleanupContext context;
CacheHandle* handle = reinterpret_cast<CacheHandle*>(h);
Unref(handle, true, &context);
bool erased = Unref(handle, true, &context);
if (force_erase && !erased) {
erased = EraseAndConfirm(handle->key, handle->hash, &context);
}
Cleanup(context);
return erased;
}
void ClockCacheShard::Erase(const Slice& key, uint32_t hash) {
CleanupContext context;
{
MutexLock l(&mutex_);
HashTable::accessor accessor;
if (table_.find(accessor, CacheKey(key, hash))) {
CacheHandle* handle = accessor->second;
table_.erase(accessor);
UnsetInCache(handle, &context);
}
}
EraseAndConfirm(key, hash, &context);
Cleanup(context);
}
bool ClockCacheShard::EraseAndConfirm(const Slice& key, uint32_t hash,
CleanupContext* context) {
MutexLock l(&mutex_);
HashTable::accessor accessor;
bool erased = false;
if (table_.find(accessor, CacheKey(key, hash))) {
CacheHandle* handle = accessor->second;
table_.erase(accessor);
erased = UnsetInCache(handle, context);
}
return erased;
}
void ClockCacheShard::EraseUnRefEntries() {
CleanupContext context;
{

@@ -273,9 +273,9 @@ void LRUCacheShard::SetHighPriorityPoolRatio(double high_pri_pool_ratio) {
MaintainPoolSize();
}
void LRUCacheShard::Release(Cache::Handle* handle) {
bool LRUCacheShard::Release(Cache::Handle* handle, bool force_erase) {
if (handle == nullptr) {
return;
return false;
}
LRUHandle* e = reinterpret_cast<LRUHandle*>(handle);
bool last_reference = false;
@@ -287,10 +287,10 @@ void LRUCacheShard::Release(Cache::Handle* handle) {
}
if (e->refs == 1 && e->InCache()) {
// The item is still in cache, and nobody else holds a reference to it
if (usage_ > capacity_) {
if (usage_ > capacity_ || force_erase) {
// the cache is full
// The LRU list must be empty since the cache is full
assert(lru_.next == &lru_);
assert(!(usage_ > capacity_) || lru_.next == &lru_);
// take this opportunity and remove the item
table_.Remove(e->key(), e->hash);
e->SetInCache(false);
@@ -308,6 +308,7 @@ void LRUCacheShard::Release(Cache::Handle* handle) {
if (last_reference) {
e->Free();
}
return last_reference;
}
Status LRUCacheShard::Insert(const Slice& key, uint32_t hash, void* value,

cache/lru_cache.h

@@ -178,7 +178,8 @@ class LRUCacheShard : public CacheShard {
Cache::Priority priority) override;
virtual Cache::Handle* Lookup(const Slice& key, uint32_t hash) override;
virtual bool Ref(Cache::Handle* handle) override;
virtual void Release(Cache::Handle* handle) override;
virtual bool Release(Cache::Handle* handle,
bool force_erase = false) override;
virtual void Erase(const Slice& key, uint32_t hash) override;
// Although in some platforms the update of size_t is atomic, to make sure

@@ -63,9 +63,9 @@ bool ShardedCache::Ref(Handle* handle) {
return GetShard(Shard(hash))->Ref(handle);
}
void ShardedCache::Release(Handle* handle) {
bool ShardedCache::Release(Handle* handle, bool force_erase) {
uint32_t hash = GetHash(handle);
GetShard(Shard(hash))->Release(handle);
return GetShard(Shard(hash))->Release(handle, force_erase);
}
void ShardedCache::Erase(const Slice& key) {

@@ -30,7 +30,7 @@ class CacheShard {
Cache::Handle** handle, Cache::Priority priority) = 0;
virtual Cache::Handle* Lookup(const Slice& key, uint32_t hash) = 0;
virtual bool Ref(Cache::Handle* handle) = 0;
virtual void Release(Cache::Handle* handle) = 0;
virtual bool Release(Cache::Handle* handle, bool force_erase = false) = 0;
virtual void Erase(const Slice& key, uint32_t hash) = 0;
virtual void SetCapacity(size_t capacity) = 0;
virtual void SetStrictCapacityLimit(bool strict_capacity_limit) = 0;
@@ -65,7 +65,7 @@ class ShardedCache : public Cache {
Handle** handle, Priority priority) override;
virtual Handle* Lookup(const Slice& key, Statistics* stats) override;
virtual bool Ref(Handle* handle) override;
virtual void Release(Handle* handle) override;
virtual bool Release(Handle* handle, bool force_erase = false) override;
virtual void Erase(const Slice& key) override;
virtual uint64_t NewId() override;
virtual size_t GetCapacity() const override;

@@ -110,10 +110,19 @@ class Cache {
// REQUIRES: handle must have been returned by a method on *this.
virtual bool Ref(Handle* handle) = 0;
// Release a mapping returned by a previous Lookup().
/**
 * Release a mapping returned by a previous Lookup(). A released entry might
 * still remain in the cache in case it is later looked up by others. If
 * force_erase is set then it also erases the entry from the cache if there
 * is no other reference to it. Erasing the entry should call the deleter
 * function that was provided when the entry was inserted.
 *
 * Returns true if the entry was also erased.
 */
// REQUIRES: handle must not have been released yet.
// REQUIRES: handle must have been returned by a method on *this.
virtual void Release(Handle* handle) = 0;
virtual bool Release(Handle* handle, bool force_erase = false) = 0;
// Return the value encapsulated in a handle returned by a
// successful Lookup().

@@ -66,7 +66,9 @@ class SimCacheImpl : public SimCache {
virtual bool Ref(Handle* handle) override { return cache_->Ref(handle); }
virtual void Release(Handle* handle) override { cache_->Release(handle); }
virtual bool Release(Handle* handle, bool force_erase = false) override {
return cache_->Release(handle, force_erase);
}
virtual void Erase(const Slice& key) override {
cache_->Erase(key);
