LRU cache mid-point insertion

Summary:
Add mid-point insertion support to the LRU cache. A caller of `Cache::Insert()` can set an additional parameter to give a cache entry higher priority. The LRU cache will reserve at most `capacity * high_pri_pool_ratio` bytes for high-pri cache entries. If `high_pri_pool_ratio` is zero, the cache degenerates to a normal LRU cache.
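
For illustration, here is a minimal sketch of the new API as introduced by this diff (the 8MB capacity, the 0.1 ratio, and the value/charge/deleter placeholders are hypothetical examples, not part of the commit):

// Reserve 10% of an 8MB LRU cache for high-priority entries.
std::shared_ptr<rocksdb::Cache> cache = rocksdb::NewLRUCache(
    8 << 20 /*capacity*/, 6 /*num_shard_bits*/,
    false /*strict_capacity_limit*/, 0.1 /*high_pri_pool_ratio*/);
// High-priority entries land in the reserved pool and are demoted to the
// low-pri pool only when that pool overflows.
rocksdb::Status s = cache->Insert("index_block", value, charge, deleter,
                                  nullptr /*handle*/,
                                  rocksdb::Cache::Priority::HIGH);
// Existing call sites compile unchanged: priority defaults to Priority::LOW.
s = cache->Insert("data_block", value, charge, deleter);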

Context: If we put index and filter blocks into the RocksDB block cache, they can be swapped out too early. We want to add an option to RocksDB that reserves some capacity in the block cache just for index/filter blocks, to mitigate the issue.

In later diffs I'll update the block-based table reader to use this interface to cache index/filter blocks at high priority, expose the option through `DBOptions`, and make it dynamically changeable.

Test Plan: unit test.

Reviewers: IslamAbdelRahman, sdong, lightmark

Reviewed By: lightmark

Subscribers: andrewkr, dhruba, march, leveldb

Differential Revision: https://reviews.facebook.net/D61977
main
Yi Wu 8 years ago
parent 6a17b07ca8
commit 72f8cc703c
Changed files (lines changed):
  1. CMakeLists.txt (1)
  2. Makefile (5)
  3. include/rocksdb/cache.h (15)
  4. util/cache_test.cc (15)
  5. util/clock_cache.cc (5)
  6. util/lru_cache.cc (96)
  7. util/lru_cache.h (71)
  8. util/lru_cache_test.cc (163)
  9. util/sharded_cache.cc (4)
  10. util/sharded_cache.h (4)
  11. utilities/simulator_cache/sim_cache.cc (7)

@@ -427,6 +427,7 @@ set(TESTS
util/heap_test.cc
util/histogram_test.cc
util/iostats_context_test.cc
util/lru_cache_test.cc
util/mock_env_test.cc
util/options_settable_test.cc
util/options_test.cc

@@ -385,6 +385,7 @@ TESTS = \
iostats_context_test \
persistent_cache_test \
statistics_test \
lru_cache_test \
PARALLEL_TEST = \
backupable_db_test \
@@ -1247,6 +1248,10 @@ persistent_cache_test: utilities/persistent_cache/persistent_cache_test.o db/db
statistics_test: util/statistics_test.o $(LIBOBJECTS) $(TESTHARNESS)
$(AM_LINK)
lru_cache_test: util/lru_cache_test.o $(LIBOBJECTS) $(TESTHARNESS)
$(AM_LINK)
#-------------------------------------------------
# make install related stuff
INSTALL_PATH ?= /usr/local

@@ -33,10 +33,14 @@ class Cache;
// Create a new cache with a fixed size capacity. The cache is sharded
// to 2^num_shard_bits shards, by hash of the key. The total capacity
// is divided and evenly assigned to each shard.
// is divided and evenly assigned to each shard. If strict_capacity_limit
// is set, inserts into the cache will fail when the cache is full. The user
// can also set the fraction of the cache reserved for high-priority entries
// via high_pri_pool_ratio.
extern std::shared_ptr<Cache> NewLRUCache(size_t capacity,
int num_shard_bits = 6,
bool strict_capacity_limit = false);
bool strict_capacity_limit = false,
double high_pri_pool_ratio = 0.0);
// Similar to NewLRUCache, but create a cache based on CLOCK algorithm with
// better concurrent performance in some cases. See util/clock_cache.cc for
@@ -49,6 +53,10 @@ extern std::shared_ptr<Cache> NewClockCache(size_t capacity,
class Cache {
public:
// Depending on implementation, cache entries with high priority could be less
// likely to get evicted than low priority entries.
enum class Priority { HIGH, LOW };
Cache() {}
// Destroys all existing entries by calling the "deleter"
@@ -80,7 +88,8 @@ class Cache {
// value will be passed to "deleter".
virtual Status Insert(const Slice& key, void* value, size_t charge,
void (*deleter)(const Slice& key, void* value),
Handle** handle = nullptr) = 0;
Handle** handle = nullptr,
Priority priority = Priority::LOW) = 0;
// If the cache has no mapping for "key", returns nullptr.
//
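
Two things are worth noting about this interface change: the new parameter is defaulted, so existing `Insert()` call sites keep compiling with low priority, and the priority is only a hint that an implementation is free to ignore (the clock cache below accepts it but does not use it). The two call forms:

cache->Insert(key, value, charge, deleter);            // Priority::LOW (default)
cache->Insert(key, value, charge, deleter, &handle,
              Cache::Priority::HIGH);                  // opt into the high-pri pool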

@@ -589,15 +589,20 @@ TEST_P(CacheTest, ApplyToAllCacheEntiresTest) {
ASSERT_TRUE(inserted == callback_state);
}
shared_ptr<Cache> (*newLRUCache)(size_t, int, bool) = NewLRUCache;
shared_ptr<Cache> NewLRUCacheFunc(size_t capacity, int num_shard_bits,
bool strict_capacity_limit) {
return NewLRUCache(capacity, num_shard_bits, strict_capacity_limit);
}
shared_ptr<Cache> (*new_lru_cache_func)(size_t, int, bool) = NewLRUCacheFunc;
#ifdef SUPPORT_CLOCK_CACHE
shared_ptr<Cache> (*newClockCache)(size_t, int, bool) = NewClockCache;
shared_ptr<Cache> (*new_clock_cache_func)(size_t, int, bool) = NewClockCache;
INSTANTIATE_TEST_CASE_P(CacheTestInstance, CacheTest,
testing::Values(NewCache(newLRUCache),
NewCache(newClockCache)));
testing::Values(NewCache(new_lru_cache_func),
NewCache(new_clock_cache_func)));
#else
INSTANTIATE_TEST_CASE_P(CacheTestInstance, CacheTest,
testing::Values(NewCache(newLRUCache)));
testing::Values(NewCache(new_lru_cache_func)));
#endif // SUPPORT_CLOCK_CACHE
} // namespace rocksdb
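
The `NewLRUCacheFunc` wrapper above is needed because default arguments are not part of a function's type: once `NewLRUCache` gains a fourth (defaulted) parameter, its address no longer matches the three-argument function-pointer type this test uses. A minimal sketch of the distinction:

shared_ptr<Cache> (*fp)(size_t, int, bool) = NewLRUCache;      // no longer compiles
shared_ptr<Cache> (*fp)(size_t, int, bool) = NewLRUCacheFunc;  // OK via the wrapper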

@@ -240,7 +240,8 @@ class ClockCacheShard : public CacheShard {
virtual Status Insert(const Slice& key, uint32_t hash, void* value,
size_t charge,
void (*deleter)(const Slice& key, void* value),
Cache::Handle** handle) override;
Cache::Handle** handle,
Cache::Priority priority) override;
virtual Cache::Handle* Lookup(const Slice& key, uint32_t hash) override;
virtual void Release(Cache::Handle* handle) override;
virtual void Erase(const Slice& key, uint32_t hash) override;
@@ -570,7 +571,7 @@ CacheHandle* ClockCacheShard::Insert(
Status ClockCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
size_t charge,
void (*deleter)(const Slice& key, void* value),
Cache::Handle** h) {
Cache::Handle** h, Cache::Priority priority) {
CleanupContext context;
HashTable::accessor accessor;
char* key_data = new char[key.size()];

@@ -94,10 +94,12 @@ void LRUHandleTable::Resize() {
length_ = new_length;
}
LRUCacheShard::LRUCacheShard() : usage_(0), lru_usage_(0) {
LRUCacheShard::LRUCacheShard()
: usage_(0), lru_usage_(0), high_pri_pool_usage_(0) {
// Make empty circular linked list
lru_.next = &lru_;
lru_.prev = &lru_;
lru_low_pri_ = &lru_;
}
LRUCacheShard::~LRUCacheShard() {}
@@ -116,12 +118,12 @@ void LRUCacheShard::EraseUnRefEntries() {
MutexLock l(&mutex_);
while (lru_.next != &lru_) {
LRUHandle* old = lru_.next;
assert(old->in_cache);
assert(old->InCache());
assert(old->refs ==
1); // LRU list contains elements which may be evicted
LRU_Remove(old);
table_.Remove(old->key(), old->hash);
old->in_cache = false;
old->SetInCache(false);
Unref(old);
usage_ -= old->charge;
last_reference_list.push_back(old);
@@ -145,35 +147,71 @@ void LRUCacheShard::ApplyToAllCacheEntries(void (*callback)(void*, size_t),
}
}
void LRUCacheShard::TEST_GetLRUList(LRUHandle** lru, LRUHandle** lru_low_pri) {
*lru = &lru_;
*lru_low_pri = lru_low_pri_;
}
void LRUCacheShard::LRU_Remove(LRUHandle* e) {
assert(e->next != nullptr);
assert(e->prev != nullptr);
if (lru_low_pri_ == e) {
lru_low_pri_ = e->prev;
}
e->next->prev = e->prev;
e->prev->next = e->next;
e->prev = e->next = nullptr;
lru_usage_ -= e->charge;
if (e->InHighPriPool()) {
assert(high_pri_pool_usage_ >= e->charge);
high_pri_pool_usage_ -= e->charge;
}
}
void LRUCacheShard::LRU_Append(LRUHandle* e) {
// Make "e" newest entry by inserting just before lru_
void LRUCacheShard::LRU_Insert(LRUHandle* e) {
assert(e->next == nullptr);
assert(e->prev == nullptr);
if (high_pri_pool_ratio_ > 0 && e->IsHighPri()) {
// Insert "e" to head of LRU list.
e->next = &lru_;
e->prev = lru_.prev;
e->prev->next = e;
e->next->prev = e;
e->SetInHighPriPool(true);
high_pri_pool_usage_ += e->charge;
MaintainPoolSize();
} else {
// Insert "e" to the head of low-pri pool. Note that when
// high_pri_pool_ratio is 0, head of low-pri pool is also head of LRU list.
e->next = lru_low_pri_->next;
e->prev = lru_low_pri_;
e->prev->next = e;
e->next->prev = e;
e->SetInHighPriPool(false);
lru_low_pri_ = e;
}
lru_usage_ += e->charge;
}
void LRUCacheShard::MaintainPoolSize() {
while (high_pri_pool_usage_ > high_pri_pool_capacity_) {
// Overflow last entry in high-pri pool to low-pri pool.
lru_low_pri_ = lru_low_pri_->next;
assert(lru_low_pri_ != &lru_);
lru_low_pri_->SetInHighPriPool(false);
high_pri_pool_usage_ -= lru_low_pri_->charge;
}
}
void LRUCacheShard::EvictFromLRU(size_t charge,
autovector<LRUHandle*>* deleted) {
while (usage_ + charge > capacity_ && lru_.next != &lru_) {
LRUHandle* old = lru_.next;
assert(old->in_cache);
assert(old->InCache());
assert(old->refs == 1); // LRU list contains elements which may be evicted
LRU_Remove(old);
table_.Remove(old->key(), old->hash);
old->in_cache = false;
old->SetInCache(false);
Unref(old);
usage_ -= old->charge;
deleted->push_back(old);
@@ -185,6 +223,7 @@ void LRUCacheShard::SetCapacity(size_t capacity) {
{
MutexLock l(&mutex_);
capacity_ = capacity;
high_pri_pool_capacity_ = capacity_ * high_pri_pool_ratio_;
EvictFromLRU(0, &last_reference_list);
}
// we free the entries here outside of mutex for
@@ -203,7 +242,7 @@ Cache::Handle* LRUCacheShard::Lookup(const Slice& key, uint32_t hash) {
MutexLock l(&mutex_);
LRUHandle* e = table_.Lookup(key, hash);
if (e != nullptr) {
assert(e->in_cache);
assert(e->InCache());
if (e->refs == 1) {
LRU_Remove(e);
}
@@ -212,6 +251,13 @@ Cache::Handle* LRUCacheShard::Lookup(const Slice& key, uint32_t hash) {
return reinterpret_cast<Cache::Handle*>(e);
}
void LRUCacheShard::SetHighPriorityPoolRatio(double high_pri_pool_ratio) {
MutexLock l(&mutex_);
high_pri_pool_ratio_ = high_pri_pool_ratio;
high_pri_pool_capacity_ = capacity_ * high_pri_pool_ratio_;
MaintainPoolSize();
}
void LRUCacheShard::Release(Cache::Handle* handle) {
if (handle == nullptr) {
return;
@@ -224,7 +270,7 @@ void LRUCacheShard::Release(Cache::Handle* handle) {
if (last_reference) {
usage_ -= e->charge;
}
if (e->refs == 1 && e->in_cache) {
if (e->refs == 1 && e->InCache()) {
// The item is still in cache, and nobody else holds a reference to it
if (usage_ > capacity_) {
// the cache is full
@@ -232,13 +278,13 @@ void LRUCacheShard::Release(Cache::Handle* handle) {
assert(lru_.next == &lru_);
// take this opportunity and remove the item
table_.Remove(e->key(), e->hash);
e->in_cache = false;
e->SetInCache(false);
Unref(e);
usage_ -= e->charge;
last_reference = true;
} else {
// put the item on the list to be potentially freed
LRU_Append(e);
LRU_Insert(e);
}
}
}
@@ -252,7 +298,7 @@ void LRUCacheShard::Release(Cache::Handle* handle) {
Status LRUCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
size_t charge,
void (*deleter)(const Slice& key, void* value),
Cache::Handle** handle) {
Cache::Handle** handle, Cache::Priority priority) {
// Allocate the memory here outside of the mutex
// If the cache is full, we'll have to release it
// It shouldn't happen very often though.
@@ -270,7 +316,8 @@ Status LRUCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
? 1
: 2); // One from LRUCache, one for the returned handle
e->next = e->prev = nullptr;
e->in_cache = true;
e->SetInCache(true);
e->SetPriority(priority);
memcpy(e->key_data, key.data(), key.size());
{
@@ -295,7 +342,7 @@ Status LRUCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
LRUHandle* old = table_.Insert(e);
usage_ += e->charge;
if (old != nullptr) {
old->in_cache = false;
old->SetInCache(false);
if (Unref(old)) {
usage_ -= old->charge;
// old is on LRU because it's in cache and its reference count
@@ -305,7 +352,7 @@ Status LRUCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
}
}
if (handle == nullptr) {
LRU_Append(e);
LRU_Insert(e);
} else {
*handle = reinterpret_cast<Cache::Handle*>(e);
}
@@ -333,10 +380,10 @@ void LRUCacheShard::Erase(const Slice& key, uint32_t hash) {
if (last_reference) {
usage_ -= e->charge;
}
if (last_reference && e->in_cache) {
if (last_reference && e->InCache()) {
LRU_Remove(e);
}
e->in_cache = false;
e->SetInCache(false);
}
}
@@ -360,12 +407,16 @@ size_t LRUCacheShard::GetPinnedUsage() const {
class LRUCache : public ShardedCache {
public:
LRUCache(size_t capacity, int num_shard_bits, bool strict_capacity_limit)
LRUCache(size_t capacity, int num_shard_bits, bool strict_capacity_limit,
double high_pri_pool_ratio)
: ShardedCache(capacity, num_shard_bits, strict_capacity_limit) {
int num_shards = 1 << num_shard_bits;
shards_ = new LRUCacheShard[num_shards];
SetCapacity(capacity);
SetStrictCapacityLimit(strict_capacity_limit);
for (int i = 0; i < num_shards; i++) {
shards_[i].SetHighPriorityPoolRatio(high_pri_pool_ratio);
}
}
virtual ~LRUCache() { delete[] shards_; }
@@ -398,12 +449,17 @@ class LRUCache : public ShardedCache {
};
std::shared_ptr<Cache> NewLRUCache(size_t capacity, int num_shard_bits,
bool strict_capacity_limit) {
bool strict_capacity_limit,
double high_pri_pool_ratio) {
if (num_shard_bits >= 20) {
return nullptr; // the cache cannot be sharded into too many fine pieces
}
if (high_pri_pool_ratio < 0.0 || high_pri_pool_ratio > 1.0) {
// invalid high_pri_pool_ratio
return nullptr;
}
return std::make_shared<LRUCache>(capacity, num_shard_bits,
strict_capacity_limit);
strict_capacity_limit, high_pri_pool_ratio);
}
} // namespace rocksdb
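
To make the single-list layout above easier to picture, here is a self-contained sketch of the same idea (an illustrative model only: it uses std::list and unit-sized entries, whereas the real shard uses the intrusive circular list, byte charges, and per-shard mutex shown above):

#include <cstddef>
#include <iostream>
#include <list>
#include <string>

// One list ordered from LRU (front) to MRU (back). The high-pri pool is the
// MRU-most suffix of the list; its boundary plays the role of lru_low_pri_.
class MidpointList {
 public:
  explicit MidpointList(size_t high_pri_capacity)
      : high_pri_capacity_(high_pri_capacity) {}

  void InsertHigh(const std::string& key) {
    entries_.push_back({key, true});  // newest (MRU) end: "just before lru_"
    ++high_pri_size_;
    MaintainPoolSize();
  }

  void InsertLow(const std::string& key) {
    // Head of the low-pri pool: just below the oldest high-pri entry.
    entries_.insert(LowPriEnd(), {key, false});
  }

  void Print() const {
    for (const auto& e : entries_) {
      std::cout << e.key << (e.high_pri ? "(H) " : " ");
    }
    std::cout << "\n";
  }

 private:
  struct Entry {
    std::string key;
    bool high_pri;
  };

  std::list<Entry>::iterator LowPriEnd() {
    auto it = entries_.end();
    std::advance(it, -static_cast<long>(high_pri_size_));
    return it;
  }

  // Demote the oldest high-pri entry until the pool fits, mirroring
  // LRUCacheShard::MaintainPoolSize(): the demoted entry keeps its list
  // position; only the boundary and its flag change.
  void MaintainPoolSize() {
    while (high_pri_size_ > high_pri_capacity_) {
      LowPriEnd()->high_pri = false;
      --high_pri_size_;
    }
  }

  std::list<Entry> entries_;
  size_t high_pri_size_ = 0;
  size_t high_pri_capacity_;
};

int main() {
  MidpointList l(2 /*high-pri pool capacity*/);
  l.InsertLow("a");
  l.InsertLow("b");
  l.InsertHigh("X");
  l.InsertHigh("Y");
  l.Print();          // a b X(H) Y(H)
  l.InsertHigh("Z");  // pool overflows; X is demoted in place
  l.Print();          // a b X Y(H) Z(H)
  l.InsertLow("c");   // inserted at head of low-pri pool
  l.Print();          // a b X c Y(H) Z(H)
  return 0;
}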

@@ -51,8 +51,15 @@ struct LRUHandle {
size_t key_length;
uint32_t refs; // a number of refs to this entry
// cache itself is counted as 1
bool in_cache; // true, if this entry is referenced by the hash table
// Includes the following flags:
// in_cache: whether this entry is referenced by the hash table.
// is_high_pri: whether this entry is a high-priority entry.
// in_high_pri_pool: whether this entry is in the high-pri pool.
char flags;
uint32_t hash; // Hash of key(); used for fast sharding and comparisons
char key_data[1]; // Beginning of key
Slice key() const {
@@ -65,9 +72,39 @@
}
}
bool InCache() { return flags & 1; }
bool IsHighPri() { return flags & 2; }
bool InHighPriPool() { return flags & 4; }
void SetInCache(bool in_cache) {
if (in_cache) {
flags |= 1;
} else {
flags &= ~1;
}
}
void SetPriority(Cache::Priority priority) {
if (priority == Cache::Priority::HIGH) {
flags |= 2;
} else {
flags &= ~2;
}
}
void SetInHighPriPool(bool in_high_pri_pool) {
if (in_high_pri_pool) {
flags |= 4;
} else {
flags &= ~4;
}
}
void Free() {
assert((refs == 1 && in_cache) || (refs == 0 && !in_cache));
assert((refs == 1 && InCache()) || (refs == 0 && !InCache()));
if (deleter) {
(*deleter)(key(), value);
}
delete[] reinterpret_cast<char*>(this);
}
};
@@ -92,7 +129,7 @@ class LRUHandleTable {
LRUHandle* h = list_[i];
while (h != nullptr) {
auto n = h->next_hash;
assert(h->in_cache);
assert(h->InCache());
func(h);
h = n;
}
@@ -128,11 +165,15 @@ class LRUCacheShard : public CacheShard {
// Set the flag to reject insertion if the cache is full.
virtual void SetStrictCapacityLimit(bool strict_capacity_limit) override;
// Set percentage of capacity reserved for high-pri cache entries.
void SetHighPriorityPoolRatio(double high_pri_pool_ratio);
// Like Cache methods, but with an extra "hash" parameter.
virtual Status Insert(const Slice& key, uint32_t hash, void* value,
size_t charge,
void (*deleter)(const Slice& key, void* value),
Cache::Handle** handle) override;
Cache::Handle** handle,
Cache::Priority priority) override;
virtual Cache::Handle* Lookup(const Slice& key, uint32_t hash) override;
virtual void Release(Cache::Handle* handle) override;
virtual void Erase(const Slice& key, uint32_t hash) override;
@@ -149,9 +190,16 @@ class LRUCacheShard : public CacheShard {
virtual void EraseUnRefEntries() override;
void TEST_GetLRUList(LRUHandle** lru, LRUHandle** lru_low_pri);
private:
void LRU_Remove(LRUHandle* e);
void LRU_Append(LRUHandle* e);
void LRU_Insert(LRUHandle* e);
// Overflow the last entry in high-pri pool to low-pri pool until size of
// high-pri pool is no larger than the size specified by high_pri_pool_ratio.
void MaintainPoolSize();
// Just reduce the reference count by 1.
// Return true if last reference
bool Unref(LRUHandle* e);
@@ -171,9 +219,19 @@ class LRUCacheShard : public CacheShard {
// Memory size for entries residing only in the LRU list
size_t lru_usage_;
// Memory size for entries in high-pri pool.
size_t high_pri_pool_usage_;
// Whether to reject insertion if cache reaches its full capacity.
bool strict_capacity_limit_;
// Ratio of capacity reserved for high priority cache entries.
double high_pri_pool_ratio_;
// High-pri pool size, equal to capacity * high_pri_pool_ratio.
// Remember the value to avoid recomputing each time.
double high_pri_pool_capacity_;
// mutex_ protects the following state.
// We don't count mutex_ as the cache's internal state so semantically we
// don't mind mutex_ invoking the non-const actions.
@@ -184,6 +242,9 @@ class LRUCacheShard : public CacheShard {
// LRU contains items which can be evicted, i.e. referenced only by cache
LRUHandle lru_;
// Pointer to head of low-pri pool in LRU list.
LRUHandle* lru_low_pri_;
LRUHandleTable table_;
};
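
A small worked example of the bookkeeping above (the numbers are hypothetical):

// capacity_ = 100, high_pri_pool_ratio_ = 0.2
//   => high_pri_pool_capacity_ = 100 * 0.2 = 20
// Insert three Priority::HIGH entries with charge 10 each. After the third,
// high_pri_pool_usage_ = 30 > 20, so MaintainPoolSize() advances
// lru_low_pri_ past the oldest high-pri entry, clears its in_high_pri_pool
// flag, and drops high_pri_pool_usage_ back to 20. The demoted entry then
// ages out through the low-pri pool like any other entry.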

@@ -0,0 +1,163 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
#include "util/lru_cache.h"
#include <string>
#include <vector>
#include "util/testharness.h"
namespace rocksdb {
class LRUCacheTest : public testing::Test {
public:
LRUCacheTest() {}
~LRUCacheTest() {}
void NewCache(size_t capacity, double high_pri_pool_ratio = 0.0) {
cache_.reset(new LRUCacheShard());
cache_->SetCapacity(capacity);
cache_->SetStrictCapacityLimit(false);
cache_->SetHighPriorityPoolRatio(high_pri_pool_ratio);
}
void Insert(const std::string& key,
Cache::Priority priority = Cache::Priority::LOW) {
cache_->Insert(key, 0 /*hash*/, nullptr /*value*/, 1 /*charge*/,
nullptr /*deleter*/, nullptr /*handle*/, priority);
}
void Insert(char key, Cache::Priority priority = Cache::Priority::LOW) {
Insert(std::string(1, key), priority);
}
bool Lookup(const std::string& key) {
auto handle = cache_->Lookup(key, 0 /*hash*/);
if (handle) {
cache_->Release(handle);
return true;
}
return false;
}
bool Lookup(char key) { return Lookup(std::string(1, key)); }
void Erase(const std::string& key) { cache_->Erase(key, 0 /*hash*/); }
void ValidateLRUList(std::vector<std::string> keys,
size_t num_high_pri_pool_keys = 0) {
LRUHandle* lru;
LRUHandle* lru_low_pri;
cache_->TEST_GetLRUList(&lru, &lru_low_pri);
LRUHandle* iter = lru;
bool in_high_pri_pool = false;
size_t high_pri_pool_keys = 0;
if (iter == lru_low_pri) {
in_high_pri_pool = true;
}
for (const auto& key : keys) {
iter = iter->next;
ASSERT_NE(lru, iter);
ASSERT_EQ(key, iter->key().ToString());
ASSERT_EQ(in_high_pri_pool, iter->InHighPriPool());
if (in_high_pri_pool) {
high_pri_pool_keys++;
}
if (iter == lru_low_pri) {
ASSERT_FALSE(in_high_pri_pool);
in_high_pri_pool = true;
}
}
ASSERT_EQ(lru, iter->next);
ASSERT_TRUE(in_high_pri_pool);
ASSERT_EQ(num_high_pri_pool_keys, high_pri_pool_keys);
}
private:
std::unique_ptr<LRUCacheShard> cache_;
};
TEST_F(LRUCacheTest, BasicLRU) {
NewCache(5);
for (char ch = 'a'; ch <= 'e'; ch++) {
Insert(ch);
}
ValidateLRUList({"a", "b", "c", "d", "e"});
for (char ch = 'x'; ch <= 'z'; ch++) {
Insert(ch);
}
ValidateLRUList({"d", "e", "x", "y", "z"});
ASSERT_FALSE(Lookup("b"));
ValidateLRUList({"d", "e", "x", "y", "z"});
ASSERT_TRUE(Lookup("e"));
ValidateLRUList({"d", "x", "y", "z", "e"});
ASSERT_TRUE(Lookup("z"));
ValidateLRUList({"d", "x", "y", "e", "z"});
Erase("x");
ValidateLRUList({"d", "y", "e", "z"});
ASSERT_TRUE(Lookup("d"));
ValidateLRUList({"y", "e", "z", "d"});
Insert("u");
ValidateLRUList({"y", "e", "z", "d", "u"});
Insert("v");
ValidateLRUList({"e", "z", "d", "u", "v"});
}
TEST_F(LRUCacheTest, MidPointInsertion) {
// Allocate 2 cache entries to high-pri pool.
NewCache(5, 0.45);
Insert("a", Cache::Priority::LOW);
Insert("b", Cache::Priority::LOW);
Insert("c", Cache::Priority::LOW);
ValidateLRUList({"a", "b", "c"}, 0);
// Low-pri entries can take high-pri pool capacity if available
Insert("u", Cache::Priority::LOW);
Insert("v", Cache::Priority::LOW);
ValidateLRUList({"a", "b", "c", "u", "v"}, 0);
Insert("X", Cache::Priority::HIGH);
Insert("Y", Cache::Priority::HIGH);
ValidateLRUList({"c", "u", "v", "X", "Y"}, 2);
// High-pri entries can overflow to low-pri pool.
Insert("Z", Cache::Priority::HIGH);
ValidateLRUList({"u", "v", "X", "Y", "Z"}, 2);
// Low-pri entries will be inserted to head of low-pri pool.
Insert("a", Cache::Priority::LOW);
ValidateLRUList({"v", "X", "a", "Y", "Z"}, 2);
// Low-pri entries will be inserted to head of low-pri pool after lookup.
ASSERT_TRUE(Lookup("v"));
ValidateLRUList({"X", "a", "v", "Y", "Z"}, 2);
// High-pri entries will be inserted to the head of the list after lookup.
ASSERT_TRUE(Lookup("X"));
ValidateLRUList({"a", "v", "Y", "Z", "X"}, 2);
ASSERT_TRUE(Lookup("Z"));
ValidateLRUList({"a", "v", "Y", "X", "Z"}, 2);
Erase("Y");
ValidateLRUList({"a", "v", "X", "Z"}, 2);
Erase("X");
ValidateLRUList({"a", "v", "Z"}, 1);
Insert("d", Cache::Priority::LOW);
Insert("e", Cache::Priority::LOW);
ValidateLRUList({"a", "v", "d", "e", "Z"}, 1);
Insert("f", Cache::Priority::LOW);
Insert("g", Cache::Priority::LOW);
ValidateLRUList({"d", "e", "f", "g", "Z"}, 1);
ASSERT_TRUE(Lookup("d"));
ValidateLRUList({"e", "f", "g", "d", "Z"}, 1);
}
} // namespace rocksdb
int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

@@ -40,10 +40,10 @@ void ShardedCache::SetStrictCapacityLimit(bool strict_capacity_limit) {
Status ShardedCache::Insert(const Slice& key, void* value, size_t charge,
void (*deleter)(const Slice& key, void* value),
Handle** handle) {
Handle** handle, Priority priority) {
uint32_t hash = HashSlice(key);
return GetShard(Shard(hash))
->Insert(key, hash, value, charge, deleter, handle);
->Insert(key, hash, value, charge, deleter, handle, priority);
}
Cache::Handle* ShardedCache::Lookup(const Slice& key) {
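
For context, this is the shard-routing step the hunk above modifies; the body of Shard() shown here is an assumption based on the cache.h comment that keys are sharded by hash (the diff only shows the call site):

uint32_t hash = HashSlice(key);                   // hash the key once
uint32_t shard = hash >> (32 - num_shard_bits_);  // top bits pick the shard
                                                  // (assuming num_shard_bits_ > 0)
GetShard(shard)->Insert(key, hash, value, charge, deleter, handle,
                        priority);                // priority forwarded verbatim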

@@ -26,7 +26,7 @@ class CacheShard {
virtual Status Insert(const Slice& key, uint32_t hash, void* value,
size_t charge,
void (*deleter)(const Slice& key, void* value),
Cache::Handle** handle) = 0;
Cache::Handle** handle, Cache::Priority priority) = 0;
virtual Cache::Handle* Lookup(const Slice& key, uint32_t hash) = 0;
virtual void Release(Cache::Handle* handle) = 0;
virtual void Erase(const Slice& key, uint32_t hash) = 0;
@@ -59,7 +59,7 @@ class ShardedCache : public Cache {
virtual Status Insert(const Slice& key, void* value, size_t charge,
void (*deleter)(const Slice& key, void* value),
Handle** handle) override;
Handle** handle, Priority priority) override;
virtual Handle* Lookup(const Slice& key) override;
virtual void Release(Handle* handle) override;
virtual void Erase(const Slice& key) override;

@@ -34,7 +34,7 @@ class SimCacheImpl : public SimCache {
virtual Status Insert(const Slice& key, void* value, size_t charge,
void (*deleter)(const Slice& key, void* value),
Handle** handle) override {
Handle** handle, Priority priority) override {
// The handle and value passed in are for real cache, so we pass nullptr
// to key_only_cache_ for both instead. Also, the deleter function pointer
// will be called by user to perform some external operation which should
@@ -43,11 +43,12 @@ class SimCacheImpl : public SimCache {
Handle* h = key_only_cache_->Lookup(key);
if (h == nullptr) {
key_only_cache_->Insert(key, nullptr, charge,
[](const Slice& k, void* v) {}, nullptr);
[](const Slice& k, void* v) {}, nullptr,
priority);
} else {
key_only_cache_->Release(h);
}
return cache_->Insert(key, value, charge, deleter, handle);
return cache_->Insert(key, value, charge, deleter, handle, priority);
}
virtual Handle* Lookup(const Slice& key) override {
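
For completeness, a hedged sketch of how a SimCache is typically constructed (NewSimCache's exact signature is assumed from sim_cache.h, which this diff does not show):

// Wrap a real 8MB cache and simulate the hit rate of a 64MB one. After this
// diff, inserts carry the priority into both the real and the key-only cache.
std::shared_ptr<rocksdb::Cache> real_cache = rocksdb::NewLRUCache(8 << 20);
std::shared_ptr<rocksdb::SimCache> sim_cache = rocksdb::NewSimCache(
    real_cache, 64 << 20 /*sim_capacity*/, 6 /*num_shard_bits*/);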
