Add Cache.GetPinnedUsage()

Summary:
  Add the function Cache.GetPinnedUsage() to return the memory size of entries
  that are in use by the system (that is, all the entries not in the LRU list).
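
  For illustration (not part of this commit), a minimal sketch of how the new
  call behaves, assuming the existing Insert()/Release()/GetUsage() API in
  include/rocksdb/cache.h; the capacity, key, value, charge, and NoopDeleter
  below are made up for the example. An entry counts toward GetPinnedUsage()
  while a handle to it is held, and stops counting once the handle is released
  back onto the LRU list.

    #include <cassert>
    #include <memory>
    #include <string>
    #include "rocksdb/cache.h"
    #include "rocksdb/slice.h"

    // Hypothetical no-op deleter, similar in spirit to dumbDeleter in cache_test.cc.
    static void NoopDeleter(const rocksdb::Slice& /*key*/, void* /*value*/) {}

    int main() {
      std::shared_ptr<rocksdb::Cache> cache = rocksdb::NewLRUCache(100000);
      std::string value = "abcdef";

      // While the handle is held the entry is pinned, so its charge shows up
      // in both GetUsage() and GetPinnedUsage().
      rocksdb::Cache::Handle* handle =
          cache->Insert("key", &value, /*charge=*/8, NoopDeleter);
      assert(cache->GetPinnedUsage() == 8);

      // Releasing the handle moves the entry onto the LRU list; it still
      // counts toward GetUsage() but no longer toward GetPinnedUsage().
      cache->Release(handle);
      assert(cache->GetPinnedUsage() == 0);
      assert(cache->GetUsage() >= 8);
      return 0;
    }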

Test Plan:
  Run ./cache_test and examine PinnedUsageTest.

Reviewers: tnovak, igor

Reviewed By: igor

Subscribers: dhruba

Differential Revision: https://reviews.facebook.net/D40305
Aaron Feldman
parent 4eabbdb7ec
commit 69bb210d58
  1. HISTORY.md (1 line changed)
  2. include/rocksdb/cache.h (3 lines changed)
  3. util/cache.cc (34 lines changed)
  4. util/cache_test.cc (41 lines changed)

@@ -18,6 +18,7 @@
* DB::GetApproximateSizes() adds a parameter to allow the estimation to include data in the mem table; the default is not to include it. It is currently only supported for the skip list mem table.
* DB::CompactRange() now accepts CompactRangeOptions instead of multiple parameters. CompactRangeOptions is defined in include/rocksdb/options.h.
* Add force_bottommost_level_compaction option to CompactRangeOptions, which prevents compaction from skipping the bottommost level.
* Add Cache.GetPinnedUsage() to get the size of memory occupied by entries that are in use by the system.
## 3.11.0 (5/19/2015)
### New Features

@@ -104,6 +104,9 @@ class Cache {
// returns the memory size for the entries residing in the cache.
virtual size_t GetUsage() const = 0;
// returns the memory size for the entries in use by the system
virtual size_t GetPinnedUsage() const = 0;
// Call this on shutdown if you want to speed it up. Cache will disown
// any underlying data and will not free it on delete. This call will leak
// memory - call this only if you're shutting down the process.

@@ -203,14 +203,22 @@ class LRUCache {
Cache::Handle* Lookup(const Slice& key, uint32_t hash);
void Release(Cache::Handle* handle);
void Erase(const Slice& key, uint32_t hash);
// Although in some platforms the update of size_t is atomic, to make sure
// GetUsage() and GetPinnedUsage() work correctly under any platform, we'll
// protect them with mutex_.
size_t GetUsage() const {
MutexLock l(&mutex_);
return usage_;
}
size_t GetPinnedUsage() const {
MutexLock l(&mutex_);
assert(usage_ >= lru_usage_);
return usage_ - lru_usage_;
}
void ApplyToAllCacheEntries(void (*callback)(void*, size_t),
bool thread_safe);
@@ -231,11 +239,16 @@ class LRUCache {
// Initialized before use.
size_t capacity_;
// Memory size for entries residing in the cache
size_t usage_;
// Memory size for entries residing only in the LRU list
size_t lru_usage_;
// mutex_ protects the following state.
// We don't count mutex_ as the cache's internal state so semantically we
// don't mind mutex_ invoking the non-const actions.
mutable port::Mutex mutex_;
// Dummy head of LRU list.
// lru.prev is newest entry, lru.next is oldest entry.
@@ -245,8 +258,7 @@ class LRUCache {
HandleTable table_;
};
LRUCache::LRUCache() : usage_(0), lru_usage_(0) {
// Make empty circular linked list
lru_.next = &lru_;
lru_.prev = &lru_;
@@ -281,6 +293,7 @@ void LRUCache::LRU_Remove(LRUHandle* e) {
e->next->prev = e->prev;
e->prev->next = e->next;
e->prev = e->next = nullptr;
lru_usage_ -= e->charge;
}
void LRUCache::LRU_Append(LRUHandle* e) {
@@ -291,6 +304,7 @@ void LRUCache::LRU_Append(LRUHandle* e) {
e->prev = lru_.prev;
e->prev->next = e;
e->next->prev = e;
lru_usage_ += e->charge;
}
void LRUCache::EvictFromLRU(size_t charge,
@@ -519,7 +533,6 @@ class ShardedLRUCache : public Cache {
virtual size_t GetUsage() const override {
// We will not lock the cache when getting the usage from shards.
int num_shards = 1 << num_shard_bits_;
size_t usage = 0;
for (int s = 0; s < num_shards; s++) {
@@ -527,6 +540,15 @@ class ShardedLRUCache : public Cache {
}
return usage;
}
virtual size_t GetPinnedUsage() const override {
// We will not lock the cache when getting the usage from shards.
int num_shards = 1 << num_shard_bits_;
size_t usage = 0;
for (int s = 0; s < num_shards; s++) {
usage += shards_[s].GetPinnedUsage();
}
return usage;
}
virtual void DisownData() override { shards_ = nullptr; }

@@ -142,6 +142,47 @@ TEST_F(CacheTest, UsageTest) {
ASSERT_LT(kCapacity * 0.95, cache->GetUsage());
}
TEST_F(CacheTest, PinnedUsageTest) {
// cache is shared_ptr and will be automatically cleaned up.
const uint64_t kCapacity = 100000;
auto cache = NewLRUCache(kCapacity, 8);
size_t pinned_usage = 0;
const char* value = "abcdef";
// Add entries. Unpin some of them after insertion. Then, pin some of them
// again. Check GetPinnedUsage().
for (int i = 1; i < 100; ++i) {
std::string key(i, 'a');
auto kv_size = key.size() + 5;
auto handle = cache->Insert(key, (void*)value, kv_size, dumbDeleter);
pinned_usage += kv_size;
ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
if (i % 2 == 0) {
cache->Release(handle);
pinned_usage -= kv_size;
ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
}
if (i % 3 == 0) {
cache->Lookup(key);
// If i % 2 == 0, then the entry was unpinned before Lookup, so pinned
// usage increased
if (i % 2 == 0) {
pinned_usage += kv_size;
}
ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
}
}
// check that overloading the cache does not change the pinned usage
for (uint64_t i = 1; i < 2 * kCapacity; ++i) {
auto key = ToString(i);
cache->Release(
cache->Insert(key, (void*)value, key.size() + 5, dumbDeleter));
}
ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
}
TEST_F(CacheTest, HitAndMiss) {
ASSERT_EQ(-1, Lookup(100));
