diff --git a/Makefile b/Makefile
index 14139e2b0..9770fc710 100644
--- a/Makefile
+++ b/Makefile
@@ -322,6 +322,7 @@ TESTS = \
 	backupable_db_test \
 	document_db_test \
 	json_document_test \
+	sim_cache_test \
 	spatial_db_test \
 	version_edit_test \
 	version_set_test \
@@ -953,6 +954,9 @@ document_db_test: utilities/document/document_db_test.o $(LIBOBJECTS) $(TESTHARN
 json_document_test: utilities/document/json_document_test.o $(LIBOBJECTS) $(TESTHARNESS)
 	$(AM_LINK)
 
+sim_cache_test: utilities/simulator_cache/sim_cache_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
+	$(AM_LINK)
+
 spatial_db_test: utilities/spatialdb/spatial_db_test.o $(LIBOBJECTS) $(TESTHARNESS)
 	$(AM_LINK)
 
diff --git a/include/rocksdb/cache.h b/include/rocksdb/cache.h
index 638928ff5..6a2d43313 100644
--- a/include/rocksdb/cache.h
+++ b/include/rocksdb/cache.h
@@ -22,15 +22,13 @@
 #ifndef STORAGE_ROCKSDB_INCLUDE_CACHE_H_
 #define STORAGE_ROCKSDB_INCLUDE_CACHE_H_
 
-#include <memory>
 #include <stdint.h>
+#include <memory>
 
 #include "rocksdb/slice.h"
 #include "rocksdb/status.h"
 
 namespace rocksdb {
 
-using std::shared_ptr;
-
 class Cache;
 
 // Create a new cache with a fixed size capacity. The cache is sharded
@@ -39,23 +37,23 @@ class Cache;
 //
 // The parameter num_shard_bits defaults to 4, and strict_capacity_limit
 // defaults to false.
-extern shared_ptr<Cache> NewLRUCache(size_t capacity);
-extern shared_ptr<Cache> NewLRUCache(size_t capacity, int num_shard_bits);
-extern shared_ptr<Cache> NewLRUCache(size_t capacity, int num_shard_bits,
-                                     bool strict_capacity_limit);
+extern std::shared_ptr<Cache> NewLRUCache(size_t capacity);
+extern std::shared_ptr<Cache> NewLRUCache(size_t capacity, int num_shard_bits);
+extern std::shared_ptr<Cache> NewLRUCache(size_t capacity, int num_shard_bits,
+                                          bool strict_capacity_limit);
 
 class Cache {
  public:
-  Cache() { }
+  Cache() {}
 
   // Destroys all existing entries by calling the "deleter"
   // function that was passed via the Insert() function.
   //
   // @See Insert
-  virtual ~Cache();
+  virtual ~Cache() {}
 
   // Opaque handle to an entry stored in the cache.
-  struct Handle { };
+  struct Handle {};
 
   // Insert a mapping from key->value into the cache and assign it
   // the specified charge against the total cache capacity.
@@ -98,9 +96,8 @@ class Cache {
   // underlying entry will be kept around until all existing handles
   // to it have been released.
   virtual void Erase(const Slice& key) = 0;
-
   // Return a new numeric id. May be used by multiple clients who are
-  // sharing the same cache to partition the key space. Typically the
+  // sharding the same cache to partition the key space. Typically the
   // client will allocate a new id at startup and prepend the id to
   // its cache keys.
   virtual uint64_t NewId() = 0;
@@ -136,8 +133,8 @@ class Cache {
   // memory - call this only if you're shutting down the process.
   // Any attempts of using cache after this call will fail terribly.
   // Always delete the DB object before calling this method!
-  virtual void DisownData() {
-    // default implementation is noop
+  virtual void DisownData(){
+      // default implementation is noop
   };
 
   // Apply callback to all entries in the cache
@@ -157,7 +154,7 @@ class Cache {
 
   // No copying allowed
   Cache(const Cache&);
-  void operator=(const Cache&);
+  Cache& operator=(const Cache&);
 };
 
 }  // namespace rocksdb
diff --git a/include/rocksdb/utilities/sim_cache.h b/include/rocksdb/utilities/sim_cache.h
new file mode 100644
index 000000000..7bd6363e1
--- /dev/null
+++ b/include/rocksdb/utilities/sim_cache.h
@@ -0,0 +1,67 @@
+// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#pragma once
+
+#include <stdint.h>
+#include <memory>
+#include <string>
+#include "rocksdb/cache.h"
+#include "rocksdb/slice.h"
+#include "rocksdb/status.h"
+#include "util/lru_cache_handle.h"
+
+namespace rocksdb {
+
+class SimCache;
+
+// For instrumentation purposes, use NewSimCache instead of the NewLRUCache
+// API. NewSimCache is a wrapper function returning a SimCache instance that,
+// besides the Cache interface, provides the additional methods declared in
+// the SimCache class to predict the block cache hit rate without actually
+// allocating the memory. It can help users tune their current block cache
+// size and determine how efficiently they are using the memory.
+extern std::shared_ptr<SimCache> NewSimCache(std::shared_ptr<Cache> cache,
+                                             size_t sim_capacity,
+                                             int num_shard_bits);
+
+class SimCache : public Cache {
+ public:
+  SimCache() {}
+
+  virtual ~SimCache() {}
+
+  // returns the maximum configured capacity of the simcache for simulation
+  virtual size_t GetSimCapacity() const = 0;
+
+  // simcache doesn't provide internal handler reference to user, so always
+  // PinnedUsage = 0 and the behavior will not be exactly consistent with
+  // the real cache.
+  // returns the memory size for the entries residing in the simcache.
+  virtual size_t GetSimUsage() const = 0;
+
+  // sets the maximum configured capacity of the simcache. When the new
+  // capacity is less than the old capacity and the existing usage is
+  // greater than the new capacity, the implementation will purge old entries
+  // to fit the new capacity.
+  virtual void SetSimCapacity(size_t capacity) = 0;
+
+  // returns the number of lookups against the simcache
+  virtual uint64_t get_lookup_counter() const = 0;
+  // returns the number of hits in the simcache
+  virtual uint64_t get_hit_counter() const = 0;
+  // returns the hit rate of the simcache
+  virtual double get_hit_rate() const = 0;
+  // resets the lookup and hit counters
+  virtual void reset_counter() = 0;
+  // String representation of the statistics of the simcache
+  virtual std::string ToString() const = 0;
+
+ private:
+  SimCache(const SimCache&);
+  SimCache& operator=(const SimCache&);
+};
+
+}  // namespace rocksdb
diff --git a/src.mk b/src.mk
index 14b9e580b..eca05222b 100644
--- a/src.mk
+++ b/src.mk
@@ -131,6 +131,7 @@ LIB_SOURCES = \
 	utilities/merge_operators/uint64add.cc \
 	utilities/options/options_util.cc \
 	utilities/redis/redis_lists.cc \
+	utilities/simulator_cache/sim_cache.cc \
 	utilities/spatialdb/spatial_db.cc \
 	utilities/table_properties_collectors/compact_on_deletion_collector.cc \
 	utilities/transactions/optimistic_transaction_impl.cc \
@@ -272,6 +273,7 @@ TEST_BENCH_SOURCES = \
 	utilities/merge_operators/string_append/stringappend_test.cc \
 	utilities/options/options_util_test.cc \
 	utilities/redis/redis_lists_test.cc \
+	utilities/simulator_cache/sim_cache_test.cc \
 	utilities/spatialdb/spatial_db_test.cc \
 	utilities/table_properties_collectors/compact_on_deletion_collector_test.cc \
 	utilities/transactions/optimistic_transaction_test.cc \
diff --git a/tools/db_bench_tool.cc b/tools/db_bench_tool.cc
index df288fdee..5d38bd597 100644
--- a/tools/db_bench_tool.cc
+++ b/tools/db_bench_tool.cc
@@ -51,6 +51,7 @@
 #include "rocksdb/slice_transform.h"
 #include "rocksdb/utilities/flashcache.h"
 #include "rocksdb/utilities/optimistic_transaction_db.h"
+#include "rocksdb/utilities/sim_cache.h"
 #include "rocksdb/utilities/transaction.h"
"rocksdb/utilities/transaction.h" #include "rocksdb/utilities/transaction_db.h" #include "rocksdb/write_batch.h" @@ -334,8 +335,13 @@ DEFINE_int32(universal_compression_size_percent, -1, DEFINE_bool(universal_allow_trivial_move, false, "Allow trivial move in universal compaction."); -DEFINE_int64(cache_size, -1, "Number of bytes to use as a cache of uncompressed" - "data. Negative means use default settings."); +DEFINE_int64(cache_size, -1, + "Number of bytes to use as a cache of uncompressed" + " data. Negative means use default settings."); + +DEFINE_int64(simcache_size, -1, + "Number of bytes to use as a simcache of " + "uncompressed data. Negative means use default settings."); DEFINE_bool(cache_index_and_filter_blocks, false, "Cache index/filter blocks in block cache."); @@ -1808,6 +1814,16 @@ class Benchmark { merge_keys_(FLAGS_merge_keys < 0 ? FLAGS_num : FLAGS_merge_keys), report_file_operations_(FLAGS_report_file_operations), cachedev_fd_(-1) { + // use simcache instead of cache + if (FLAGS_simcache_size >= 0) { + if (FLAGS_cache_numshardbits >= 1) { + cache_ = + NewSimCache(cache_, FLAGS_simcache_size, FLAGS_cache_numshardbits); + } else { + cache_ = NewSimCache(cache_, FLAGS_simcache_size, 0); + } + } + if (report_file_operations_) { if (!FLAGS_hdfs.empty()) { fprintf(stderr, @@ -2101,6 +2117,10 @@ class Benchmark { if (FLAGS_statistics) { fprintf(stdout, "STATISTICS:\n%s\n", dbstats->ToString().c_str()); } + if (FLAGS_simcache_size) { + fprintf(stdout, "SIMULATOR CACHE STATISTICS:\n%s\n", + std::dynamic_pointer_cast(cache_)->ToString().c_str()); + } } private: diff --git a/util/cache.cc b/util/cache.cc index c94530e06..ff2015c73 100644 --- a/util/cache.cc +++ b/util/cache.cc @@ -11,73 +11,19 @@ #include #include -#include "rocksdb/cache.h" #include "port/port.h" +#include "rocksdb/cache.h" #include "util/autovector.h" #include "util/hash.h" +#include "util/lru_cache_handle.h" #include "util/mutexlock.h" namespace rocksdb { -Cache::~Cache() { -} - namespace { // LRU cache implementation -// An entry is a variable length heap-allocated structure. -// Entries are referenced by cache and/or by any external entity. -// The cache keeps all its entries in table. Some elements -// are also stored on LRU list. -// -// LRUHandle can be in these states: -// 1. Referenced externally AND in hash table. -// In that case the entry is *not* in the LRU. (refs > 1 && in_cache == true) -// 2. Not referenced externally and in hash table. In that case the entry is -// in the LRU and can be freed. (refs == 1 && in_cache == true) -// 3. Referenced externally and not in hash table. In that case the entry is -// in not on LRU and not in table. (refs >= 1 && in_cache == false) -// -// All newly created LRUHandles are in state 1. If you call LRUCache::Release -// on entry in state 1, it will go into state 2. To move from state 1 to -// state 3, either call LRUCache::Erase or LRUCache::Insert with the same key. -// To move from state 2 to state 1, use LRUCache::Lookup. -// Before destruction, make sure that no handles are in state 1. This means -// that any successful LRUCache::Lookup/LRUCache::Insert have a matching -// RUCache::Release (to move into state 2) or LRUCache::Erase (for state 3) - -struct LRUHandle { - void* value; - void (*deleter)(const Slice&, void* value); - LRUHandle* next_hash; - LRUHandle* next; - LRUHandle* prev; - size_t charge; // TODO(opt): Only allow uint32_t? 
-  size_t key_length;
-  uint32_t refs;      // a number of refs to this entry
-                      // cache itself is counted as 1
-  bool in_cache;      // true, if this entry is referenced by the hash table
-  uint32_t hash;      // Hash of key(); used for fast sharding and comparisons
-  char key_data[1];   // Beginning of key
-
-  Slice key() const {
-    // For cheaper lookups, we allow a temporary Handle object
-    // to store a pointer to a key in "value".
-    if (next == this) {
-      return *(reinterpret_cast<const Slice*>(value));
-    } else {
-      return Slice(key_data, key_length);
-    }
-  }
-
-  void Free() {
-    assert((refs == 1 && in_cache) || (refs == 0 && !in_cache));
-    (*deleter)(key(), value);
-    delete[] reinterpret_cast<char*>(this);
-  }
-};
-
 // We provide our own simple hash table since it removes a whole bunch
 // of porting hacks and is also faster than some of the built-in hash
 // table implementations in some of the compiler/runtime combinations
@@ -151,8 +97,7 @@ class HandleTable {
   // pointer to the trailing slot in the corresponding linked list.
   LRUHandle** FindPointer(const Slice& key, uint32_t hash) {
     LRUHandle** ptr = &list_[hash & (length_ - 1)];
-    while (*ptr != nullptr &&
-           ((*ptr)->hash != hash || key != (*ptr)->key())) {
+    while (*ptr != nullptr && ((*ptr)->hash != hash || key != (*ptr)->key())) {
       ptr = &(*ptr)->next_hash;
     }
     return ptr;
   }
@@ -238,8 +183,7 @@ class LRUCache {
   // to hold (usage_ + charge) is freed or the lru list is empty
   // This function is not thread safe - it needs to be executed while
   // holding the mutex_
-  void EvictFromLRU(size_t charge,
-                    autovector<LRUHandle*>* deleted);
+  void EvictFromLRU(size_t charge, autovector<LRUHandle*>* deleted);
 
   // Initialized before use.
   size_t capacity_;
@@ -310,9 +254,8 @@ void LRUCache::ApplyToAllCacheEntries(void (*callback)(void*, size_t),
   if (thread_safe) {
     mutex_.Lock();
   }
-  table_.ApplyToAllCacheEntries([callback](LRUHandle* h) {
-    callback(h->value, h->charge);
-  });
+  table_.ApplyToAllCacheEntries(
+      [callback](LRUHandle* h) { callback(h->value, h->charge); });
   if (thread_safe) {
     mutex_.Unlock();
   }
@@ -338,8 +281,7 @@ void LRUCache::LRU_Append(LRUHandle* e) {
   lru_usage_ += e->charge;
 }
 
-void LRUCache::EvictFromLRU(size_t charge,
-                            autovector<LRUHandle*>* deleted) {
+void LRUCache::EvictFromLRU(size_t charge, autovector<LRUHandle*>* deleted) {
   while (usage_ + charge > capacity_ && lru_.next != &lru_) {
     LRUHandle* old = lru_.next;
     assert(old->in_cache);
@@ -430,7 +372,7 @@ Status LRUCache::Insert(const Slice& key, uint32_t hash, void* value,
   // If the cache is full, we'll have to release it
   // It shouldn't happen very often though.
   LRUHandle* e = reinterpret_cast<LRUHandle*>(
-      new char[sizeof(LRUHandle) - 1 + key.size()]);
+       new char[sizeof(LRUHandle) - 1 + key.size()]);
   Status s;
   autovector<LRUHandle*> last_reference_list;
 
@@ -556,9 +498,7 @@ class ShardedLRUCache : public Cache {
       shards_[s].SetStrictCapacityLimit(strict_capacity_limit);
     }
   }
-  virtual ~ShardedLRUCache() {
-    delete[] shards_;
-  }
+  virtual ~ShardedLRUCache() { delete[] shards_; }
   virtual void SetCapacity(size_t capacity) override {
     int num_shards = 1 << num_shard_bits_;
     const size_t per_shard = (capacity + (num_shards - 1)) / num_shards;
@@ -651,16 +591,16 @@ class ShardedLRUCache : public Cache {
 
 }  // end anonymous namespace
 
-shared_ptr<Cache> NewLRUCache(size_t capacity) {
+std::shared_ptr<Cache> NewLRUCache(size_t capacity) {
   return NewLRUCache(capacity, kNumShardBits, false);
 }
 
-shared_ptr<Cache> NewLRUCache(size_t capacity, int num_shard_bits) {
+std::shared_ptr<Cache> NewLRUCache(size_t capacity, int num_shard_bits) {
   return NewLRUCache(capacity, num_shard_bits, false);
 }
 
-shared_ptr<Cache> NewLRUCache(size_t capacity, int num_shard_bits,
-                              bool strict_capacity_limit) {
+std::shared_ptr<Cache> NewLRUCache(size_t capacity, int num_shard_bits,
+                                   bool strict_capacity_limit) {
   if (num_shard_bits >= 20) {
     return nullptr;  // the cache cannot be sharded into too many fine pieces
   }
diff --git a/util/lru_cache_handle.h b/util/lru_cache_handle.h
new file mode 100644
index 000000000..1cd755a62
--- /dev/null
+++ b/util/lru_cache_handle.h
@@ -0,0 +1,71 @@
+// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#pragma once
+
+#include <assert.h>
+#include <stdint.h>
+
+#include "port/port.h"
+#include "rocksdb/slice.h"
+
+namespace rocksdb {
+// An entry is a variable length heap-allocated structure.
+// Entries are referenced by cache and/or by any external entity.
+// The cache keeps all its entries in table. Some elements
+// are also stored on LRU list.
+//
+// LRUHandle can be in these states:
+// 1. Referenced externally AND in hash table.
+//    In that case the entry is *not* in the LRU. (refs > 1 && in_cache == true)
+// 2. Not referenced externally and in hash table. In that case the entry is
+//    in the LRU and can be freed. (refs == 1 && in_cache == true)
+// 3. Referenced externally and not in hash table. In that case the entry is
+//    not on LRU and not in table. (refs >= 1 && in_cache == false)
+//
+// All newly created LRUHandles are in state 1. If you call LRUCache::Release
+// on an entry in state 1, it will go into state 2. To move from state 1 to
+// state 3, either call LRUCache::Erase or LRUCache::Insert with the same key.
+// To move from state 2 to state 1, use LRUCache::Lookup.
+// Before destruction, make sure that no handles are in state 1. This means
+// that any successful LRUCache::Lookup/LRUCache::Insert has a matching
+// LRUCache::Release (to move into state 2) or LRUCache::Erase (for state 3)
+
+struct LRUHandle {
+  void* value;
+  void (*deleter)(const Slice&, void* value);
+  LRUHandle* next_hash;
+  LRUHandle* next;
+  LRUHandle* prev;
+  size_t charge;  // TODO(opt): Only allow uint32_t?
+  size_t key_length;
+  uint32_t refs;      // a number of refs to this entry
+                      // cache itself is counted as 1
+  bool in_cache;      // true, if this entry is referenced by the hash table
+  uint32_t hash;      // Hash of key(); used for fast sharding and comparisons
+  char key_data[1];   // Beginning of key
+
+  Slice key() const {
+    // For cheaper lookups, we allow a temporary Handle object
+    // to store a pointer to a key in "value".
+    if (next == this) {
+      return *(reinterpret_cast<const Slice*>(value));
+    } else {
+      return Slice(key_data, key_length);
+    }
+  }
+
+  void Free() {
+    assert((refs == 1 && in_cache) || (refs == 0 && !in_cache));
+    (*deleter)(key(), value);
+    delete[] reinterpret_cast<char*>(this);
+  }
+};
+
+}  // end namespace rocksdb
diff --git a/utilities/simulator_cache/sim_cache.cc b/utilities/simulator_cache/sim_cache.cc
new file mode 100644
index 000000000..4e6b20bc4
--- /dev/null
+++ b/utilities/simulator_cache/sim_cache.cc
@@ -0,0 +1,155 @@
+// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include "rocksdb/utilities/sim_cache.h"
+#include <atomic>
+
+namespace rocksdb {
+
+namespace {
+// SimCacheImpl definition
+class SimCacheImpl : public SimCache {
+ public:
+  // capacity is for the real cache (ShardedLRUCache)
+  // sim_capacity is for the key-only cache
+  SimCacheImpl(std::shared_ptr<Cache> cache, size_t sim_capacity,
+               int num_shard_bits)
+      : cache_(cache),
+        key_only_cache_(NewLRUCache(sim_capacity, num_shard_bits)),
+        lookup_times_(0),
+        hit_times_(0) {}
+
+  virtual ~SimCacheImpl() {}
+  virtual void SetCapacity(size_t capacity) override {
+    cache_->SetCapacity(capacity);
+  }
+
+  virtual void SetStrictCapacityLimit(bool strict_capacity_limit) override {
+    cache_->SetStrictCapacityLimit(strict_capacity_limit);
+  }
+
+  virtual Status Insert(const Slice& key, void* value, size_t charge,
+                        void (*deleter)(const Slice& key, void* value),
+                        Handle** handle) override {
+    // The handle and value passed in are for the real cache, so we pass
+    // nullptr to key_only_cache_ for both instead. Also, the deleter function
+    // pointer will be called by the user to perform some external operation
+    // which should be applied only once. Thus key_only_cache accepts an empty
+    // function.
+    // *Lambda function without capture can be assigned to a function pointer
+    Handle* h = key_only_cache_->Lookup(key);
+    if (h == nullptr) {
+      key_only_cache_->Insert(key, nullptr, charge,
+                              [](const Slice& k, void* v) {}, nullptr);
+    } else {
+      key_only_cache_->Release(h);
+    }
+    return cache_->Insert(key, value, charge, deleter, handle);
+  }
+
+  virtual Handle* Lookup(const Slice& key) override {
+    inc_lookup_counter();
+    Handle* h = key_only_cache_->Lookup(key);
+    if (h != nullptr) {
+      key_only_cache_->Release(h);
+      inc_hit_counter();
+    }
+    return cache_->Lookup(key);
+  }
+
+  virtual void Release(Handle* handle) override { cache_->Release(handle); }
+
+  virtual void Erase(const Slice& key) override {
+    cache_->Erase(key);
+    key_only_cache_->Erase(key);
+  }
+
+  virtual void* Value(Handle* handle) override {
+    return reinterpret_cast<LRUHandle*>(handle)->value;
+  }
+
+  virtual uint64_t NewId() override { return cache_->NewId(); }
+
+  virtual size_t GetCapacity() const override { return cache_->GetCapacity(); }
+
+  virtual bool HasStrictCapacityLimit() const override {
+    return cache_->HasStrictCapacityLimit();
+  }
+
+  virtual size_t GetUsage() const override { return cache_->GetUsage(); }
+
+  virtual size_t GetUsage(Handle* handle) const override {
+    return reinterpret_cast<LRUHandle*>(handle)->charge;
+  }
+
+  virtual size_t GetPinnedUsage() const override {
+    return cache_->GetPinnedUsage();
+  }
+
+  virtual void DisownData() override {
+    cache_->DisownData();
+    key_only_cache_->DisownData();
+  }
+
+  virtual void ApplyToAllCacheEntries(void (*callback)(void*, size_t),
+                                      bool thread_safe) override {
+    // only apply to cache_ since key_only_cache_ doesn't hold values
+    cache_->ApplyToAllCacheEntries(callback, thread_safe);
+  }
+
+  virtual void EraseUnRefEntries() override {
+    cache_->EraseUnRefEntries();
+    key_only_cache_->EraseUnRefEntries();
+  }
+
+  virtual size_t GetSimCapacity() const override {
+    return key_only_cache_->GetCapacity();
+  }
+  virtual size_t GetSimUsage() const override {
+    return key_only_cache_->GetUsage();
+  }
+  virtual void SetSimCapacity(size_t capacity) override {
+    key_only_cache_->SetCapacity(capacity);
+  }
+
+  virtual uint64_t get_lookup_counter() const override { return lookup_times_; }
+  virtual uint64_t get_hit_counter() const override { return hit_times_; }
+  virtual double get_hit_rate() const override {
+    return hit_times_ * 1.0f / lookup_times_;
+  }
+  virtual void reset_counter() override { hit_times_ = lookup_times_ = 0; }
+
+  virtual std::string ToString() const override {
+    std::string res;
+    res.append("SimCache LOOKUPs: " + std::to_string(get_lookup_counter()) +
+               "\n");
+    res.append("SimCache HITs: " + std::to_string(get_hit_counter()) + "\n");
+    char buff[100];
+    snprintf(buff, sizeof(buff), "SimCache HITRATE: %.2f%%\n",
+             get_hit_rate() * 100);
+    res.append(buff);
+    return res;
+  }
+
+ private:
+  std::shared_ptr<Cache> cache_;
+  std::shared_ptr<Cache> key_only_cache_;
+  std::atomic<uint64_t> lookup_times_;
+  std::atomic<uint64_t> hit_times_;
+  void inc_lookup_counter() { lookup_times_++; }
+  void inc_hit_counter() { hit_times_++; }
+};
+
+}  // end anonymous namespace
+
+// For instrumentation purposes, use NewSimCache instead of NewLRUCache
+std::shared_ptr<SimCache> NewSimCache(std::shared_ptr<Cache> cache,
+                                      size_t sim_capacity, int num_shard_bits) {
+  if (num_shard_bits >= 20) {
+    return nullptr;  // the cache cannot be sharded into too many fine pieces
+  }
+  return std::make_shared<SimCacheImpl>(cache, sim_capacity, num_shard_bits);
+}
+
+}  // end namespace rocksdb
diff --git a/utilities/simulator_cache/sim_cache_test.cc b/utilities/simulator_cache/sim_cache_test.cc
new file mode 100644
index 000000000..c2a1a16fa
--- /dev/null
+++ b/utilities/simulator_cache/sim_cache_test.cc
@@ -0,0 +1,146 @@
+// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include "rocksdb/utilities/sim_cache.h"
+#include <cstdlib>
+#include "db/db_test_util.h"
+#include "port/stack_trace.h"
+
+namespace rocksdb {
+
+class SimCacheTest : public DBTestBase {
+ private:
+  size_t miss_count_ = 0;
+  size_t hit_count_ = 0;
+  size_t insert_count_ = 0;
+  size_t failure_count_ = 0;
+
+ public:
+  const size_t kNumBlocks = 5;
+  const size_t kValueSize = 100;
+
+  SimCacheTest() : DBTestBase("/sim_cache_test") {}
+
+  BlockBasedTableOptions GetTableOptions() {
+    BlockBasedTableOptions table_options;
+    // Set a small enough block size so that each key-value gets its own block.
+    table_options.block_size = 1;
+    return table_options;
+  }
+
+  Options GetOptions(const BlockBasedTableOptions& table_options) {
+    Options options = CurrentOptions();
+    options.create_if_missing = true;
+    // options.compression = kNoCompression;
+    options.statistics = rocksdb::CreateDBStatistics();
+    options.table_factory.reset(new BlockBasedTableFactory(table_options));
+    return options;
+  }
+
+  void InitTable(const Options& options) {
+    std::string value(kValueSize, 'a');
+    for (size_t i = 0; i < kNumBlocks * 2; i++) {
+      ASSERT_OK(Put(ToString(i), value.c_str()));
+    }
+  }
+
+  void RecordCacheCounters(const Options& options) {
+    miss_count_ = TestGetTickerCount(options, BLOCK_CACHE_MISS);
+    hit_count_ = TestGetTickerCount(options, BLOCK_CACHE_HIT);
+    insert_count_ = TestGetTickerCount(options, BLOCK_CACHE_ADD);
+    failure_count_ = TestGetTickerCount(options, BLOCK_CACHE_ADD_FAILURES);
+  }
+
+  void CheckCacheCounters(const Options& options, size_t expected_misses,
+                          size_t expected_hits, size_t expected_inserts,
+                          size_t expected_failures) {
+    size_t new_miss_count = TestGetTickerCount(options, BLOCK_CACHE_MISS);
+    size_t new_hit_count = TestGetTickerCount(options, BLOCK_CACHE_HIT);
+    size_t new_insert_count = TestGetTickerCount(options, BLOCK_CACHE_ADD);
+    size_t new_failure_count =
+        TestGetTickerCount(options, BLOCK_CACHE_ADD_FAILURES);
+    ASSERT_EQ(miss_count_ + expected_misses, new_miss_count);
+    ASSERT_EQ(hit_count_ + expected_hits, new_hit_count);
+    ASSERT_EQ(insert_count_ + expected_inserts, new_insert_count);
+    ASSERT_EQ(failure_count_ + expected_failures, new_failure_count);
+    miss_count_ = new_miss_count;
+    hit_count_ = new_hit_count;
+    insert_count_ = new_insert_count;
+    failure_count_ = new_failure_count;
+  }
+};
+
+TEST_F(SimCacheTest, SimCache) {
+  ReadOptions read_options;
+  auto table_options = GetTableOptions();
+  auto options = GetOptions(table_options);
+  InitTable(options);
+  std::shared_ptr<SimCache> simCache =
+      NewSimCache(NewLRUCache(0, 0, false), 10000, 0);
+  table_options.block_cache = simCache;
+  options.table_factory.reset(new BlockBasedTableFactory(table_options));
+  Reopen(options);
+  RecordCacheCounters(options);
+
+  std::vector<std::unique_ptr<Iterator>> iterators(kNumBlocks);
+  Iterator* iter = nullptr;
+
+  // Load blocks into cache.
+  for (size_t i = 0; i < kNumBlocks; i++) {
+    iter = db_->NewIterator(read_options);
+    iter->Seek(ToString(i));
+    ASSERT_OK(iter->status());
+    CheckCacheCounters(options, 1, 0, 1, 0);
+    iterators[i].reset(iter);
+  }
+  ASSERT_EQ(kNumBlocks, simCache->get_lookup_counter());
+  ASSERT_EQ(0, simCache->get_hit_counter());
+  size_t usage = simCache->GetUsage();
+  ASSERT_LT(0, usage);
+  ASSERT_EQ(usage, simCache->GetSimUsage());
+  simCache->SetCapacity(usage);
+  simCache->SetSimCapacity(usage * 2);
+  ASSERT_EQ(usage, simCache->GetPinnedUsage());
+
+  // Test with strict capacity limit.
+  simCache->SetStrictCapacityLimit(true);
+  iter = db_->NewIterator(read_options);
+  iter->Seek(ToString(kNumBlocks * 2 - 1));
+  ASSERT_TRUE(iter->status().IsIncomplete());
+  CheckCacheCounters(options, 1, 0, 0, 1);
+  delete iter;
+  iter = nullptr;
+
+  // Release iterators and access cache again.
+  for (size_t i = 0; i < kNumBlocks; i++) {
+    iterators[i].reset();
+    CheckCacheCounters(options, 0, 0, 0, 0);
+  }
+  // Add kNumBlocks again
+  for (size_t i = 0; i < kNumBlocks; i++) {
+    std::unique_ptr<Iterator> it(db_->NewIterator(read_options));
+    it->Seek(ToString(i));
+    ASSERT_OK(it->status());
+    CheckCacheCounters(options, 0, 1, 0, 0);
+  }
+  ASSERT_EQ(5, simCache->get_hit_counter());
+  for (size_t i = kNumBlocks; i < kNumBlocks * 2; i++) {
+    std::unique_ptr<Iterator> it(db_->NewIterator(read_options));
+    it->Seek(ToString(i));
+    ASSERT_OK(it->status());
+    CheckCacheCounters(options, 1, 0, 1, 0);
+  }
+  ASSERT_EQ(0, simCache->GetPinnedUsage());
+  ASSERT_EQ(3 * kNumBlocks + 1, simCache->get_lookup_counter());
+  ASSERT_EQ(6, simCache->get_hit_counter());
+}
+
+}  // namespace rocksdb
+
+int main(int argc, char** argv) {
+  rocksdb::port::InstallStackTraceHandler();
+  ::testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
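
Beyond db_bench, the patch lets any application wrap its block cache in a SimCache and read the simulated hit rate afterwards. Below is a minimal usage sketch of that wiring, assuming the patch is applied; the database path /tmp/simcache_demo, the capacity numbers, and the key loop are illustrative assumptions, not part of the change.

```cpp
#include <cstdio>
#include <string>

#include "rocksdb/cache.h"
#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "rocksdb/table.h"
#include "rocksdb/utilities/sim_cache.h"

int main() {
  // Wrap an 8 MB real block cache in a SimCache that simulates 64 MB, to
  // estimate how the hit rate would change with a bigger memory budget.
  // Sizes here are arbitrary examples.
  std::shared_ptr<rocksdb::SimCache> sim_cache = rocksdb::NewSimCache(
      rocksdb::NewLRUCache(8 << 20), /*sim_capacity=*/64 << 20,
      /*num_shard_bits=*/4);

  rocksdb::BlockBasedTableOptions table_options;
  table_options.block_cache = sim_cache;  // SimCache is-a Cache

  rocksdb::Options options;
  options.create_if_missing = true;
  options.table_factory.reset(
      rocksdb::NewBlockBasedTableFactory(table_options));

  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/simcache_demo", &db);
  if (!s.ok()) {
    fprintf(stderr, "open failed: %s\n", s.ToString().c_str());
    return 1;
  }

  // Generate some block cache traffic: after the flush, each Get reads a
  // data block through SimCacheImpl::Lookup/Insert, which mirrors the block
  // key into the key-only shadow cache.
  for (int i = 0; i < 1000; i++) {
    db->Put(rocksdb::WriteOptions(), "key" + std::to_string(i), "value");
  }
  db->Flush(rocksdb::FlushOptions());
  std::string value;
  for (int i = 0; i < 1000; i++) {
    db->Get(rocksdb::ReadOptions(), "key" + std::to_string(i), &value);
  }

  // Prints the lookup count, hit count, and hit rate of the simulated
  // (larger) cache, in the same format db_bench prints above.
  fprintf(stdout, "%s", sim_cache->ToString().c_str());
  delete db;
  return 0;
}
```

Note the design point this mirrors in the patch: the shadow cache stores only keys with their charges (values are nullptr and the deleter is an empty lambda), so simulating a much larger capacity costs little additional memory.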