Add filter/index/data secondary cache hits stats (#11246)

Summary:
Add more stats for better visibility into the usefulness of the secondary cache.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/11246

Test Plan: Add a new unit test

Reviewed By: akankshamahajan15

Differential Revision: D43521364

Pulled By: anand1976

fbshipit-source-id: a92f04884e738a9bf40ad4047acaaaea343838a7
anand76 authored 2 years ago; committed by Facebook GitHub Bot
parent b7e73501d8
commit cf09917c18
Changed files:
1. HISTORY.md (3 changed lines)
2. cache/lru_cache.cc (18 changed lines)
3. cache/lru_cache_test.cc (67 changed lines)
4. include/rocksdb/statistics.h (5 changed lines)
5. java/rocksjni/portal.h (12 changed lines)
6. monitoring/statistics.cc (5 changed lines)

HISTORY.md

@@ -6,6 +6,9 @@
### Bug Fixes
* Fixed an issue for backward iteration when `ReadOptions::iter_start_ts` is specified in combination with BlobDB.
### New Features
* Add statistics rocksdb.secondary.cache.filter.hits, rocksdb.secondary.cache.index.hits, and rocksdb.secondary.cache.data.hits
## 8.0.0 (02/19/2023)
### Behavior changes
* `ReadOptions::verify_checksums=false` disables checksum verification for more reads of non-`CacheEntryRole::kDataBlock` blocks.
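For context, here is a minimal usage sketch (not part of this change) of reading the three new tickers from an application. It assumes statistics are enabled on the DB and a secondary cache is configured behind the block cache; the DB path and workload are placeholders.

// Hypothetical sketch, not part of this PR: read the new secondary cache
// tickers through the public Statistics API.
#include <iostream>

#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "rocksdb/statistics.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  // Ticker counts are only collected when statistics are enabled.
  options.statistics = rocksdb::CreateDBStatistics();

  rocksdb::DB* db = nullptr;
  // Placeholder path, for illustration only.
  rocksdb::Status s =
      rocksdb::DB::Open(options, "/tmp/secondary_cache_stats_demo", &db);
  if (!s.ok()) {
    return 1;
  }

  // ... run a read-heavy workload with a secondary cache configured behind
  // the block cache so the tickers below get populated ...

  std::cout << "filter hits: "
            << options.statistics->getTickerCount(
                   rocksdb::SECONDARY_CACHE_FILTER_HITS)
            << "\nindex hits: "
            << options.statistics->getTickerCount(
                   rocksdb::SECONDARY_CACHE_INDEX_HITS)
            << "\ndata hits: "
            << options.statistics->getTickerCount(
                   rocksdb::SECONDARY_CACHE_DATA_HITS)
            << std::endl;

  delete db;
  return 0;
}

Together with the existing rocksdb.secondary.cache.hits ticker, these give a per-block-type breakdown of how often the secondary cache serves filter, index, and data blocks.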

cache/lru_cache.cc

@@ -587,17 +587,29 @@ LRUHandle* LRUCacheShard::Lookup(const Slice& key, uint32_t hash,
e->Unref();
e->Free(table_.GetAllocator());
e = nullptr;
} else {
PERF_COUNTER_ADD(secondary_cache_hit_count, 1);
RecordTick(stats, SECONDARY_CACHE_HITS);
}
}
} else {
// If wait is false, we always return a handle and let the caller
// release the handle after checking for success or failure.
e->SetIsPending(true);
}
if (e) {
// This may be slightly inaccurate, if the lookup eventually fails.
// But the probability is very low.
switch (helper->role) {
case CacheEntryRole::kFilterBlock:
RecordTick(stats, SECONDARY_CACHE_FILTER_HITS);
break;
case CacheEntryRole::kIndexBlock:
RecordTick(stats, SECONDARY_CACHE_INDEX_HITS);
break;
case CacheEntryRole::kDataBlock:
RecordTick(stats, SECONDARY_CACHE_DATA_HITS);
break;
default:
break;
}
PERF_COUNTER_ADD(secondary_cache_hit_count, 1);
RecordTick(stats, SECONDARY_CACHE_HITS);
}

cache/lru_cache_test.cc

@@ -1212,6 +1212,73 @@ TEST_F(LRUCacheSecondaryCacheTest, BasicTest) {
secondary_cache.reset();
}
TEST_F(LRUCacheSecondaryCacheTest, StatsTest) {
LRUCacheOptions opts(1024 /* capacity */, 0 /* num_shard_bits */,
false /* strict_capacity_limit */,
0.5 /* high_pri_pool_ratio */,
nullptr /* memory_allocator */, kDefaultToAdaptiveMutex,
kDontChargeCacheMetadata);
std::shared_ptr<TestSecondaryCache> secondary_cache =
std::make_shared<TestSecondaryCache>(4096);
opts.secondary_cache = secondary_cache;
std::shared_ptr<Cache> cache = NewLRUCache(opts);
std::shared_ptr<Statistics> stats = CreateDBStatistics();
CacheKey k1 = CacheKey::CreateUniqueForCacheLifetime(cache.get());
CacheKey k2 = CacheKey::CreateUniqueForCacheLifetime(cache.get());
CacheKey k3 = CacheKey::CreateUniqueForCacheLifetime(cache.get());
Cache::CacheItemHelper filter_helper = helper_;
Cache::CacheItemHelper index_helper = helper_;
Cache::CacheItemHelper data_helper = helper_;
filter_helper.role = CacheEntryRole::kFilterBlock;
index_helper.role = CacheEntryRole::kIndexBlock;
data_helper.role = CacheEntryRole::kDataBlock;
Random rnd(301);
// Start with warming secondary cache
std::string str1 = rnd.RandomString(1020);
std::string str2 = rnd.RandomString(1020);
std::string str3 = rnd.RandomString(1020);
ASSERT_OK(secondary_cache->InsertSaved(k1.AsSlice(), str1));
ASSERT_OK(secondary_cache->InsertSaved(k2.AsSlice(), str2));
ASSERT_OK(secondary_cache->InsertSaved(k3.AsSlice(), str3));
get_perf_context()->Reset();
Cache::Handle* handle;
handle =
cache->Lookup(k1.AsSlice(), &filter_helper,
/*context*/ this, Cache::Priority::LOW, true, stats.get());
ASSERT_NE(handle, nullptr);
ASSERT_EQ(static_cast<TestItem*>(cache->Value(handle))->Size(), str1.size());
cache->Release(handle);
handle =
cache->Lookup(k2.AsSlice(), &index_helper,
/*context*/ this, Cache::Priority::LOW, true, stats.get());
ASSERT_NE(handle, nullptr);
ASSERT_EQ(static_cast<TestItem*>(cache->Value(handle))->Size(), str2.size());
cache->Release(handle);
handle =
cache->Lookup(k3.AsSlice(), &data_helper,
/*context*/ this, Cache::Priority::LOW, true, stats.get());
ASSERT_NE(handle, nullptr);
ASSERT_EQ(static_cast<TestItem*>(cache->Value(handle))->Size(), str3.size());
cache->Release(handle);
ASSERT_EQ(secondary_cache->num_inserts(), 3u);
ASSERT_EQ(secondary_cache->num_lookups(), 3u);
ASSERT_EQ(stats->getTickerCount(SECONDARY_CACHE_HITS),
secondary_cache->num_lookups());
ASSERT_EQ(stats->getTickerCount(SECONDARY_CACHE_FILTER_HITS), 1);
ASSERT_EQ(stats->getTickerCount(SECONDARY_CACHE_INDEX_HITS), 1);
ASSERT_EQ(stats->getTickerCount(SECONDARY_CACHE_DATA_HITS), 1);
PerfContext perf_ctx = *get_perf_context();
ASSERT_EQ(perf_ctx.secondary_cache_hit_count, secondary_cache->num_lookups());
cache.reset();
secondary_cache.reset();
}
TEST_F(LRUCacheSecondaryCacheTest, BasicFailTest) {
LRUCacheOptions opts(1024 /* capacity */, 0 /* num_shard_bits */,
false /* strict_capacity_limit */,

include/rocksdb/statistics.h

@@ -415,6 +415,11 @@ enum Tickers : uint32_t {
// Number of errors returned to the async read callback
ASYNC_READ_ERROR_COUNT,
// Fine grained secondary cache stats
SECONDARY_CACHE_FILTER_HITS,
SECONDARY_CACHE_INDEX_HITS,
SECONDARY_CACHE_DATA_HITS,
TICKER_ENUM_MAX
};

java/rocksjni/portal.h

@@ -5119,6 +5119,12 @@ class TickerTypeJni {
return -0x35;
case ROCKSDB_NAMESPACE::Tickers::ASYNC_READ_ERROR_COUNT:
return -0x36;
case ROCKSDB_NAMESPACE::Tickers::SECONDARY_CACHE_FILTER_HITS:
return -0x37;
case ROCKSDB_NAMESPACE::Tickers::SECONDARY_CACHE_INDEX_HITS:
return -0x38;
case ROCKSDB_NAMESPACE::Tickers::SECONDARY_CACHE_DATA_HITS:
return -0x39;
case ROCKSDB_NAMESPACE::Tickers::TICKER_ENUM_MAX:
// 0x5F was the max value in the initial copy of tickers to Java.
// Since these values are exposed directly to Java clients, we keep
@@ -5470,6 +5476,12 @@ class TickerTypeJni {
return ROCKSDB_NAMESPACE::Tickers::READ_ASYNC_MICROS;
case -0x36:
return ROCKSDB_NAMESPACE::Tickers::ASYNC_READ_ERROR_COUNT;
case -0x37:
return ROCKSDB_NAMESPACE::Tickers::SECONDARY_CACHE_FILTER_HITS;
case -0x38:
return ROCKSDB_NAMESPACE::Tickers::SECONDARY_CACHE_INDEX_HITS;
case -0x39:
return ROCKSDB_NAMESPACE::Tickers::SECONDARY_CACHE_DATA_HITS;
case 0x5F:
// 0x5F was the max value in the initial copy of tickers to Java.
// Since these values are exposed directly to Java clients, we keep

monitoring/statistics.cc

@@ -213,7 +213,10 @@ const std::vector<std::pair<Tickers, std::string>> TickersNameMap = {
{BLOB_DB_CACHE_BYTES_READ, "rocksdb.blobdb.cache.bytes.read"},
{BLOB_DB_CACHE_BYTES_WRITE, "rocksdb.blobdb.cache.bytes.write"},
{READ_ASYNC_MICROS, "rocksdb.read.async.micros"},
{ASYNC_READ_ERROR_COUNT, "rocksdb.async.read.error.count"},
{SECONDARY_CACHE_FILTER_HITS, "rocksdb.secondary.cache.filter.hits"},
{SECONDARY_CACHE_INDEX_HITS, "rocksdb.secondary.cache.index.hits"},
{SECONDARY_CACHE_DATA_HITS, "rocksdb.secondary.cache.data.hits"}};
const std::vector<std::pair<Histograms, std::string>> HistogramsNameMap = {
{DB_GET, "rocksdb.db.get.micros"},
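The names registered in TickersNameMap above are also what appear in a statistics dump. A small sketch (not part of this change), assuming the usual one-line-per-ticker format of Statistics::ToString():

// Hypothetical sketch: the new tickers show up in the standard statistics
// dump under the names registered above.
#include <iostream>
#include <memory>

#include "rocksdb/statistics.h"

void DumpSecondaryCacheStats(
    const std::shared_ptr<rocksdb::Statistics>& stats) {
  // ToString() emits one line per ticker; with the entries added above, the
  // dump includes lines such as (counts illustrative):
  //   rocksdb.secondary.cache.filter.hits COUNT : 12
  //   rocksdb.secondary.cache.index.hits COUNT : 7
  //   rocksdb.secondary.cache.data.hits COUNT : 42
  std::cout << stats->ToString() << std::endl;
}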
