diff --git a/HISTORY.md b/HISTORY.md index db2c08021..6a8594233 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -6,6 +6,9 @@ ### Bug Fixes * Fixed an issue for backward iteration when `ReadOptions::iter_start_ts` is specified in combination with BlobDB. +### New Features +* Add statistics rocksdb.secondary.cache.filter.hits, rocksdb.secondary.cache.index.hits, and rocksdb.secondary.cache.data.hits + ## 8.0.0 (02/19/2023) ### Behavior changes * `ReadOptions::verify_checksums=false` disables checksum verification for more reads of non-`CacheEntryRole::kDataBlock` blocks. diff --git a/cache/lru_cache.cc b/cache/lru_cache.cc index 95cd320a7..bcdd2c6b8 100644 --- a/cache/lru_cache.cc +++ b/cache/lru_cache.cc @@ -587,17 +587,29 @@ LRUHandle* LRUCacheShard::Lookup(const Slice& key, uint32_t hash, e->Unref(); e->Free(table_.GetAllocator()); e = nullptr; - } else { - PERF_COUNTER_ADD(secondary_cache_hit_count, 1); - RecordTick(stats, SECONDARY_CACHE_HITS); - } } } else { // If wait is false, we always return a handle and let the caller // release the handle after checking for success or failure. e->SetIsPending(true); + } + if (e) { // This may be slightly inaccurate, if the lookup eventually fails. // But the probability is very low. 
+ switch (helper->role) { + case CacheEntryRole::kFilterBlock: + RecordTick(stats, SECONDARY_CACHE_FILTER_HITS); + break; + case CacheEntryRole::kIndexBlock: + RecordTick(stats, SECONDARY_CACHE_INDEX_HITS); + break; + case CacheEntryRole::kDataBlock: + RecordTick(stats, SECONDARY_CACHE_DATA_HITS); + break; + default: + break; + } PERF_COUNTER_ADD(secondary_cache_hit_count, 1); RecordTick(stats, SECONDARY_CACHE_HITS); } diff --git a/cache/lru_cache_test.cc b/cache/lru_cache_test.cc index 3ea67e40b..8eb20df45 100644 --- a/cache/lru_cache_test.cc +++ b/cache/lru_cache_test.cc @@ -1212,6 +1212,73 @@ TEST_F(LRUCacheSecondaryCacheTest, BasicTest) { secondary_cache.reset(); } +TEST_F(LRUCacheSecondaryCacheTest, StatsTest) { + LRUCacheOptions opts(1024 /* capacity */, 0 /* num_shard_bits */, + false /* strict_capacity_limit */, + 0.5 /* high_pri_pool_ratio */, + nullptr /* memory_allocator */, kDefaultToAdaptiveMutex, + kDontChargeCacheMetadata); + std::shared_ptr<TestSecondaryCache> secondary_cache = + std::make_shared<TestSecondaryCache>(4096); + opts.secondary_cache = secondary_cache; + std::shared_ptr<Cache> cache = NewLRUCache(opts); + std::shared_ptr<Statistics> stats = CreateDBStatistics(); + CacheKey k1 = CacheKey::CreateUniqueForCacheLifetime(cache.get()); + CacheKey k2 = CacheKey::CreateUniqueForCacheLifetime(cache.get()); + CacheKey k3 = CacheKey::CreateUniqueForCacheLifetime(cache.get()); + Cache::CacheItemHelper filter_helper = helper_; + Cache::CacheItemHelper index_helper = helper_; + Cache::CacheItemHelper data_helper = helper_; + filter_helper.role = CacheEntryRole::kFilterBlock; + index_helper.role = CacheEntryRole::kIndexBlock; + data_helper.role = CacheEntryRole::kDataBlock; + + Random rnd(301); + // Start with warming secondary cache + std::string str1 = rnd.RandomString(1020); + std::string str2 = rnd.RandomString(1020); + std::string str3 = rnd.RandomString(1020); + ASSERT_OK(secondary_cache->InsertSaved(k1.AsSlice(), str1)); + ASSERT_OK(secondary_cache->InsertSaved(k2.AsSlice(), str2)); + 
ASSERT_OK(secondary_cache->InsertSaved(k3.AsSlice(), str3)); + + get_perf_context()->Reset(); + Cache::Handle* handle; + handle = + cache->Lookup(k1.AsSlice(), &filter_helper, + /*context*/ this, Cache::Priority::LOW, true, stats.get()); + ASSERT_NE(handle, nullptr); + ASSERT_EQ(static_cast<TestItem*>(cache->Value(handle))->Size(), str1.size()); + cache->Release(handle); + + handle = + cache->Lookup(k2.AsSlice(), &index_helper, + /*context*/ this, Cache::Priority::LOW, true, stats.get()); + ASSERT_NE(handle, nullptr); + ASSERT_EQ(static_cast<TestItem*>(cache->Value(handle))->Size(), str2.size()); + cache->Release(handle); + + handle = + cache->Lookup(k3.AsSlice(), &data_helper, + /*context*/ this, Cache::Priority::LOW, true, stats.get()); + ASSERT_NE(handle, nullptr); + ASSERT_EQ(static_cast<TestItem*>(cache->Value(handle))->Size(), str3.size()); + cache->Release(handle); + + ASSERT_EQ(secondary_cache->num_inserts(), 3u); + ASSERT_EQ(secondary_cache->num_lookups(), 3u); + ASSERT_EQ(stats->getTickerCount(SECONDARY_CACHE_HITS), + secondary_cache->num_lookups()); + ASSERT_EQ(stats->getTickerCount(SECONDARY_CACHE_FILTER_HITS), 1); + ASSERT_EQ(stats->getTickerCount(SECONDARY_CACHE_INDEX_HITS), 1); + ASSERT_EQ(stats->getTickerCount(SECONDARY_CACHE_DATA_HITS), 1); + PerfContext perf_ctx = *get_perf_context(); + ASSERT_EQ(perf_ctx.secondary_cache_hit_count, secondary_cache->num_lookups()); + + cache.reset(); + secondary_cache.reset(); +} + TEST_F(LRUCacheSecondaryCacheTest, BasicFailTest) { LRUCacheOptions opts(1024 /* capacity */, 0 /* num_shard_bits */, false /* strict_capacity_limit */, diff --git a/include/rocksdb/statistics.h b/include/rocksdb/statistics.h index b9e027b60..ce550f5bc 100644 --- a/include/rocksdb/statistics.h +++ b/include/rocksdb/statistics.h @@ -415,6 +415,11 @@ enum Tickers : uint32_t { // Number of errors returned to the async read callback ASYNC_READ_ERROR_COUNT, + // Fine grained secondary cache stats + SECONDARY_CACHE_FILTER_HITS, + SECONDARY_CACHE_INDEX_HITS, + 
SECONDARY_CACHE_DATA_HITS, + TICKER_ENUM_MAX }; diff --git a/java/rocksjni/portal.h b/java/rocksjni/portal.h index 82fa3b48b..9d36ec863 100644 --- a/java/rocksjni/portal.h +++ b/java/rocksjni/portal.h @@ -5119,6 +5119,12 @@ class TickerTypeJni { return -0x35; case ROCKSDB_NAMESPACE::Tickers::ASYNC_READ_ERROR_COUNT: return -0x36; + case ROCKSDB_NAMESPACE::Tickers::SECONDARY_CACHE_FILTER_HITS: + return -0x37; + case ROCKSDB_NAMESPACE::Tickers::SECONDARY_CACHE_INDEX_HITS: + return -0x38; + case ROCKSDB_NAMESPACE::Tickers::SECONDARY_CACHE_DATA_HITS: + return -0x39; case ROCKSDB_NAMESPACE::Tickers::TICKER_ENUM_MAX: // 0x5F was the max value in the initial copy of tickers to Java. // Since these values are exposed directly to Java clients, we keep @@ -5470,6 +5476,12 @@ class TickerTypeJni { return ROCKSDB_NAMESPACE::Tickers::READ_ASYNC_MICROS; case -0x36: return ROCKSDB_NAMESPACE::Tickers::ASYNC_READ_ERROR_COUNT; + case -0x37: + return ROCKSDB_NAMESPACE::Tickers::SECONDARY_CACHE_FILTER_HITS; + case -0x38: + return ROCKSDB_NAMESPACE::Tickers::SECONDARY_CACHE_INDEX_HITS; + case -0x39: + return ROCKSDB_NAMESPACE::Tickers::SECONDARY_CACHE_DATA_HITS; case 0x5F: // 0x5F was the max value in the initial copy of tickers to Java. 
// Since these values are exposed directly to Java clients, we keep diff --git a/monitoring/statistics.cc b/monitoring/statistics.cc index f6dd61651..476694746 100644 --- a/monitoring/statistics.cc +++ b/monitoring/statistics.cc @@ -213,7 +213,10 @@ const std::vector> TickersNameMap = { {BLOB_DB_CACHE_BYTES_READ, "rocksdb.blobdb.cache.bytes.read"}, {BLOB_DB_CACHE_BYTES_WRITE, "rocksdb.blobdb.cache.bytes.write"}, {READ_ASYNC_MICROS, "rocksdb.read.async.micros"}, - {ASYNC_READ_ERROR_COUNT, "rocksdb.async.read.error.count"}}; + {ASYNC_READ_ERROR_COUNT, "rocksdb.async.read.error.count"}, + {SECONDARY_CACHE_FILTER_HITS, "rocksdb.secondary.cache.filter.hits"}, + {SECONDARY_CACHE_INDEX_HITS, "rocksdb.secondary.cache.index.hits"}, + {SECONDARY_CACHE_DATA_HITS, "rocksdb.secondary.cache.data.hits"}}; const std::vector> HistogramsNameMap = { {DB_GET, "rocksdb.db.get.micros"},