diff --git a/cache/clock_cache.cc b/cache/clock_cache.cc
index 2275867d1..bb1d06254 100644
--- a/cache/clock_cache.cc
+++ b/cache/clock_cache.cc
@@ -404,9 +404,9 @@ void ClockCacheShard::ApplyToSomeEntries(
     *state = index_end << (32 - length_bits);
   }
 
-  table_.ApplyToEntriesRange(
+  table_.ConstApplyToEntriesRange(
       [callback,
-       metadata_charge_policy = metadata_charge_policy_](ClockHandle* h) {
+       metadata_charge_policy = metadata_charge_policy_](const ClockHandle* h) {
         callback(h->key(), h->value, h->GetCharge(metadata_charge_policy),
                  h->deleter);
       },
@@ -616,7 +616,7 @@ size_t ClockCacheShard::GetPinnedUsage() const {
   size_t clock_usage = 0;
 
   table_.ConstApplyToEntriesRange(
-      [&clock_usage](ClockHandle* h) {
+      [&clock_usage](const ClockHandle* h) {
         if (h->ExternalRefs() > 1) {
           // We check > 1 because we are holding an external ref.
           clock_usage += h->total_charge;
diff --git a/cache/clock_cache.h b/cache/clock_cache.h
index 7a24b0bc6..e495f1c04 100644
--- a/cache/clock_cache.h
+++ b/cache/clock_cache.h
@@ -576,8 +576,8 @@ class ClockHandleTable {
   // metadata.
   void Free(autovector<ClockHandle>* deleted);
 
-  template <typename T>
-  void ApplyToEntriesRange(T func, uint32_t index_begin, uint32_t index_end,
+  void ApplyToEntriesRange(std::function<void(ClockHandle*)> func,
+                           uint32_t index_begin, uint32_t index_end,
                            bool apply_if_will_be_deleted) {
     for (uint32_t i = index_begin; i < index_end; i++) {
       ClockHandle* h = &array_[i];
@@ -591,9 +591,8 @@ class ClockHandleTable {
     }
   }
 
-  template <typename T>
-  void ConstApplyToEntriesRange(T func, uint32_t index_begin,
-                                uint32_t index_end,
+  void ConstApplyToEntriesRange(std::function<void(const ClockHandle*)> func,
+                                uint32_t index_begin, uint32_t index_end,
                                 bool apply_if_will_be_deleted) const {
     for (uint32_t i = index_begin; i < index_end; i++) {
       ClockHandle* h = &array_[i];
diff --git a/db/db_block_cache_test.cc b/db/db_block_cache_test.cc
index d2cf32316..68221d4d0 100644
--- a/db/db_block_cache_test.cc
+++ b/db/db_block_cache_test.cc
@@ -1294,10 +1294,12 @@ TEST_F(DBBlockCacheTest, CacheEntryRoleStats) {
   const size_t capacity = size_t{1} << 25;
   int iterations_tested = 0;
   for (bool partition : {false, true}) {
-    for (std::shared_ptr<Cache> cache : {NewLRUCache(capacity)}) {
-      // This test doesn't support FastLRUCache nor ClockCache because the
-      // keys used are not 16B long.
-      // TODO(guido) Add support for FastLRUCache and ClockCache.
+    for (std::shared_ptr<Cache> cache :
+         {NewLRUCache(capacity),
+          ExperimentalNewClockCache(capacity, 1 /*estimated_value_size*/,
+                                    -1 /*num_shard_bits*/,
+                                    false /*strict_capacity_limit*/,
+                                    kDefaultCacheMetadataChargePolicy)}) {
       if (!cache) {
         // Skip clock cache when not supported
         continue;
@@ -1450,9 +1452,16 @@ TEST_F(DBBlockCacheTest, CacheEntryRoleStats) {
       // even if the cache is full
       ClearCache(cache.get());
       Cache::Handle* h = nullptr;
-      ASSERT_OK(cache->Insert("Fill-it-up", nullptr, capacity + 1,
-                              GetNoopDeleterForRole<CacheEntryRole::kMisc>(),
-                              &h, Cache::Priority::HIGH));
+      if (strcmp(cache->Name(), "LRUCache") == 0) {
+        ASSERT_OK(cache->Insert("Fill-it-up", nullptr, capacity + 1,
+                                GetNoopDeleterForRole<CacheEntryRole::kMisc>(),
+                                &h, Cache::Priority::HIGH));
+      } else {
+        // For ClockCache we use a 16-byte key.
+        ASSERT_OK(cache->Insert("Fill-it-up-xxxxx", nullptr, capacity + 1,
+                                GetNoopDeleterForRole<CacheEntryRole::kMisc>(),
+                                &h, Cache::Priority::HIGH));
+      }
       ASSERT_GT(cache->GetUsage(), cache->GetCapacity());
       expected = {};
       // For CacheEntryStatsCollector
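
Note on the clock_cache.h change above: spelling the two traversal methods with std::function parameters instead of a single member template makes their contracts explicit, and the const variant now hands callers a const ClockHandle*, so read-only passes such as GetPinnedUsage and ApplyToSomeEntries are enforced by the type system rather than by convention. The following is a minimal sketch, not RocksDB code; Table and Handle are hypothetical stand-ins for ClockHandleTable and ClockHandle, illustrating the same pattern.

// Minimal sketch, NOT RocksDB code: Table and Handle are hypothetical
// stand-ins for ClockHandleTable and ClockHandle.
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iostream>
#include <vector>

struct Handle {
  size_t total_charge = 0;
};

class Table {
 public:
  explicit Table(size_t n) : array_(n) {}

  // Mutating traversal: callers receive Handle* and may modify entries.
  void ApplyToEntriesRange(std::function<void(Handle*)> func,
                           uint32_t index_begin, uint32_t index_end) {
    for (uint32_t i = index_begin; i < index_end; i++) {
      func(&array_[i]);
    }
  }

  // Read-only traversal: the const qualifier plus the const Handle*
  // parameter let the compiler reject any mutation in the callback.
  void ConstApplyToEntriesRange(std::function<void(const Handle*)> func,
                                uint32_t index_begin,
                                uint32_t index_end) const {
    for (uint32_t i = index_begin; i < index_end; i++) {
      func(&array_[i]);
    }
  }

 private:
  std::vector<Handle> array_;
};

int main() {
  Table table(4);
  // A maintenance pass that must modify entries uses the non-const form.
  table.ApplyToEntriesRange([](Handle* h) { h->total_charge += 1; }, 0, 4);
  // A stats pass, like GetPinnedUsage above, only needs the const form.
  size_t usage = 0;
  table.ConstApplyToEntriesRange(
      [&usage](const Handle* h) { usage += h->total_charge; }, 0, 4);
  std::cout << usage << "\n";  // prints 4
  return 0;
}

The trade-off is one type-erased call per entry instead of an inlined template instantiation; for coarse passes like stats collection that cost is typically negligible.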