Misc cleanup of block cache code (#11291)

Summary:
... ahead of a larger change.
* Rename the confusingly named `is_in_sec_cache` to `kept_in_sec_cache` (see the caller-side sketch after this list)
* Unify naming of "standalone" block cache entries (was "detached" in clock_cache)
* Remove some unused definitions in clock_cache.h (leftover from a previous revision)
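
For illustration, a minimal caller-side sketch of the renamed out-parameter (a sketch only, not code from this change; `LookupAndCheck` is a hypothetical helper, and the call mirrors the `lru_cache.cc` hunk below):

```cpp
#include <memory>

#include "rocksdb/secondary_cache.h"

using namespace ROCKSDB_NAMESPACE;

// Hypothetical helper, for illustration only. After the rename, the out-param
// answers "was the entry kept in the secondary cache after this Lookup?"
// rather than the ambiguous "is it in the secondary cache?".
std::unique_ptr<SecondaryCacheResultHandle> LookupAndCheck(
    SecondaryCache& sec_cache, const Slice& key,
    const Cache::CacheItemHelper* helper, Cache::CreateContext* create_ctx) {
  bool kept_in_sec_cache = false;  // was: is_in_sec_cache
  auto handle = sec_cache.Lookup(key, helper, create_ctx, /*wait=*/true,
                                 /*advise_erase=*/true, kept_in_sec_cache);
  if (handle && !kept_in_sec_cache) {
    // The secondary cache dropped its copy during this Lookup, so the caller
    // is now responsible for promoting the value into the primary cache.
  }
  return handle;
}
```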

Pull Request resolved: https://github.com/facebook/rocksdb/pull/11291

Test Plan: usual tests and CI, no behavior changes

Reviewed By: anand1976

Differential Revision: D43984642

Pulled By: pdillinger

fbshipit-source-id: b8bf0c5b90a932a88bcbdb413b2f256834aedf97
Authored by Peter Dillinger 2 years ago; committed by Facebook GitHub Bot
parent 11cb6af6e5
commit 601efe3cf2
Changed files (12), with lines changed:
  53  cache/clock_cache.cc
  36  cache/clock_cache.h
   6  cache/compressed_secondary_cache.cc
   2  cache/compressed_secondary_cache.h
  30  cache/compressed_secondary_cache_test.cc
   6  cache/lru_cache.cc
   6  cache/lru_cache_test.cc
  12  db/blob/blob_source_test.cc
   6  include/rocksdb/secondary_cache.h
   4  options/customizable_test.cc
   6  utilities/fault_injection_secondary_cache.cc
   2  utilities/fault_injection_secondary_cache.h

cache/clock_cache.cc
@@ -364,21 +364,22 @@ inline bool HyperClockTable::ChargeUsageMaybeEvictNonStrict(
   return true;
 }
 
-inline HyperClockTable::HandleImpl* HyperClockTable::DetachedInsert(
+inline HyperClockTable::HandleImpl* HyperClockTable::StandaloneInsert(
     const ClockHandleBasicData& proto) {
   // Heap allocated separate from table
   HandleImpl* h = new HandleImpl();
   ClockHandleBasicData* h_alias = h;
   *h_alias = proto;
-  h->SetDetached();
-  // Single reference (detached entries only created if returning a refed
+  h->SetStandalone();
+  // Single reference (standalone entries only created if returning a refed
   // Handle back to user)
   uint64_t meta = uint64_t{ClockHandle::kStateInvisible}
                   << ClockHandle::kStateShift;
   meta |= uint64_t{1} << ClockHandle::kAcquireCounterShift;
   h->meta.store(meta, std::memory_order_release);
-  // Keep track of how much of usage is detached
-  detached_usage_.fetch_add(proto.GetTotalCharge(), std::memory_order_relaxed);
+  // Keep track of how much of usage is standalone
+  standalone_usage_.fetch_add(proto.GetTotalCharge(),
+                              std::memory_order_relaxed);
   return h;
 }
@@ -396,7 +397,7 @@ Status HyperClockTable::Insert(const ClockHandleBasicData& proto,
   // Usage/capacity handling is somewhat different depending on
   // strict_capacity_limit, but mostly pessimistic.
-  bool use_detached_insert = false;
+  bool use_standalone_insert = false;
   const size_t total_charge = proto.GetTotalCharge();
   if (strict_capacity_limit) {
     Status s = ChargeUsageMaybeEvictStrict(total_charge, capacity,
@@ -417,9 +418,9 @@ Status HyperClockTable::Insert(const ClockHandleBasicData& proto,
       proto.FreeData(allocator_);
       return Status::OK();
     } else {
-      // Need to track usage of fallback detached insert
+      // Need to track usage of fallback standalone insert
       usage_.fetch_add(total_charge, std::memory_order_relaxed);
-      use_detached_insert = true;
+      use_standalone_insert = true;
     }
   }
 }
@@ -429,7 +430,7 @@ Status HyperClockTable::Insert(const ClockHandleBasicData& proto,
    assert(usage_.load(std::memory_order_relaxed) < SIZE_MAX / 2);
  };
 
-  if (!use_detached_insert) {
+  if (!use_standalone_insert) {
    // Attempt a table insert, but abort if we find an existing entry for the
    // key. If we were to overwrite old entries, we would either
    // * Have to gain ownership over an existing entry to overwrite it, which
@@ -500,8 +501,8 @@ Status HyperClockTable::Insert(const ClockHandleBasicData& proto,
                                     std::memory_order_acq_rel);
          // Correct for possible (but rare) overflow
          CorrectNearOverflow(old_meta, h->meta);
-          // Insert detached instead (only if return handle needed)
-          use_detached_insert = true;
+          // Insert standalone instead (only if return handle needed)
+          use_standalone_insert = true;
          return true;
        } else {
          // Mismatch. Pretend we never took the reference
@@ -539,9 +540,9 @@ Status HyperClockTable::Insert(const ClockHandleBasicData& proto,
    // That should be infeasible for roughly n >= 256, so if this assertion
    // fails, that suggests something is going wrong.
    assert(GetTableSize() < 256);
-    use_detached_insert = true;
+    use_standalone_insert = true;
  }
-  if (!use_detached_insert) {
+  if (!use_standalone_insert) {
    // Successfully inserted
    if (handle) {
      *handle = e;
@@ -551,7 +552,7 @@ Status HyperClockTable::Insert(const ClockHandleBasicData& proto,
    // Roll back table insertion
    Rollback(proto.hashed_key, e);
    revert_occupancy_fn();
-    // Maybe fall back on detached insert
+    // Maybe fall back on standalone insert
    if (handle == nullptr) {
      revert_usage_fn();
      // As if unrefed entry immdiately evicted
@@ -560,16 +561,16 @@ Status HyperClockTable::Insert(const ClockHandleBasicData& proto,
    }
  }
 
-  // Run detached insert
-  assert(use_detached_insert);
-  *handle = DetachedInsert(proto);
+  // Run standalone insert
+  assert(use_standalone_insert);
+  *handle = StandaloneInsert(proto);
 
  // The OkOverwritten status is used to count "redundant" insertions into
  // block cache. This implementation doesn't strictly check for redundant
  // insertions, but we instead are probably interested in how many insertions
-  // didn't go into the table (instead "detached"), which could be redundant
-  // Insert or some other reason (use_detached_insert reasons above).
+  // didn't go into the table (instead "standalone"), which could be redundant
+  // Insert or some other reason (use_standalone_insert reasons above).
  return Status::OkOverwritten();
 }
@@ -696,11 +697,11 @@ bool HyperClockTable::Release(HandleImpl* h, bool useful,
                                          std::memory_order_acquire));
    // Took ownership
    size_t total_charge = h->GetTotalCharge();
-    if (UNLIKELY(h->IsDetached())) {
+    if (UNLIKELY(h->IsStandalone())) {
      h->FreeData(allocator_);
-      // Delete detached handle
+      // Delete standalone handle
      delete h;
-      detached_usage_.fetch_sub(total_charge, std::memory_order_relaxed);
+      standalone_usage_.fetch_sub(total_charge, std::memory_order_relaxed);
      usage_.fetch_sub(total_charge, std::memory_order_relaxed);
    } else {
      Rollback(h->hashed_key, h);
@@ -1156,8 +1157,8 @@ size_t ClockCacheShard<Table>::GetUsage() const {
 }
 
 template <class Table>
-size_t ClockCacheShard<Table>::GetDetachedUsage() const {
-  return table_.GetDetachedUsage();
+size_t ClockCacheShard<Table>::GetStandaloneUsage() const {
+  return table_.GetStandaloneUsage();
 }
 
 template <class Table>
@@ -1191,7 +1192,7 @@ size_t ClockCacheShard<Table>::GetPinnedUsage() const {
      },
      0, table_.GetTableSize(), true);
 
-  return table_pinned_usage + table_.GetDetachedUsage();
+  return table_pinned_usage + table_.GetStandaloneUsage();
 }
 
 template <class Table>
@@ -1259,7 +1260,7 @@ namespace {
 void AddShardEvaluation(const HyperClockCache::Shard& shard,
                         std::vector<double>& predicted_load_factors,
                         size_t& min_recommendation) {
-  size_t usage = shard.GetUsage() - shard.GetDetachedUsage();
+  size_t usage = shard.GetUsage() - shard.GetStandaloneUsage();
   size_t capacity = shard.GetCapacity();
   double usage_ratio = 1.0 * usage / capacity;

cache/clock_cache.h
@@ -145,7 +145,7 @@ class ClockCacheTest;
 // (erased by user) but can be read by existing references, and ref count
 // changed by Ref and Release.
 //
-// A special case is "detached" entries, which are heap-allocated handles
+// A special case is "standalone" entries, which are heap-allocated handles
 // not in the table. They are always Invisible and freed on zero refs.
 //
 // State transitions:
@@ -200,8 +200,8 @@ class ClockCacheTest;
 // table occupancy limit has been reached. If strict_capacity_limit=false,
 // we must never fail Insert, and if a Handle* is provided, we have to return
 // a usable Cache handle on success. The solution to this (typically rare)
-// problem is "detached" handles, which are usable by the caller but not
-// actually available for Lookup in the Cache. Detached handles are allocated
+// problem is "standalone" handles, which are usable by the caller but not
+// actually available for Lookup in the Cache. Standalone handles are allocated
 // independently on the heap and specially marked so that they are freed on
 // the heap when their last reference is released.
 //
@@ -312,12 +312,6 @@ struct ClockHandleBasicData {
   UniqueId64x2 hashed_key = kNullUniqueId64x2;
   size_t total_charge = 0;
 
-  // For total_charge_and_flags
-  // "Detached" means the handle is allocated separately from hash table.
-  static constexpr uint64_t kFlagDetached = uint64_t{1} << 63;
-  // Extract just the total charge
-  static constexpr uint64_t kTotalChargeMask = kFlagDetached - 1;
-
   inline size_t GetTotalCharge() const { return total_charge; }
 
   // Calls deleter (if non-null) on cache key and value
@@ -398,11 +392,11 @@ class HyperClockTable {
     // TODO: ideally this would be packed into some other data field, such
     // as upper bits of total_charge, but that incurs a measurable performance
     // regression.
-    bool detached = false;
+    bool standalone = false;
 
-    inline bool IsDetached() const { return detached; }
+    inline bool IsStandalone() const { return standalone; }
 
-    inline void SetDetached() { detached = true; }
+    inline void SetStandalone() { standalone = true; }
   };  // struct HandleImpl
 
   struct Opts {
@@ -444,8 +438,8 @@ class HyperClockTable {
   size_t GetUsage() const { return usage_.load(std::memory_order_relaxed); }
 
-  size_t GetDetachedUsage() const {
-    return detached_usage_.load(std::memory_order_relaxed);
+  size_t GetStandaloneUsage() const {
+    return standalone_usage_.load(std::memory_order_relaxed);
   }
 
   // Acquire/release N references
@@ -514,10 +508,10 @@ class HyperClockTable {
                                          size_t capacity,
                                          bool need_evict_for_occupancy);
 
-  // Creates a "detached" handle for returning from an Insert operation that
+  // Creates a "standalone" handle for returning from an Insert operation that
   // cannot be completed by actually inserting into the table.
-  // Updates `detached_usage_` but not `usage_` nor `occupancy_`.
-  inline HandleImpl* DetachedInsert(const ClockHandleBasicData& proto);
+  // Updates `standalone_usage_` but not `usage_` nor `occupancy_`.
+  inline HandleImpl* StandaloneInsert(const ClockHandleBasicData& proto);
 
   MemoryAllocator* GetAllocator() const { return allocator_; }
@@ -555,11 +549,11 @@ class HyperClockTable {
   // Number of elements in the table.
   std::atomic<size_t> occupancy_{};
 
-  // Memory usage by entries tracked by the cache (including detached)
+  // Memory usage by entries tracked by the cache (including standalone)
   std::atomic<size_t> usage_{};
 
-  // Part of usage by detached entries (not in table)
-  std::atomic<size_t> detached_usage_{};
+  // Part of usage by standalone entries (not in table)
+  std::atomic<size_t> standalone_usage_{};
 };  // class HyperClockTable
 
 // A single shard of sharded cache.
@@ -623,7 +617,7 @@ class ALIGN_AS(CACHE_LINE_SIZE) ClockCacheShard final : public CacheShardBase {
   size_t GetUsage() const;
 
-  size_t GetDetachedUsage() const;
+  size_t GetStandaloneUsage() const;
 
   size_t GetPinnedUsage() const;
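
As an aside on the renamed counters, a toy model of the accounting relationship documented above (an illustrative sketch, not the RocksDB implementation; in the real code `usage_` is updated by the `Insert` caller while `StandaloneInsert` updates only `standalone_usage_`):

```cpp
#include <atomic>
#include <cstddef>

// Toy model, for illustration only: usage_ charges every entry the cache
// tracks (table + standalone) and standalone_usage_ only the off-table
// subset, so usage attributable to the hash table is the difference. This
// mirrors AddShardEvaluation in clock_cache.cc:
//   size_t usage = shard.GetUsage() - shard.GetStandaloneUsage();
struct UsageModel {
  std::atomic<size_t> usage_{0};
  std::atomic<size_t> standalone_usage_{0};

  // Simplified: the real code splits these two updates between
  // HyperClockTable::Insert (usage_) and StandaloneInsert (standalone_usage_).
  void ChargeStandalone(size_t charge) {
    usage_.fetch_add(charge, std::memory_order_relaxed);
    standalone_usage_.fetch_add(charge, std::memory_order_relaxed);
  }

  size_t GetTableUsage() const {
    return usage_.load(std::memory_order_relaxed) -
           standalone_usage_.load(std::memory_order_relaxed);
  }
};
```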

cache/compressed_secondary_cache.cc
@@ -40,10 +40,10 @@ CompressedSecondaryCache::~CompressedSecondaryCache() { cache_.reset(); }
 std::unique_ptr<SecondaryCacheResultHandle> CompressedSecondaryCache::Lookup(
     const Slice& key, const Cache::CacheItemHelper* helper,
     Cache::CreateContext* create_context, bool /*wait*/, bool advise_erase,
-    bool& is_in_sec_cache) {
+    bool& kept_in_sec_cache) {
   assert(helper);
   std::unique_ptr<SecondaryCacheResultHandle> handle;
-  is_in_sec_cache = false;
+  kept_in_sec_cache = false;
   Cache::Handle* lru_handle = cache_->Lookup(key);
   if (lru_handle == nullptr) {
     return nullptr;
@@ -109,7 +109,7 @@ std::unique_ptr<SecondaryCacheResultHandle> CompressedSecondaryCache::Lookup(
                  /*charge=*/0)
         .PermitUncheckedError();
   } else {
-    is_in_sec_cache = true;
+    kept_in_sec_cache = true;
     cache_->Release(lru_handle, /*erase_if_last_ref=*/false);
   }
   handle.reset(new CompressedSecondaryCacheResultHandle(value, charge));

cache/compressed_secondary_cache.h
@@ -91,7 +91,7 @@ class CompressedSecondaryCache : public SecondaryCache {
   std::unique_ptr<SecondaryCacheResultHandle> Lookup(
       const Slice& key, const Cache::CacheItemHelper* helper,
       Cache::CreateContext* create_context, bool /*wait*/, bool advise_erase,
-      bool& is_in_sec_cache) override;
+      bool& kept_in_sec_cache) override;
 
   bool SupportForceErase() const override { return true; }

cache/compressed_secondary_cache_test.cc
@@ -100,10 +100,10 @@ class CompressedSecondaryCacheTest : public testing::Test,
   void BasicTestHelper(std::shared_ptr<SecondaryCache> sec_cache,
                        bool sec_cache_is_compressed) {
     get_perf_context()->Reset();
-    bool is_in_sec_cache{true};
+    bool kept_in_sec_cache{true};
     // Lookup an non-existent key.
     std::unique_ptr<SecondaryCacheResultHandle> handle0 = sec_cache->Lookup(
-        "k0", &kHelper, this, true, /*advise_erase=*/true, is_in_sec_cache);
+        "k0", &kHelper, this, true, /*advise_erase=*/true, kept_in_sec_cache);
     ASSERT_EQ(handle0, nullptr);
 
     Random rnd(301);
@@ -117,7 +117,7 @@ class CompressedSecondaryCacheTest : public testing::Test,
     ASSERT_EQ(get_perf_context()->compressed_sec_cache_compressed_bytes, 0);
 
     std::unique_ptr<SecondaryCacheResultHandle> handle1_1 = sec_cache->Lookup(
-        "k1", &kHelper, this, true, /*advise_erase=*/false, is_in_sec_cache);
+        "k1", &kHelper, this, true, /*advise_erase=*/false, kept_in_sec_cache);
     ASSERT_EQ(handle1_1, nullptr);
 
     // Insert and Lookup the item k1 for the second time and advise erasing it.
@@ -125,9 +125,9 @@ class CompressedSecondaryCacheTest : public testing::Test,
     ASSERT_EQ(get_perf_context()->compressed_sec_cache_insert_real_count, 1);
 
     std::unique_ptr<SecondaryCacheResultHandle> handle1_2 = sec_cache->Lookup(
-        "k1", &kHelper, this, true, /*advise_erase=*/true, is_in_sec_cache);
+        "k1", &kHelper, this, true, /*advise_erase=*/true, kept_in_sec_cache);
     ASSERT_NE(handle1_2, nullptr);
-    ASSERT_FALSE(is_in_sec_cache);
+    ASSERT_FALSE(kept_in_sec_cache);
     if (sec_cache_is_compressed) {
       ASSERT_EQ(get_perf_context()->compressed_sec_cache_uncompressed_bytes,
                 1000);
@@ -145,7 +145,7 @@ class CompressedSecondaryCacheTest : public testing::Test,
     // Lookup the item k1 again.
     std::unique_ptr<SecondaryCacheResultHandle> handle1_3 = sec_cache->Lookup(
-        "k1", &kHelper, this, true, /*advise_erase=*/true, is_in_sec_cache);
+        "k1", &kHelper, this, true, /*advise_erase=*/true, kept_in_sec_cache);
     ASSERT_EQ(handle1_3, nullptr);
 
     // Insert and Lookup the item k2.
@@ -154,7 +154,7 @@ class CompressedSecondaryCacheTest : public testing::Test,
     ASSERT_OK(sec_cache->Insert("k2", &item2, &kHelper));
     ASSERT_EQ(get_perf_context()->compressed_sec_cache_insert_dummy_count, 2);
     std::unique_ptr<SecondaryCacheResultHandle> handle2_1 = sec_cache->Lookup(
-        "k2", &kHelper, this, true, /*advise_erase=*/false, is_in_sec_cache);
+        "k2", &kHelper, this, true, /*advise_erase=*/false, kept_in_sec_cache);
     ASSERT_EQ(handle2_1, nullptr);
 
     ASSERT_OK(sec_cache->Insert("k2", &item2, &kHelper));
@@ -169,7 +169,7 @@ class CompressedSecondaryCacheTest : public testing::Test,
       ASSERT_EQ(get_perf_context()->compressed_sec_cache_compressed_bytes, 0);
     }
     std::unique_ptr<SecondaryCacheResultHandle> handle2_2 = sec_cache->Lookup(
-        "k2", &kHelper, this, true, /*advise_erase=*/false, is_in_sec_cache);
+        "k2", &kHelper, this, true, /*advise_erase=*/false, kept_in_sec_cache);
     ASSERT_NE(handle2_2, nullptr);
     std::unique_ptr<TestItem> val2 =
         std::unique_ptr<TestItem>(static_cast<TestItem*>(handle2_2->Value()));
@@ -247,15 +247,15 @@ class CompressedSecondaryCacheTest : public testing::Test,
     TestItem item2(str2.data(), str2.length());
     // Insert a dummy handle, k1 is not evicted.
     ASSERT_OK(sec_cache->Insert("k2", &item2, &kHelper));
-    bool is_in_sec_cache{false};
+    bool kept_in_sec_cache{false};
     std::unique_ptr<SecondaryCacheResultHandle> handle1 = sec_cache->Lookup(
-        "k1", &kHelper, this, true, /*advise_erase=*/false, is_in_sec_cache);
+        "k1", &kHelper, this, true, /*advise_erase=*/false, kept_in_sec_cache);
     ASSERT_EQ(handle1, nullptr);
 
     // Insert k2 and k1 is evicted.
     ASSERT_OK(sec_cache->Insert("k2", &item2, &kHelper));
     std::unique_ptr<SecondaryCacheResultHandle> handle2 = sec_cache->Lookup(
-        "k2", &kHelper, this, true, /*advise_erase=*/false, is_in_sec_cache);
+        "k2", &kHelper, this, true, /*advise_erase=*/false, kept_in_sec_cache);
     ASSERT_NE(handle2, nullptr);
     std::unique_ptr<TestItem> val2 =
         std::unique_ptr<TestItem>(static_cast<TestItem*>(handle2->Value()));
@@ -266,13 +266,13 @@ class CompressedSecondaryCacheTest : public testing::Test,
     ASSERT_OK(sec_cache->Insert("k1", &item1, &kHelper));
     std::unique_ptr<SecondaryCacheResultHandle> handle1_1 = sec_cache->Lookup(
-        "k1", &kHelper, this, true, /*advise_erase=*/false, is_in_sec_cache);
+        "k1", &kHelper, this, true, /*advise_erase=*/false, kept_in_sec_cache);
     ASSERT_EQ(handle1_1, nullptr);
 
     // Create Fails.
     SetFailCreate(true);
     std::unique_ptr<SecondaryCacheResultHandle> handle2_1 = sec_cache->Lookup(
-        "k2", &kHelper, this, true, /*advise_erase=*/true, is_in_sec_cache);
+        "k2", &kHelper, this, true, /*advise_erase=*/true, kept_in_sec_cache);
     ASSERT_EQ(handle2_1, nullptr);
 
     // Save Fails.
@@ -970,10 +970,10 @@ TEST_P(CompressedSecondaryCacheTestWithCompressionParam, EntryRoles) {
     ASSERT_OK(sec_cache->Insert(ith_key, &item, &kHelperByRole[i]));
     ASSERT_EQ(get_perf_context()->compressed_sec_cache_insert_real_count, 1U);
 
-    bool is_in_sec_cache{true};
+    bool kept_in_sec_cache{true};
     std::unique_ptr<SecondaryCacheResultHandle> handle =
         sec_cache->Lookup(ith_key, &kHelperByRole[i], this, true,
-                          /*advise_erase=*/true, is_in_sec_cache);
+                          /*advise_erase=*/true, kept_in_sec_cache);
     ASSERT_NE(handle, nullptr);
 
     // Lookup returns the right data

cache/lru_cache.cc
@@ -555,10 +555,10 @@ LRUHandle* LRUCacheShard::Lookup(const Slice& key, uint32_t hash,
   // again, we erase it from CompressedSecondaryCache and add it into the
   // primary cache.
   if (!e && secondary_cache_ && helper && helper->create_cb) {
-    bool is_in_sec_cache{false};
+    bool kept_in_sec_cache{false};
     std::unique_ptr<SecondaryCacheResultHandle> secondary_handle =
         secondary_cache_->Lookup(key, helper, create_context, wait,
-                                 found_dummy_entry, is_in_sec_cache);
+                                 found_dummy_entry, kept_in_sec_cache);
     if (secondary_handle != nullptr) {
       e = static_cast<LRUHandle*>(malloc(sizeof(LRUHandle) - 1 + key.size()));
@@ -575,7 +575,7 @@ LRUHandle* LRUCacheShard::Lookup(const Slice& key, uint32_t hash,
       e->sec_handle = secondary_handle.release();
       e->total_charge = 0;
       e->Ref();
-      e->SetIsInSecondaryCache(is_in_sec_cache);
+      e->SetIsInSecondaryCache(kept_in_sec_cache);
       e->SetIsStandalone(secondary_cache_->SupportForceErase() &&
                          !found_dummy_entry);

cache/lru_cache_test.cc
@@ -934,12 +934,12 @@ class TestSecondaryCache : public SecondaryCache {
   std::unique_ptr<SecondaryCacheResultHandle> Lookup(
       const Slice& key, const Cache::CacheItemHelper* helper,
       Cache::CreateContext* create_context, bool /*wait*/,
-      bool /*advise_erase*/, bool& is_in_sec_cache) override {
+      bool /*advise_erase*/, bool& kept_in_sec_cache) override {
     std::string key_str = key.ToString();
     TEST_SYNC_POINT_CALLBACK("TestSecondaryCache::Lookup", &key_str);
     std::unique_ptr<SecondaryCacheResultHandle> secondary_handle;
-    is_in_sec_cache = false;
+    kept_in_sec_cache = false;
     ResultType type = ResultType::SUCCESS;
     auto iter = result_map_.find(key.ToString());
     if (iter != result_map_.end()) {
@@ -965,7 +965,7 @@ class TestSecondaryCache : public SecondaryCache {
     if (s.ok()) {
       secondary_handle.reset(new TestSecondaryCacheResultHandle(
           cache_.get(), handle, value, charge, type));
-      is_in_sec_cache = true;
+      kept_in_sec_cache = true;
     } else {
       cache_.Release(handle);
     }

db/blob/blob_source_test.cc
@@ -1214,12 +1214,12 @@ TEST_F(BlobSecondaryCacheTest, GetBlobsFromSecondaryCache) {
     ASSERT_EQ(handle0, nullptr);
 
     // key0's item should be in the secondary cache.
-    bool is_in_sec_cache = false;
+    bool kept_in_sec_cache = false;
     auto sec_handle0 = secondary_cache->Lookup(
         key0, &BlobSource::SharedCacheInterface::kFullHelper,
         /*context*/ nullptr, true,
-        /*advise_erase=*/true, is_in_sec_cache);
-    ASSERT_FALSE(is_in_sec_cache);
+        /*advise_erase=*/true, kept_in_sec_cache);
+    ASSERT_FALSE(kept_in_sec_cache);
     ASSERT_NE(sec_handle0, nullptr);
     ASSERT_TRUE(sec_handle0->IsReady());
     auto value = static_cast<BlobContents*>(sec_handle0->Value());
@@ -1242,12 +1242,12 @@ TEST_F(BlobSecondaryCacheTest, GetBlobsFromSecondaryCache) {
     ASSERT_NE(handle1, nullptr);
     blob_cache->Release(handle1);
 
-    bool is_in_sec_cache = false;
+    bool kept_in_sec_cache = false;
     auto sec_handle1 = secondary_cache->Lookup(
         key1, &BlobSource::SharedCacheInterface::kFullHelper,
         /*context*/ nullptr, true,
-        /*advise_erase=*/true, is_in_sec_cache);
-    ASSERT_FALSE(is_in_sec_cache);
+        /*advise_erase=*/true, kept_in_sec_cache);
+    ASSERT_FALSE(kept_in_sec_cache);
     ASSERT_EQ(sec_handle1, nullptr);
 
     ASSERT_TRUE(blob_source.TEST_BlobInCache(file_number, file_size,

include/rocksdb/secondary_cache.h
@@ -99,12 +99,12 @@ class SecondaryCache : public Customizable {
   // needs to return true.
   // This hint can also be safely ignored.
   //
-  // is_in_sec_cache is to indicate whether the handle is possibly erased
-  // from the secondary cache after the Lookup.
+  // kept_in_sec_cache is to indicate whether the entry will be kept in the
+  // secondary cache after the Lookup (rather than erased because of Lookup)
   virtual std::unique_ptr<SecondaryCacheResultHandle> Lookup(
       const Slice& key, const Cache::CacheItemHelper* helper,
       Cache::CreateContext* create_context, bool wait, bool advise_erase,
-      bool& is_in_sec_cache) = 0;
+      bool& kept_in_sec_cache) = 0;
 
   // Indicate whether a handle can be erased in this secondary cache.
   [[nodiscard]] virtual bool SupportForceErase() const = 0;
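
To make the renamed contract concrete, a miniature secondary cache sketch (hypothetical `MiniSecCache`, for illustration only; it follows the same convention the `compressed_secondary_cache.cc` hunk above implements):

```cpp
#include <memory>
#include <string>
#include <unordered_map>

// Hypothetical miniature secondary cache, for illustration only, showing how
// an implementation reports the renamed out-parameter.
class MiniSecCache {
 public:
  std::shared_ptr<std::string> Lookup(const std::string& key,
                                      bool advise_erase,
                                      bool& kept_in_sec_cache) {
    kept_in_sec_cache = false;  // default for a miss
    auto it = map_.find(key);
    if (it == map_.end()) {
      return nullptr;
    }
    std::shared_ptr<std::string> value = it->second;
    if (advise_erase) {
      map_.erase(it);  // handed off to the caller: not kept here
    } else {
      kept_in_sec_cache = true;  // entry stays resident after the Lookup
    }
    return value;
  }

 private:
  std::unordered_map<std::string, std::shared_ptr<std::string>> map_;
};
```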

options/customizable_test.cc
@@ -1236,8 +1236,8 @@ class TestSecondaryCache : public SecondaryCache {
   std::unique_ptr<SecondaryCacheResultHandle> Lookup(
       const Slice& /*key*/, const Cache::CacheItemHelper* /*helper*/,
       Cache::CreateContext* /*create_context*/, bool /*wait*/,
-      bool /*advise_erase*/, bool& is_in_sec_cache) override {
-    is_in_sec_cache = true;
+      bool /*advise_erase*/, bool& kept_in_sec_cache) override {
+    kept_in_sec_cache = true;
     return nullptr;
   }

utilities/fault_injection_secondary_cache.cc
@@ -92,18 +92,18 @@ FaultInjectionSecondaryCache::Lookup(const Slice& key,
                                      const Cache::CacheItemHelper* helper,
                                      Cache::CreateContext* create_context,
                                      bool wait, bool advise_erase,
-                                     bool& is_in_sec_cache) {
+                                     bool& kept_in_sec_cache) {
   ErrorContext* ctx = GetErrorContext();
   if (base_is_compressed_sec_cache_) {
     if (ctx->rand.OneIn(prob_)) {
       return nullptr;
     } else {
       return base_->Lookup(key, helper, create_context, wait, advise_erase,
-                           is_in_sec_cache);
+                           kept_in_sec_cache);
     }
   } else {
     std::unique_ptr<SecondaryCacheResultHandle> hdl = base_->Lookup(
-        key, helper, create_context, wait, advise_erase, is_in_sec_cache);
+        key, helper, create_context, wait, advise_erase, kept_in_sec_cache);
     if (wait && ctx->rand.OneIn(prob_)) {
       hdl.reset();
     }

utilities/fault_injection_secondary_cache.h
@@ -37,7 +37,7 @@ class FaultInjectionSecondaryCache : public SecondaryCache {
   std::unique_ptr<SecondaryCacheResultHandle> Lookup(
       const Slice& key, const Cache::CacheItemHelper* helper,
       Cache::CreateContext* create_context, bool wait, bool advise_erase,
-      bool& is_in_sec_cache) override;
+      bool& kept_in_sec_cache) override;
 
   bool SupportForceErase() const override { return base_->SupportForceErase(); }
