diff --git a/cache/clock_cache.cc b/cache/clock_cache.cc
index 9582910ba..2a26cf07f 100644
--- a/cache/clock_cache.cc
+++ b/cache/clock_cache.cc
@@ -364,21 +364,22 @@ inline bool HyperClockTable::ChargeUsageMaybeEvictNonStrict(
return true;
}
-inline HyperClockTable::HandleImpl* HyperClockTable::DetachedInsert(
+inline HyperClockTable::HandleImpl* HyperClockTable::StandaloneInsert(
const ClockHandleBasicData& proto) {
// Heap allocated separate from table
HandleImpl* h = new HandleImpl();
ClockHandleBasicData* h_alias = h;
*h_alias = proto;
- h->SetDetached();
- // Single reference (detached entries only created if returning a refed
+ h->SetStandalone();
+ // Single reference (standalone entries only created if returning a refed
// Handle back to user)
uint64_t meta = uint64_t{ClockHandle::kStateInvisible}
<< ClockHandle::kStateShift;
meta |= uint64_t{1} << ClockHandle::kAcquireCounterShift;
h->meta.store(meta, std::memory_order_release);
- // Keep track of how much of usage is detached
- detached_usage_.fetch_add(proto.GetTotalCharge(), std::memory_order_relaxed);
+ // Keep track of how much of usage is standalone
+ standalone_usage_.fetch_add(proto.GetTotalCharge(),
+ std::memory_order_relaxed);
return h;
}
@@ -396,7 +397,7 @@ Status HyperClockTable::Insert(const ClockHandleBasicData& proto,
// Usage/capacity handling is somewhat different depending on
// strict_capacity_limit, but mostly pessimistic.
- bool use_detached_insert = false;
+ bool use_standalone_insert = false;
const size_t total_charge = proto.GetTotalCharge();
if (strict_capacity_limit) {
Status s = ChargeUsageMaybeEvictStrict(total_charge, capacity,
@@ -417,9 +418,9 @@ Status HyperClockTable::Insert(const ClockHandleBasicData& proto,
proto.FreeData(allocator_);
return Status::OK();
} else {
- // Need to track usage of fallback detached insert
+ // Need to track usage of fallback standalone insert
usage_.fetch_add(total_charge, std::memory_order_relaxed);
- use_detached_insert = true;
+ use_standalone_insert = true;
}
}
}
@@ -429,7 +430,7 @@ Status HyperClockTable::Insert(const ClockHandleBasicData& proto,
assert(usage_.load(std::memory_order_relaxed) < SIZE_MAX / 2);
};
- if (!use_detached_insert) {
+ if (!use_standalone_insert) {
// Attempt a table insert, but abort if we find an existing entry for the
// key. If we were to overwrite old entries, we would either
// * Have to gain ownership over an existing entry to overwrite it, which
@@ -500,8 +501,8 @@ Status HyperClockTable::Insert(const ClockHandleBasicData& proto,
std::memory_order_acq_rel);
// Correct for possible (but rare) overflow
CorrectNearOverflow(old_meta, h->meta);
- // Insert detached instead (only if return handle needed)
- use_detached_insert = true;
+ // Insert standalone instead (only if return handle needed)
+ use_standalone_insert = true;
return true;
} else {
// Mismatch. Pretend we never took the reference
@@ -539,9 +540,9 @@ Status HyperClockTable::Insert(const ClockHandleBasicData& proto,
// That should be infeasible for roughly n >= 256, so if this assertion
// fails, that suggests something is going wrong.
assert(GetTableSize() < 256);
- use_detached_insert = true;
+ use_standalone_insert = true;
}
- if (!use_detached_insert) {
+ if (!use_standalone_insert) {
// Successfully inserted
if (handle) {
*handle = e;
@@ -551,7 +552,7 @@ Status HyperClockTable::Insert(const ClockHandleBasicData& proto,
// Roll back table insertion
Rollback(proto.hashed_key, e);
revert_occupancy_fn();
- // Maybe fall back on detached insert
+ // Maybe fall back on standalone insert
if (handle == nullptr) {
revert_usage_fn();
// As if unrefed entry immediately evicted
@@ -560,16 +561,16 @@ Status HyperClockTable::Insert(const ClockHandleBasicData& proto,
}
}
- // Run detached insert
- assert(use_detached_insert);
+ // Run standalone insert
+ assert(use_standalone_insert);
- *handle = DetachedInsert(proto);
+ *handle = StandaloneInsert(proto);
// The OkOverwritten status is used to count "redundant" insertions into
// block cache. This implementation doesn't strictly check for redundant
// insertions, but we instead are probably interested in how many insertions
- // didn't go into the table (instead "detached"), which could be redundant
- // Insert or some other reason (use_detached_insert reasons above).
+ // didn't go into the table (instead "standalone"), which could be redundant
+ // Insert or some other reason (use_standalone_insert reasons above).
return Status::OkOverwritten();
}
@@ -696,11 +697,11 @@ bool HyperClockTable::Release(HandleImpl* h, bool useful,
std::memory_order_acquire));
// Took ownership
size_t total_charge = h->GetTotalCharge();
- if (UNLIKELY(h->IsDetached())) {
+ if (UNLIKELY(h->IsStandalone())) {
h->FreeData(allocator_);
- // Delete detached handle
+ // Delete standalone handle
delete h;
- detached_usage_.fetch_sub(total_charge, std::memory_order_relaxed);
+ standalone_usage_.fetch_sub(total_charge, std::memory_order_relaxed);
usage_.fetch_sub(total_charge, std::memory_order_relaxed);
} else {
Rollback(h->hashed_key, h);
@@ -1156,8 +1157,8 @@ size_t ClockCacheShard<Table>::GetUsage() const {
}
template <class Table>
-size_t ClockCacheShard<Table>::GetDetachedUsage() const {
- return table_.GetDetachedUsage();
+size_t ClockCacheShard<Table>::GetStandaloneUsage() const {
+ return table_.GetStandaloneUsage();
}
template <class Table>
@@ -1191,7 +1192,7 @@ size_t ClockCacheShard<Table>::GetPinnedUsage() const {
},
0, table_.GetTableSize(), true);
- return table_pinned_usage + table_.GetDetachedUsage();
+ return table_pinned_usage + table_.GetStandaloneUsage();
}
template <class Table>
@@ -1259,7 +1260,7 @@ namespace {
void AddShardEvaluation(const HyperClockCache::Shard& shard,
std::vector<double>& predicted_load_factors,
size_t& min_recommendation) {
- size_t usage = shard.GetUsage() - shard.GetDetachedUsage();
+ size_t usage = shard.GetUsage() - shard.GetStandaloneUsage();
size_t capacity = shard.GetCapacity();
double usage_ratio = 1.0 * usage / capacity;
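The clock_cache.cc changes above only rename the fallback path from "detached" to "standalone"; the control flow is unchanged. As a reading aid, here is a condensed, hypothetical sketch of when Insert takes that fallback. The flattened branch structure and all names below are mine, not RocksDB's; the real function interleaves these checks with the table probe, usage accounting, and rollback.

```cpp
enum class InsertOutcome { kOk, kOkOverwritten };

// Condensed sketch of the standalone-insert fallback decision in
// HyperClockTable::Insert. The four booleans stand in for state the real
// function computes along the way.
InsertOutcome InsertSketch(bool charge_ok, bool found_existing_key,
                           bool table_full, bool caller_wants_handle) {
  bool use_standalone_insert = false;
  if (!charge_ok) {
    // Could not charge capacity (non-strict path shown above).
    if (!caller_wants_handle) {
      return InsertOutcome::kOk;   // entry simply dropped
    }
    use_standalone_insert = true;  // must still hand back a usable handle
  } else if (found_existing_key && caller_wants_handle) {
    use_standalone_insert = true;  // never overwrite an existing table entry
  } else if (table_full) {
    use_standalone_insert = true;  // occupancy exhausted despite eviction
  }
  if (!use_standalone_insert) {
    return InsertOutcome::kOk;     // normal in-table insert
  }
  // Here StandaloneInsert(proto) heap-allocates a refed, Invisible handle
  // and charges it to standalone_usage_; OkOverwritten is returned so such
  // insertions can be counted.
  return InsertOutcome::kOkOverwritten;
}
```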
diff --git a/cache/clock_cache.h b/cache/clock_cache.h
index 01185849b..454ee1704 100644
--- a/cache/clock_cache.h
+++ b/cache/clock_cache.h
@@ -145,7 +145,7 @@ class ClockCacheTest;
// (erased by user) but can be read by existing references, and ref count
// changed by Ref and Release.
//
-// A special case is "detached" entries, which are heap-allocated handles
+// A special case is "standalone" entries, which are heap-allocated handles
// not in the table. They are always Invisible and freed on zero refs.
//
// State transitions:
@@ -200,8 +200,8 @@ class ClockCacheTest;
// table occupancy limit has been reached. If strict_capacity_limit=false,
// we must never fail Insert, and if a Handle* is provided, we have to return
// a usable Cache handle on success. The solution to this (typically rare)
-// problem is "detached" handles, which are usable by the caller but not
-// actually available for Lookup in the Cache. Detached handles are allocated
+// problem is "standalone" handles, which are usable by the caller but not
+// actually available for Lookup in the Cache. Standalone handles are allocated
// independently on the heap and specially marked so that they are freed on
// the heap when their last reference is released.
//
@@ -312,12 +312,6 @@ struct ClockHandleBasicData {
UniqueId64x2 hashed_key = kNullUniqueId64x2;
size_t total_charge = 0;
- // For total_charge_and_flags
- // "Detached" means the handle is allocated separately from hash table.
- static constexpr uint64_t kFlagDetached = uint64_t{1} << 63;
- // Extract just the total charge
- static constexpr uint64_t kTotalChargeMask = kFlagDetached - 1;
-
inline size_t GetTotalCharge() const { return total_charge; }
// Calls deleter (if non-null) on cache key and value
@@ -398,11 +392,11 @@ class HyperClockTable {
// TODO: ideally this would be packed into some other data field, such
// as upper bits of total_charge, but that incurs a measurable performance
// regression.
- bool detached = false;
+ bool standalone = false;
- inline bool IsDetached() const { return detached; }
+ inline bool IsStandalone() const { return standalone; }
- inline void SetDetached() { detached = true; }
+ inline void SetStandalone() { standalone = true; }
}; // struct HandleImpl
struct Opts {
@@ -444,8 +438,8 @@ class HyperClockTable {
size_t GetUsage() const { return usage_.load(std::memory_order_relaxed); }
- size_t GetDetachedUsage() const {
- return detached_usage_.load(std::memory_order_relaxed);
+ size_t GetStandaloneUsage() const {
+ return standalone_usage_.load(std::memory_order_relaxed);
}
// Acquire/release N references
@@ -514,10 +508,10 @@ class HyperClockTable {
size_t capacity,
bool need_evict_for_occupancy);
- // Creates a "detached" handle for returning from an Insert operation that
+ // Creates a "standalone" handle for returning from an Insert operation that
// cannot be completed by actually inserting into the table.
- // Updates `detached_usage_` but not `usage_` nor `occupancy_`.
- inline HandleImpl* DetachedInsert(const ClockHandleBasicData& proto);
+ // Updates `standalone_usage_` but not `usage_` nor `occupancy_`.
+ inline HandleImpl* StandaloneInsert(const ClockHandleBasicData& proto);
MemoryAllocator* GetAllocator() const { return allocator_; }
@@ -555,11 +549,11 @@ class HyperClockTable {
// Number of elements in the table.
std::atomic<size_t> occupancy_{};
- // Memory usage by entries tracked by the cache (including detached)
+ // Memory usage by entries tracked by the cache (including standalone)
std::atomic<size_t> usage_{};
- // Part of usage by detached entries (not in table)
- std::atomic<size_t> detached_usage_{};
+ // Part of usage by standalone entries (not in table)
+ std::atomic<size_t> standalone_usage_{};
}; // class HyperClockTable
// A single shard of sharded cache.
@@ -623,7 +617,7 @@ class ALIGN_AS(CACHE_LINE_SIZE) ClockCacheShard final : public CacheShardBase {
size_t GetUsage() const;
- size_t GetDetachedUsage() const;
+ size_t GetStandaloneUsage() const;
size_t GetPinnedUsage() const;
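The renamed counters keep the same accounting relationships: `usage_` covers everything charged to the shard, `standalone_usage_` is the slice of it held by heap-allocated handles outside the table, and (as the GetPinnedUsage and AddShardEvaluation hunks above show) callers derive in-table and pinned usage from the pair. A minimal sketch of those two derived quantities, using a hypothetical stand-in struct rather than the real class:

```cpp
#include <cstddef>

// Hypothetical stand-in for the two atomics above, not RocksDB's class.
struct TableCounters {
  size_t usage = 0;             // all charged entries, standalone included
  size_t standalone_usage = 0;  // subset: heap-allocated, not in the table
};

// Usage attributable to entries actually resident in the hash table; this is
// what AddShardEvaluation divides by capacity to predict the load factor.
inline size_t InTableUsage(const TableCounters& t) {
  return t.usage - t.standalone_usage;
}

// Standalone handles exist only while a caller holds a reference, so they are
// always pinned and are added on top of the table's pinned usage.
inline size_t PinnedUsage(const TableCounters& t, size_t table_pinned_usage) {
  return table_pinned_usage + t.standalone_usage;
}
```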
diff --git a/cache/compressed_secondary_cache.cc b/cache/compressed_secondary_cache.cc
index 1b97379de..affea8c54 100644
--- a/cache/compressed_secondary_cache.cc
+++ b/cache/compressed_secondary_cache.cc
@@ -40,10 +40,10 @@ CompressedSecondaryCache::~CompressedSecondaryCache() { cache_.reset(); }
std::unique_ptr<SecondaryCacheResultHandle> CompressedSecondaryCache::Lookup(
const Slice& key, const Cache::CacheItemHelper* helper,
Cache::CreateContext* create_context, bool /*wait*/, bool advise_erase,
- bool& is_in_sec_cache) {
+ bool& kept_in_sec_cache) {
assert(helper);
std::unique_ptr<SecondaryCacheResultHandle> handle;
- is_in_sec_cache = false;
+ kept_in_sec_cache = false;
Cache::Handle* lru_handle = cache_->Lookup(key);
if (lru_handle == nullptr) {
return nullptr;
@@ -109,7 +109,7 @@ std::unique_ptr<SecondaryCacheResultHandle> CompressedSecondaryCache::Lookup(
/*charge=*/0)
.PermitUncheckedError();
} else {
- is_in_sec_cache = true;
+ kept_in_sec_cache = true;
cache_->Release(lru_handle, /*erase_if_last_ref=*/false);
}
handle.reset(new CompressedSecondaryCacheResultHandle(value, charge));
diff --git a/cache/compressed_secondary_cache.h b/cache/compressed_secondary_cache.h
index 3a85c369d..0e90945cb 100644
--- a/cache/compressed_secondary_cache.h
+++ b/cache/compressed_secondary_cache.h
@@ -91,7 +91,7 @@ class CompressedSecondaryCache : public SecondaryCache {
std::unique_ptr<SecondaryCacheResultHandle> Lookup(
const Slice& key, const Cache::CacheItemHelper* helper,
Cache::CreateContext* create_context, bool /*wait*/, bool advise_erase,
- bool& is_in_sec_cache) override;
+ bool& kept_in_sec_cache) override;
bool SupportForceErase() const override { return true; }
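For callers, the rename clarifies what the out-parameter answers: not "was it in the secondary cache?" but "is it still there after this Lookup?" (CompressedSecondaryCache sets it to true only when it keeps its copy, as in the hunk above). Below is a hedged caller-side sketch against the SecondaryCache interface; the wrapper function is mine, and `helper`/`create_context` are whatever the caller already has for the entry.

```cpp
#include <memory>
#include "rocksdb/secondary_cache.h"

using ROCKSDB_NAMESPACE::Cache;
using ROCKSDB_NAMESPACE::SecondaryCache;
using ROCKSDB_NAMESPACE::SecondaryCacheResultHandle;
using ROCKSDB_NAMESPACE::Slice;

std::unique_ptr<SecondaryCacheResultHandle> LookupAndMaybePromote(
    SecondaryCache* sec_cache, const Slice& key,
    const Cache::CacheItemHelper* helper,
    Cache::CreateContext* create_context, bool& kept_in_sec_cache) {
  auto handle = sec_cache->Lookup(key, helper, create_context, /*wait=*/true,
                                  /*advise_erase=*/true, kept_in_sec_cache);
  if (handle && !kept_in_sec_cache) {
    // The secondary cache released its copy as part of this Lookup (e.g. the
    // advise_erase hint was honored), so the caller should promote the object
    // into the primary cache rather than expect to find it here again.
  }
  return handle;
}
```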
diff --git a/cache/compressed_secondary_cache_test.cc b/cache/compressed_secondary_cache_test.cc
index ca0f2621e..c05582883 100644
--- a/cache/compressed_secondary_cache_test.cc
+++ b/cache/compressed_secondary_cache_test.cc
@@ -100,10 +100,10 @@ class CompressedSecondaryCacheTest : public testing::Test,
void BasicTestHelper(std::shared_ptr<SecondaryCache> sec_cache,
bool sec_cache_is_compressed) {
get_perf_context()->Reset();
- bool is_in_sec_cache{true};
+ bool kept_in_sec_cache{true};
// Lookup a non-existent key.
std::unique_ptr<SecondaryCacheResultHandle> handle0 = sec_cache->Lookup(
- "k0", &kHelper, this, true, /*advise_erase=*/true, is_in_sec_cache);
+ "k0", &kHelper, this, true, /*advise_erase=*/true, kept_in_sec_cache);
ASSERT_EQ(handle0, nullptr);
Random rnd(301);
@@ -117,7 +117,7 @@ class CompressedSecondaryCacheTest : public testing::Test,
ASSERT_EQ(get_perf_context()->compressed_sec_cache_compressed_bytes, 0);
std::unique_ptr<SecondaryCacheResultHandle> handle1_1 = sec_cache->Lookup(
- "k1", &kHelper, this, true, /*advise_erase=*/false, is_in_sec_cache);
+ "k1", &kHelper, this, true, /*advise_erase=*/false, kept_in_sec_cache);
ASSERT_EQ(handle1_1, nullptr);
// Insert and Lookup the item k1 for the second time and advise erasing it.
@@ -125,9 +125,9 @@ class CompressedSecondaryCacheTest : public testing::Test,
ASSERT_EQ(get_perf_context()->compressed_sec_cache_insert_real_count, 1);
std::unique_ptr<SecondaryCacheResultHandle> handle1_2 = sec_cache->Lookup(
- "k1", &kHelper, this, true, /*advise_erase=*/true, is_in_sec_cache);
+ "k1", &kHelper, this, true, /*advise_erase=*/true, kept_in_sec_cache);
ASSERT_NE(handle1_2, nullptr);
- ASSERT_FALSE(is_in_sec_cache);
+ ASSERT_FALSE(kept_in_sec_cache);
if (sec_cache_is_compressed) {
ASSERT_EQ(get_perf_context()->compressed_sec_cache_uncompressed_bytes,
1000);
@@ -145,7 +145,7 @@ class CompressedSecondaryCacheTest : public testing::Test,
// Lookup the item k1 again.
std::unique_ptr<SecondaryCacheResultHandle> handle1_3 = sec_cache->Lookup(
- "k1", &kHelper, this, true, /*advise_erase=*/true, is_in_sec_cache);
+ "k1", &kHelper, this, true, /*advise_erase=*/true, kept_in_sec_cache);
ASSERT_EQ(handle1_3, nullptr);
// Insert and Lookup the item k2.
@@ -154,7 +154,7 @@ class CompressedSecondaryCacheTest : public testing::Test,
ASSERT_OK(sec_cache->Insert("k2", &item2, &kHelper));
ASSERT_EQ(get_perf_context()->compressed_sec_cache_insert_dummy_count, 2);
std::unique_ptr<SecondaryCacheResultHandle> handle2_1 = sec_cache->Lookup(
- "k2", &kHelper, this, true, /*advise_erase=*/false, is_in_sec_cache);
+ "k2", &kHelper, this, true, /*advise_erase=*/false, kept_in_sec_cache);
ASSERT_EQ(handle2_1, nullptr);
ASSERT_OK(sec_cache->Insert("k2", &item2, &kHelper));
@@ -169,7 +169,7 @@ class CompressedSecondaryCacheTest : public testing::Test,
ASSERT_EQ(get_perf_context()->compressed_sec_cache_compressed_bytes, 0);
}
std::unique_ptr<SecondaryCacheResultHandle> handle2_2 = sec_cache->Lookup(
- "k2", &kHelper, this, true, /*advise_erase=*/false, is_in_sec_cache);
+ "k2", &kHelper, this, true, /*advise_erase=*/false, kept_in_sec_cache);
ASSERT_NE(handle2_2, nullptr);
std::unique_ptr<TestItem> val2 =
std::unique_ptr<TestItem>(static_cast<TestItem*>(handle2_2->Value()));
@@ -247,15 +247,15 @@ class CompressedSecondaryCacheTest : public testing::Test,
TestItem item2(str2.data(), str2.length());
// Insert a dummy handle, k1 is not evicted.
ASSERT_OK(sec_cache->Insert("k2", &item2, &kHelper));
- bool is_in_sec_cache{false};
+ bool kept_in_sec_cache{false};
std::unique_ptr<SecondaryCacheResultHandle> handle1 = sec_cache->Lookup(
- "k1", &kHelper, this, true, /*advise_erase=*/false, is_in_sec_cache);
+ "k1", &kHelper, this, true, /*advise_erase=*/false, kept_in_sec_cache);
ASSERT_EQ(handle1, nullptr);
// Insert k2 and k1 is evicted.
ASSERT_OK(sec_cache->Insert("k2", &item2, &kHelper));
std::unique_ptr<SecondaryCacheResultHandle> handle2 = sec_cache->Lookup(
- "k2", &kHelper, this, true, /*advise_erase=*/false, is_in_sec_cache);
+ "k2", &kHelper, this, true, /*advise_erase=*/false, kept_in_sec_cache);
ASSERT_NE(handle2, nullptr);
std::unique_ptr<TestItem> val2 =
std::unique_ptr<TestItem>(static_cast<TestItem*>(handle2->Value()));
@@ -266,13 +266,13 @@ class CompressedSecondaryCacheTest : public testing::Test,
ASSERT_OK(sec_cache->Insert("k1", &item1, &kHelper));
std::unique_ptr<SecondaryCacheResultHandle> handle1_1 = sec_cache->Lookup(
- "k1", &kHelper, this, true, /*advise_erase=*/false, is_in_sec_cache);
+ "k1", &kHelper, this, true, /*advise_erase=*/false, kept_in_sec_cache);
ASSERT_EQ(handle1_1, nullptr);
// Create Fails.
SetFailCreate(true);
std::unique_ptr<SecondaryCacheResultHandle> handle2_1 = sec_cache->Lookup(
- "k2", &kHelper, this, true, /*advise_erase=*/true, is_in_sec_cache);
+ "k2", &kHelper, this, true, /*advise_erase=*/true, kept_in_sec_cache);
ASSERT_EQ(handle2_1, nullptr);
// Save Fails.
@@ -970,10 +970,10 @@ TEST_P(CompressedSecondaryCacheTestWithCompressionParam, EntryRoles) {
ASSERT_OK(sec_cache->Insert(ith_key, &item, &kHelperByRole[i]));
ASSERT_EQ(get_perf_context()->compressed_sec_cache_insert_real_count, 1U);
- bool is_in_sec_cache{true};
+ bool kept_in_sec_cache{true};
std::unique_ptr<SecondaryCacheResultHandle> handle =
sec_cache->Lookup(ith_key, &kHelperByRole[i], this, true,
- /*advise_erase=*/true, is_in_sec_cache);
+ /*advise_erase=*/true, kept_in_sec_cache);
ASSERT_NE(handle, nullptr);
// Lookup returns the right data
diff --git a/cache/lru_cache.cc b/cache/lru_cache.cc
index 185f1d870..f9bb8e19f 100644
--- a/cache/lru_cache.cc
+++ b/cache/lru_cache.cc
@@ -555,10 +555,10 @@ LRUHandle* LRUCacheShard::Lookup(const Slice& key, uint32_t hash,
// again, we erase it from CompressedSecondaryCache and add it into the
// primary cache.
if (!e && secondary_cache_ && helper && helper->create_cb) {
- bool is_in_sec_cache{false};
+ bool kept_in_sec_cache{false};
std::unique_ptr<SecondaryCacheResultHandle> secondary_handle =
secondary_cache_->Lookup(key, helper, create_context, wait,
- found_dummy_entry, is_in_sec_cache);
+ found_dummy_entry, kept_in_sec_cache);
if (secondary_handle != nullptr) {
e = static_cast<LRUHandle*>(malloc(sizeof(LRUHandle) - 1 + key.size()));
@@ -575,7 +575,7 @@ LRUHandle* LRUCacheShard::Lookup(const Slice& key, uint32_t hash,
e->sec_handle = secondary_handle.release();
e->total_charge = 0;
e->Ref();
- e->SetIsInSecondaryCache(is_in_sec_cache);
+ e->SetIsInSecondaryCache(kept_in_sec_cache);
e->SetIsStandalone(secondary_cache_->SupportForceErase() &&
!found_dummy_entry);
diff --git a/cache/lru_cache_test.cc b/cache/lru_cache_test.cc
index 02e25c87d..350411058 100644
--- a/cache/lru_cache_test.cc
+++ b/cache/lru_cache_test.cc
@@ -934,12 +934,12 @@ class TestSecondaryCache : public SecondaryCache {
std::unique_ptr<SecondaryCacheResultHandle> Lookup(
const Slice& key, const Cache::CacheItemHelper* helper,
Cache::CreateContext* create_context, bool /*wait*/,
- bool /*advise_erase*/, bool& is_in_sec_cache) override {
+ bool /*advise_erase*/, bool& kept_in_sec_cache) override {
std::string key_str = key.ToString();
TEST_SYNC_POINT_CALLBACK("TestSecondaryCache::Lookup", &key_str);
std::unique_ptr<SecondaryCacheResultHandle> secondary_handle;
- is_in_sec_cache = false;
+ kept_in_sec_cache = false;
ResultType type = ResultType::SUCCESS;
auto iter = result_map_.find(key.ToString());
if (iter != result_map_.end()) {
@@ -965,7 +965,7 @@ class TestSecondaryCache : public SecondaryCache {
if (s.ok()) {
secondary_handle.reset(new TestSecondaryCacheResultHandle(
cache_.get(), handle, value, charge, type));
- is_in_sec_cache = true;
+ kept_in_sec_cache = true;
} else {
cache_.Release(handle);
}
diff --git a/db/blob/blob_source_test.cc b/db/blob/blob_source_test.cc
index fdbb69a94..e0827c8c5 100644
--- a/db/blob/blob_source_test.cc
+++ b/db/blob/blob_source_test.cc
@@ -1214,12 +1214,12 @@ TEST_F(BlobSecondaryCacheTest, GetBlobsFromSecondaryCache) {
ASSERT_EQ(handle0, nullptr);
// key0's item should be in the secondary cache.
- bool is_in_sec_cache = false;
+ bool kept_in_sec_cache = false;
auto sec_handle0 = secondary_cache->Lookup(
key0, &BlobSource::SharedCacheInterface::kFullHelper,
/*context*/ nullptr, true,
- /*advise_erase=*/true, is_in_sec_cache);
- ASSERT_FALSE(is_in_sec_cache);
+ /*advise_erase=*/true, kept_in_sec_cache);
+ ASSERT_FALSE(kept_in_sec_cache);
ASSERT_NE(sec_handle0, nullptr);
ASSERT_TRUE(sec_handle0->IsReady());
auto value = static_cast<BlobContents*>(sec_handle0->Value());
@@ -1242,12 +1242,12 @@ TEST_F(BlobSecondaryCacheTest, GetBlobsFromSecondaryCache) {
ASSERT_NE(handle1, nullptr);
blob_cache->Release(handle1);
- bool is_in_sec_cache = false;
+ bool kept_in_sec_cache = false;
auto sec_handle1 = secondary_cache->Lookup(
key1, &BlobSource::SharedCacheInterface::kFullHelper,
/*context*/ nullptr, true,
- /*advise_erase=*/true, is_in_sec_cache);
- ASSERT_FALSE(is_in_sec_cache);
+ /*advise_erase=*/true, kept_in_sec_cache);
+ ASSERT_FALSE(kept_in_sec_cache);
ASSERT_EQ(sec_handle1, nullptr);
ASSERT_TRUE(blob_source.TEST_BlobInCache(file_number, file_size,
diff --git a/include/rocksdb/secondary_cache.h b/include/rocksdb/secondary_cache.h
index d2bb29f5c..b8deee66f 100644
--- a/include/rocksdb/secondary_cache.h
+++ b/include/rocksdb/secondary_cache.h
@@ -99,12 +99,12 @@ class SecondaryCache : public Customizable {
// needs to return true.
// This hint can also be safely ignored.
//
- // is_in_sec_cache is to indicate whether the handle is possibly erased
- // from the secondary cache after the Lookup.
+ // kept_in_sec_cache is to indicate whether the entry will be kept in the
+ // secondary cache after the Lookup (rather than erased because of Lookup)
virtual std::unique_ptr<SecondaryCacheResultHandle> Lookup(
const Slice& key, const Cache::CacheItemHelper* helper,
Cache::CreateContext* create_context, bool wait, bool advise_erase,
- bool& is_in_sec_cache) = 0;
+ bool& kept_in_sec_cache) = 0;
// Indicate whether a handle can be erased in this secondary cache.
[[nodiscard]] virtual bool SupportForceErase() const = 0;
diff --git a/options/customizable_test.cc b/options/customizable_test.cc
index 2f4f34ee9..d18335410 100644
--- a/options/customizable_test.cc
+++ b/options/customizable_test.cc
@@ -1236,8 +1236,8 @@ class TestSecondaryCache : public SecondaryCache {
std::unique_ptr<SecondaryCacheResultHandle> Lookup(
const Slice& /*key*/, const Cache::CacheItemHelper* /*helper*/,
Cache::CreateContext* /*create_context*/, bool /*wait*/,
- bool /*advise_erase*/, bool& is_in_sec_cache) override {
- is_in_sec_cache = true;
+ bool /*advise_erase*/, bool& kept_in_sec_cache) override {
+ kept_in_sec_cache = true;
return nullptr;
}
diff --git a/utilities/fault_injection_secondary_cache.cc b/utilities/fault_injection_secondary_cache.cc
index d24e92f06..d7a2a1bd7 100644
--- a/utilities/fault_injection_secondary_cache.cc
+++ b/utilities/fault_injection_secondary_cache.cc
@@ -92,18 +92,18 @@ FaultInjectionSecondaryCache::Lookup(const Slice& key,
const Cache::CacheItemHelper* helper,
Cache::CreateContext* create_context,
bool wait, bool advise_erase,
- bool& is_in_sec_cache) {
+ bool& kept_in_sec_cache) {
ErrorContext* ctx = GetErrorContext();
if (base_is_compressed_sec_cache_) {
if (ctx->rand.OneIn(prob_)) {
return nullptr;
} else {
return base_->Lookup(key, helper, create_context, wait, advise_erase,
- is_in_sec_cache);
+ kept_in_sec_cache);
}
} else {
std::unique_ptr<SecondaryCacheResultHandle> hdl = base_->Lookup(
- key, helper, create_context, wait, advise_erase, is_in_sec_cache);
+ key, helper, create_context, wait, advise_erase, kept_in_sec_cache);
if (wait && ctx->rand.OneIn(prob_)) {
hdl.reset();
}
diff --git a/utilities/fault_injection_secondary_cache.h b/utilities/fault_injection_secondary_cache.h
index 47585e30e..ed89f655a 100644
--- a/utilities/fault_injection_secondary_cache.h
+++ b/utilities/fault_injection_secondary_cache.h
@@ -37,7 +37,7 @@ class FaultInjectionSecondaryCache : public SecondaryCache {
std::unique_ptr<SecondaryCacheResultHandle> Lookup(
const Slice& key, const Cache::CacheItemHelper* helper,
Cache::CreateContext* create_context, bool wait, bool advise_erase,
- bool& is_in_sec_cache) override;
+ bool& kept_in_sec_cache) override;
bool SupportForceErase() const override { return base_->SupportForceErase(); }