diff --git a/HISTORY.md b/HISTORY.md
index 7aef62cae..f11553fe6 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -4,6 +4,9 @@
* Introduced a new option `block_protection_bytes_per_key`, which can be used to enable per key-value integrity protection for in-memory blocks in block cache (#11287).
* Added `JemallocAllocatorOptions::num_arenas`. Setting `num_arenas > 1` may mitigate mutex contention in the allocator, particularly in scenarios where block allocations commonly bypass jemalloc tcache.
+### Public API Changes
+* Added `MakeSharedCache()` construction functions to various cache options objects, and deprecated the `NewWhateverCache()` functions with long parameter lists.
+
## 8.2.0 (04/24/2023)
### Public API Changes
* `SstFileWriter::DeleteRange()` now returns `Status::InvalidArgument` if the range's end key comes before its start key according to the user comparator. Previously the behavior was undefined.
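To illustrate the change this entry describes, here is a minimal sketch of the old and new construction styles. It assumes the `LRUCacheOptions` default constructor and `capacity` field from `include/rocksdb/cache.h` (not shown in this diff); the 64 MiB capacity is an arbitrary example value.

```cpp
#include <memory>

#include "rocksdb/cache.h"

int main() {
  using namespace ROCKSDB_NAMESPACE;

  // Deprecated style: positional parameters on NewLRUCache().
  std::shared_ptr<Cache> old_style = NewLRUCache(64 << 20 /*capacity*/);

  // New style: fill in an options object, then build the cache from it.
  LRUCacheOptions opts;
  opts.capacity = 64 << 20;
  std::shared_ptr<Cache> new_style = opts.MakeSharedCache();
  return 0;
}
```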
diff --git a/cache/clock_cache.cc b/cache/clock_cache.cc
index 12be0babe..80fbbe88f 100644
--- a/cache/clock_cache.cc
+++ b/cache/clock_cache.cc
@@ -1282,25 +1282,20 @@ size_t ClockCacheShard<Table>::GetTableAddressCount() const {
// Explicit instantiation
template class ClockCacheShard<HyperClockTable>;
-HyperClockCache::HyperClockCache(
- size_t capacity, size_t estimated_value_size, int num_shard_bits,
- bool strict_capacity_limit,
- CacheMetadataChargePolicy metadata_charge_policy,
- std::shared_ptr<MemoryAllocator> memory_allocator)
- : ShardedCache(capacity, num_shard_bits, strict_capacity_limit,
- std::move(memory_allocator)) {
- assert(estimated_value_size > 0 ||
- metadata_charge_policy != kDontChargeCacheMetadata);
+HyperClockCache::HyperClockCache(const HyperClockCacheOptions& opts)
+ : ShardedCache(opts) {
+ assert(opts.estimated_entry_charge > 0 ||
+ opts.metadata_charge_policy != kDontChargeCacheMetadata);
// TODO: should not need to go through two levels of pointer indirection to
// get to table entries
size_t per_shard = GetPerShardCapacity();
MemoryAllocator* alloc = this->memory_allocator();
- const Cache::EvictionCallback* eviction_callback = &eviction_callback_;
- InitShards([=](Shard* cs) {
- HyperClockTable::Opts opts;
- opts.estimated_value_size = estimated_value_size;
- new (cs) Shard(per_shard, strict_capacity_limit, metadata_charge_policy,
- alloc, eviction_callback, opts);
+ InitShards([&](Shard* cs) {
+ HyperClockTable::Opts table_opts;
+ table_opts.estimated_value_size = opts.estimated_entry_charge;
+ new (cs) Shard(per_shard, opts.strict_capacity_limit,
+ opts.metadata_charge_policy, alloc, &eviction_callback_,
+ table_opts);
});
}
@@ -1460,21 +1455,23 @@ std::shared_ptr<Cache> NewClockCache(
}
std::shared_ptr<Cache> HyperClockCacheOptions::MakeSharedCache() const {
- auto my_num_shard_bits = num_shard_bits;
- if (my_num_shard_bits >= 20) {
+ // For sanitized options
+ HyperClockCacheOptions opts = *this;
+ if (opts.num_shard_bits >= 20) {
return nullptr; // The cache cannot be sharded into too many fine pieces.
}
- if (my_num_shard_bits < 0) {
+ if (opts.num_shard_bits < 0) {
// Use larger shard size to reduce risk of large entries clustering
// or skewing individual shards.
constexpr size_t min_shard_size = 32U * 1024U * 1024U;
- my_num_shard_bits = GetDefaultCacheShardBits(capacity, min_shard_size);
+ opts.num_shard_bits =
+ GetDefaultCacheShardBits(opts.capacity, min_shard_size);
}
- std::shared_ptr<Cache> cache = std::make_shared<HyperClockCache>(
- capacity, estimated_entry_charge, my_num_shard_bits,
- strict_capacity_limit, metadata_charge_policy, memory_allocator);
- if (secondary_cache) {
- cache = std::make_shared<CacheWithSecondaryAdapter>(cache, secondary_cache);
+ std::shared_ptr<Cache> cache =
+ std::make_shared<HyperClockCache>(opts);
+ if (opts.secondary_cache) {
+ cache = std::make_shared<CacheWithSecondaryAdapter>(cache,
+ opts.secondary_cache);
}
return cache;
}
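A minimal usage sketch of the sanitizing behavior above. It assumes the pre-existing `HyperClockCacheOptions(capacity, estimated_entry_charge, ...)` constructor with `num_shard_bits` defaulting to -1 (not shown in this diff); the sizes are arbitrary example values.

```cpp
#include <cassert>
#include <memory>

#include "rocksdb/cache.h"

int main() {
  using namespace ROCKSDB_NAMESPACE;

  HyperClockCacheOptions opts(1 << 30 /*capacity*/,
                              8 * 1024 /*estimated_entry_charge*/);
  // num_shard_bits defaults to -1, so MakeSharedCache() picks a shard count
  // from the capacity (working on a local copy, leaving `opts` untouched).
  std::shared_ptr<Cache> cache = opts.MakeSharedCache();
  assert(cache != nullptr);

  // Asking for 2^20 or more shards is rejected with nullptr.
  opts.num_shard_bits = 20;
  assert(opts.MakeSharedCache() == nullptr);
  return 0;
}
```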
diff --git a/cache/clock_cache.h b/cache/clock_cache.h
index fc5aef6cb..a9515146a 100644
--- a/cache/clock_cache.h
+++ b/cache/clock_cache.h
@@ -682,10 +682,7 @@ class HyperClockCache
public:
using Shard = ClockCacheShard<HyperClockTable>;
- HyperClockCache(size_t capacity, size_t estimated_value_size,
- int num_shard_bits, bool strict_capacity_limit,
- CacheMetadataChargePolicy metadata_charge_policy,
- std::shared_ptr<MemoryAllocator> memory_allocator);
+ explicit HyperClockCache(const HyperClockCacheOptions& opts);
const char* Name() const override { return "HyperClockCache"; }
diff --git a/cache/compressed_secondary_cache.cc b/cache/compressed_secondary_cache.cc
index affea8c54..2408afc0a 100644
--- a/cache/compressed_secondary_cache.cc
+++ b/cache/compressed_secondary_cache.cc
@@ -17,23 +17,8 @@
namespace ROCKSDB_NAMESPACE {
CompressedSecondaryCache::CompressedSecondaryCache(
- size_t capacity, int num_shard_bits, bool strict_capacity_limit,
- double high_pri_pool_ratio, double low_pri_pool_ratio,
- std::shared_ptr<MemoryAllocator> memory_allocator, bool use_adaptive_mutex,
- CacheMetadataChargePolicy metadata_charge_policy,
- CompressionType compression_type, uint32_t compress_format_version,
- bool enable_custom_split_merge,
- const CacheEntryRoleSet& do_not_compress_roles)
- : cache_options_(capacity, num_shard_bits, strict_capacity_limit,
- high_pri_pool_ratio, low_pri_pool_ratio, memory_allocator,
- use_adaptive_mutex, metadata_charge_policy,
- compression_type, compress_format_version,
- enable_custom_split_merge, do_not_compress_roles) {
- cache_ =
- NewLRUCache(capacity, num_shard_bits, strict_capacity_limit,
- high_pri_pool_ratio, memory_allocator, use_adaptive_mutex,
- metadata_charge_policy, low_pri_pool_ratio);
-}
+ const CompressedSecondaryCacheOptions& opts)
+ : cache_(opts.LRUCacheOptions::MakeSharedCache()), cache_options_(opts) {}
CompressedSecondaryCache::~CompressedSecondaryCache() { cache_.reset(); }
@@ -311,31 +296,9 @@ const Cache::CacheItemHelper* CompressedSecondaryCache::GetHelper(
}
}
-std::shared_ptr<SecondaryCache> NewCompressedSecondaryCache(
- size_t capacity, int num_shard_bits, bool strict_capacity_limit,
- double high_pri_pool_ratio, double low_pri_pool_ratio,
- std::shared_ptr<MemoryAllocator> memory_allocator, bool use_adaptive_mutex,
- CacheMetadataChargePolicy metadata_charge_policy,
- CompressionType compression_type, uint32_t compress_format_version,
- bool enable_custom_split_merge,
- const CacheEntryRoleSet& do_not_compress_roles) {
- return std::make_shared<CompressedSecondaryCache>(
- capacity, num_shard_bits, strict_capacity_limit, high_pri_pool_ratio,
- low_pri_pool_ratio, memory_allocator, use_adaptive_mutex,
- metadata_charge_policy, compression_type, compress_format_version,
- enable_custom_split_merge, do_not_compress_roles);
-}
-
-std::shared_ptr<SecondaryCache> NewCompressedSecondaryCache(
- const CompressedSecondaryCacheOptions& opts) {
- // The secondary_cache is disabled for this LRUCache instance.
- assert(opts.secondary_cache == nullptr);
- return NewCompressedSecondaryCache(
- opts.capacity, opts.num_shard_bits, opts.strict_capacity_limit,
- opts.high_pri_pool_ratio, opts.low_pri_pool_ratio, opts.memory_allocator,
- opts.use_adaptive_mutex, opts.metadata_charge_policy,
- opts.compression_type, opts.compress_format_version,
- opts.enable_custom_split_merge, opts.do_not_compress_roles);
+std::shared_ptr<SecondaryCache>
+CompressedSecondaryCacheOptions::MakeSharedSecondaryCache() const {
+ return std::make_shared<CompressedSecondaryCache>(*this);
}
} // namespace ROCKSDB_NAMESPACE
diff --git a/cache/compressed_secondary_cache.h b/cache/compressed_secondary_cache.h
index 7b45ca8bd..d20f2d1d7 100644
--- a/cache/compressed_secondary_cache.h
+++ b/cache/compressed_secondary_cache.h
@@ -69,18 +69,8 @@ class CompressedSecondaryCacheResultHandle : public SecondaryCacheResultHandle {
class CompressedSecondaryCache : public SecondaryCache {
public:
- CompressedSecondaryCache(
- size_t capacity, int num_shard_bits, bool strict_capacity_limit,
- double high_pri_pool_ratio, double low_pri_pool_ratio,
- std::shared_ptr<MemoryAllocator> memory_allocator = nullptr,
- bool use_adaptive_mutex = kDefaultToAdaptiveMutex,
- CacheMetadataChargePolicy metadata_charge_policy =
- kDefaultCacheMetadataChargePolicy,
- CompressionType compression_type = CompressionType::kLZ4Compression,
- uint32_t compress_format_version = 2,
- bool enable_custom_split_merge = false,
- const CacheEntryRoleSet& do_not_compress_roles = {
- CacheEntryRole::kFilterBlock});
+ explicit CompressedSecondaryCache(
+ const CompressedSecondaryCacheOptions& opts);
~CompressedSecondaryCache() override;
const char* Name() const override { return "CompressedSecondaryCache"; }
diff --git a/cache/compressed_secondary_cache_test.cc b/cache/compressed_secondary_cache_test.cc
index 1e41fc142..18b51ccf8 100644
--- a/cache/compressed_secondary_cache_test.cc
+++ b/cache/compressed_secondary_cache_test.cc
@@ -626,8 +626,9 @@ class CompressedSecondaryCacheTestBase : public testing::Test,
using CacheValueChunk = CompressedSecondaryCache::CacheValueChunk;
std::unique_ptr<CompressedSecondaryCache> sec_cache =
- std::make_unique<CompressedSecondaryCache>(1000, 0, true, 0.5, 0.0,
- allocator);
+ std::make_unique<CompressedSecondaryCache>(
+ CompressedSecondaryCacheOptions(1000, 0, true, 0.5, 0.0,
+ allocator));
Random rnd(301);
// 8500 = 8169 + 233 + 98, so there should be 3 chunks after split.
size_t str_size{8500};
@@ -678,7 +679,8 @@ class CompressedSecondaryCacheTestBase : public testing::Test,
std::string str = str1 + str2 + str3;
std::unique_ptr<CompressedSecondaryCache> sec_cache =
- std::make_unique<CompressedSecondaryCache>(1000, 0, true, 0.5, 0.0);
+ std::make_unique<CompressedSecondaryCache>(
+ CompressedSecondaryCacheOptions(1000, 0, true, 0.5, 0.0));
size_t charge{0};
CacheAllocationPtr value =
sec_cache->MergeChunksIntoValue(chunks_head, charge);
@@ -708,8 +710,9 @@ class CompressedSecondaryCacheTestBase : public testing::Test,
using CacheValueChunk = CompressedSecondaryCache::CacheValueChunk;
std::unique_ptr<CompressedSecondaryCache> sec_cache =
- std::make_unique<CompressedSecondaryCache>(1000, 0, true, 0.5, 0.0,
- allocator);
+ std::make_unique<CompressedSecondaryCache>(
+ CompressedSecondaryCacheOptions(1000, 0, true, 0.5, 0.0,
+ allocator));
Random rnd(301);
// 8500 = 8169 + 233 + 98, so there should be 3 chunks after split.
size_t str_size{8500};
diff --git a/cache/lru_cache.cc b/cache/lru_cache.cc
index 3b4e80ef8..02119c819 100644
--- a/cache/lru_cache.cc
+++ b/cache/lru_cache.cc
@@ -646,23 +646,15 @@ void LRUCacheShard::AppendPrintableOptions(std::string& str) const {
str.append(buffer);
}
-LRUCache::LRUCache(size_t capacity, int num_shard_bits,
- bool strict_capacity_limit, double high_pri_pool_ratio,
- double low_pri_pool_ratio,
- std::shared_ptr<MemoryAllocator> allocator,
- bool use_adaptive_mutex,
- CacheMetadataChargePolicy metadata_charge_policy)
- : ShardedCache(capacity, num_shard_bits, strict_capacity_limit,
- std::move(allocator)) {
+LRUCache::LRUCache(const LRUCacheOptions& opts) : ShardedCache(opts) {
size_t per_shard = GetPerShardCapacity();
MemoryAllocator* alloc = memory_allocator();
- const EvictionCallback* eviction_callback = &eviction_callback_;
- InitShards([=](LRUCacheShard* cs) {
- new (cs) LRUCacheShard(per_shard, strict_capacity_limit,
- high_pri_pool_ratio, low_pri_pool_ratio,
- use_adaptive_mutex, metadata_charge_policy,
- /* max_upper_hash_bits */ 32 - num_shard_bits, alloc,
- eviction_callback);
+ InitShards([&](LRUCacheShard* cs) {
+ new (cs) LRUCacheShard(per_shard, opts.strict_capacity_limit,
+ opts.high_pri_pool_ratio, opts.low_pri_pool_ratio,
+ opts.use_adaptive_mutex, opts.metadata_charge_policy,
+ /* max_upper_hash_bits */ 32 - opts.num_shard_bits,
+ alloc, &eviction_callback_);
});
}
@@ -692,13 +684,7 @@ double LRUCache::GetHighPriPoolRatio() {
} // namespace lru_cache
-std::shared_ptr<Cache> NewLRUCache(
- size_t capacity, int num_shard_bits, bool strict_capacity_limit,
- double high_pri_pool_ratio,
- std::shared_ptr<MemoryAllocator> memory_allocator, bool use_adaptive_mutex,
- CacheMetadataChargePolicy metadata_charge_policy,
- const std::shared_ptr<SecondaryCache>& secondary_cache,
- double low_pri_pool_ratio) {
+std::shared_ptr<Cache> LRUCacheOptions::MakeSharedCache() const {
if (num_shard_bits >= 20) {
return nullptr; // The cache cannot be sharded into too many fine pieces.
}
@@ -714,36 +700,15 @@ std::shared_ptr<Cache> NewLRUCache(
// Invalid high_pri_pool_ratio and low_pri_pool_ratio combination
return nullptr;
}
- if (num_shard_bits < 0) {
- num_shard_bits = GetDefaultCacheShardBits(capacity);
+ // For sanitized options
+ LRUCacheOptions opts = *this;
+ if (opts.num_shard_bits < 0) {
+ opts.num_shard_bits = GetDefaultCacheShardBits(capacity);
}
- std::shared_ptr<Cache> cache = std::make_shared<LRUCache>(
- capacity, num_shard_bits, strict_capacity_limit, high_pri_pool_ratio,
- low_pri_pool_ratio, std::move(memory_allocator), use_adaptive_mutex,
- metadata_charge_policy);
+ std::shared_ptr<Cache> cache = std::make_shared<LRUCache>(opts);
if (secondary_cache) {
cache = std::make_shared<CacheWithSecondaryAdapter>(cache, secondary_cache);
}
return cache;
}
-
-std::shared_ptr<Cache> NewLRUCache(const LRUCacheOptions& cache_opts) {
- return NewLRUCache(cache_opts.capacity, cache_opts.num_shard_bits,
- cache_opts.strict_capacity_limit,
- cache_opts.high_pri_pool_ratio,
- cache_opts.memory_allocator, cache_opts.use_adaptive_mutex,
- cache_opts.metadata_charge_policy,
- cache_opts.secondary_cache, cache_opts.low_pri_pool_ratio);
-}
-
-std::shared_ptr<Cache> NewLRUCache(
- size_t capacity, int num_shard_bits, bool strict_capacity_limit,
- double high_pri_pool_ratio,
- std::shared_ptr<MemoryAllocator> memory_allocator, bool use_adaptive_mutex,
- CacheMetadataChargePolicy metadata_charge_policy,
- double low_pri_pool_ratio) {
- return NewLRUCache(capacity, num_shard_bits, strict_capacity_limit,
- high_pri_pool_ratio, memory_allocator, use_adaptive_mutex,
- metadata_charge_policy, nullptr, low_pri_pool_ratio);
-}
} // namespace ROCKSDB_NAMESPACE
diff --git a/cache/lru_cache.h b/cache/lru_cache.h
index 554907b3b..9e6f15062 100644
--- a/cache/lru_cache.h
+++ b/cache/lru_cache.h
@@ -446,12 +446,7 @@ class LRUCache
#endif
: public ShardedCache<LRUCacheShard> {
public:
- LRUCache(size_t capacity, int num_shard_bits, bool strict_capacity_limit,
- double high_pri_pool_ratio, double low_pri_pool_ratio,
- std::shared_ptr<MemoryAllocator> memory_allocator = nullptr,
- bool use_adaptive_mutex = kDefaultToAdaptiveMutex,
- CacheMetadataChargePolicy metadata_charge_policy =
- kDontChargeCacheMetadata);
+ explicit LRUCache(const LRUCacheOptions& opts);
const char* Name() const override { return "LRUCache"; }
ObjectPtr Value(Handle* handle) override;
size_t GetCharge(Handle* handle) const override;
diff --git a/cache/sharded_cache.cc b/cache/sharded_cache.cc
index 9ebca3ba8..f8d518067 100644
--- a/cache/sharded_cache.cc
+++ b/cache/sharded_cache.cc
@@ -19,14 +19,12 @@
namespace ROCKSDB_NAMESPACE {
-ShardedCacheBase::ShardedCacheBase(size_t capacity, int num_shard_bits,
- bool strict_capacity_limit,
- std::shared_ptr<MemoryAllocator> allocator)
- : Cache(std::move(allocator)),
+ShardedCacheBase::ShardedCacheBase(const ShardedCacheOptions& opts)
+ : Cache(opts.memory_allocator),
last_id_(1),
- shard_mask_((uint32_t{1} << num_shard_bits) - 1),
- strict_capacity_limit_(strict_capacity_limit),
- capacity_(capacity) {}
+ shard_mask_((uint32_t{1} << opts.num_shard_bits) - 1),
+ strict_capacity_limit_(opts.strict_capacity_limit),
+ capacity_(opts.capacity) {}
size_t ShardedCacheBase::ComputePerShardCapacity(size_t capacity) const {
uint32_t num_shards = GetNumShards();
diff --git a/cache/sharded_cache.h b/cache/sharded_cache.h
index 04eaa5318..d689783d3 100644
--- a/cache/sharded_cache.h
+++ b/cache/sharded_cache.h
@@ -89,9 +89,7 @@ class CacheShardBase {
// Portions of ShardedCache that do not depend on the template parameter
class ShardedCacheBase : public Cache {
public:
- ShardedCacheBase(size_t capacity, int num_shard_bits,
- bool strict_capacity_limit,
- std::shared_ptr<MemoryAllocator> memory_allocator);
+ explicit ShardedCacheBase(const ShardedCacheOptions& opts);
virtual ~ShardedCacheBase() = default;
int GetNumShardBits() const;
@@ -134,10 +132,8 @@ class ShardedCache : public ShardedCacheBase {
using HashCref = typename CacheShard::HashCref;
using HandleImpl = typename CacheShard::HandleImpl;
- ShardedCache(size_t capacity, int num_shard_bits, bool strict_capacity_limit,
- std::shared_ptr<MemoryAllocator> allocator)
- : ShardedCacheBase(capacity, num_shard_bits, strict_capacity_limit,
- allocator),
+ explicit ShardedCache(const ShardedCacheOptions& opts)
+ : ShardedCacheBase(opts),
shards_(reinterpret_cast<CacheShard*>(port::cacheline_aligned_alloc(
sizeof(CacheShard) * GetNumShards()))),
destroy_shards_in_dtor_(false) {}
diff --git a/db/db_block_cache_test.cc b/db/db_block_cache_test.cc
index 1a1366353..8fa93d8d7 100644
--- a/db/db_block_cache_test.cc
+++ b/db/db_block_cache_test.cc
@@ -620,9 +620,9 @@ class MockCache : public LRUCache {
static uint32_t low_pri_insert_count;
MockCache()
- : LRUCache((size_t)1 << 25 /*capacity*/, 0 /*num_shard_bits*/,
- false /*strict_capacity_limit*/, 0.0 /*high_pri_pool_ratio*/,
- 0.0 /*low_pri_pool_ratio*/) {}
+ : LRUCache(LRUCacheOptions(
+ size_t{1} << 25 /*capacity*/, 0 /*num_shard_bits*/,
+ false /*strict_capacity_limit*/, 0.0 /*high_pri_pool_ratio*/)) {}
using ShardedCache::Insert;
diff --git a/include/rocksdb/cache.h b/include/rocksdb/cache.h
index 387da1753..9aadca947 100644
--- a/include/rocksdb/cache.h
+++ b/include/rocksdb/cache.h
@@ -151,6 +151,13 @@ struct ShardedCacheOptions {
metadata_charge_policy(_metadata_charge_policy) {}
};
+// LRUCache - A cache using LRU eviction to stay at or below a set capacity.
+// The cache is sharded to 2^num_shard_bits shards, by hash of the key.
+// The total capacity is divided and evenly assigned to each shard, and each
+// shard has its own LRU list for evictions. Each shard also has a mutex for
+// exclusive access during operations; even read operations need exclusive
+// access in order to update the LRU list. Mutex contention is usually low
+// with enough shards.
struct LRUCacheOptions : public ShardedCacheOptions {
// Ratio of cache reserved for high-priority and low-priority entries,
// respectively. (See Cache::Priority below for more information on the levels.)
@@ -158,7 +165,8 @@ struct LRUCacheOptions : public ShardedCacheOptions {
// values cannot exceed 1.
//
// If high_pri_pool_ratio is greater than zero, a dedicated high-priority LRU
- // list is maintained by the cache. Similarly, if low_pri_pool_ratio is
+ // list is maintained by the cache. A ratio of 0.5 means non-high-priority
+ // entries will use midpoint insertion. Similarly, if low_pri_pool_ratio is
// greater than zero, a dedicated low-priority LRU list is maintained.
// There is also a bottom-priority LRU list, which is always enabled and not
// explicitly configurable. Entries are spilled over to the next available
@@ -173,9 +181,6 @@ struct LRUCacheOptions : public ShardedCacheOptions {
// otherwise, they are placed in the bottom-priority pool.) This results
// in lower-priority entries without hits getting evicted from the cache
// sooner.
- //
- // Default values: high_pri_pool_ratio = 0.5 (which is referred to as
- // "midpoint insertion"), low_pri_pool_ratio = 0
double high_pri_pool_ratio = 0.5;
double low_pri_pool_ratio = 0.0;
@@ -199,31 +204,36 @@ struct LRUCacheOptions : public ShardedCacheOptions {
high_pri_pool_ratio(_high_pri_pool_ratio),
low_pri_pool_ratio(_low_pri_pool_ratio),
use_adaptive_mutex(_use_adaptive_mutex) {}
+
+ // Construct an instance of LRUCache using these options
+ std::shared_ptr<Cache> MakeSharedCache() const;
};
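A minimal sketch of the new `MakeSharedCache()` entry point together with the priority-pool ratios documented above; the capacity and ratios are arbitrary example values, and the `LRUCacheOptions` default constructor (not shown in this diff) is assumed.

```cpp
#include <memory>

#include "rocksdb/cache.h"

int main() {
  using namespace ROCKSDB_NAMESPACE;

  LRUCacheOptions opts;
  opts.capacity = 128 << 20;       // 128 MiB total, split across shards
  opts.high_pri_pool_ratio = 0.5;  // reserve half for high-priority entries
  opts.low_pri_pool_ratio = 0.2;   // and 20% for low-priority entries
  opts.num_shard_bits = -1;        // let the library choose the shard count

  // The two ratios must each be in [0, 1] and sum to at most 1; otherwise
  // MakeSharedCache() returns nullptr.
  std::shared_ptr<Cache> cache = opts.MakeSharedCache();
  return cache != nullptr ? 0 : 1;
}
```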
-// Create a new cache with a fixed size capacity. The cache is sharded
-// to 2^num_shard_bits shards, by hash of the key. The total capacity
-// is divided and evenly assigned to each shard. If strict_capacity_limit
-// is set, insert to the cache will fail when cache is full. User can also
-// set percentage of the cache reserves for high priority entries via
-// high_pri_pool_pct.
-// num_shard_bits = -1 means it is automatically determined: every shard
-// will be at least 512KB and number of shard bits will not exceed 6.
-extern std::shared_ptr<Cache> NewLRUCache(
+// DEPRECATED wrapper function
+inline std::shared_ptr<Cache> NewLRUCache(
size_t capacity, int num_shard_bits = -1,
bool strict_capacity_limit = false, double high_pri_pool_ratio = 0.5,
std::shared_ptr<MemoryAllocator> memory_allocator = nullptr,
bool use_adaptive_mutex = kDefaultToAdaptiveMutex,
CacheMetadataChargePolicy metadata_charge_policy =
kDefaultCacheMetadataChargePolicy,
- double low_pri_pool_ratio = 0.0);
-
-extern std::shared_ptr<Cache> NewLRUCache(const LRUCacheOptions& cache_opts);
+ double low_pri_pool_ratio = 0.0) {
+ return LRUCacheOptions(capacity, num_shard_bits, strict_capacity_limit,
+ high_pri_pool_ratio, memory_allocator,
+ use_adaptive_mutex, metadata_charge_policy,
+ low_pri_pool_ratio)
+ .MakeSharedCache();
+}
+
+// DEPRECATED wrapper function
+inline std::shared_ptr<Cache> NewLRUCache(const LRUCacheOptions& cache_opts) {
+ return cache_opts.MakeSharedCache();
+}
// EXPERIMENTAL
-// Options structure for configuring a SecondaryCache instance based on
-// LRUCache. The LRUCacheOptions.secondary_cache is not used and
-// should not be set.
+// Options structure for configuring a SecondaryCache instance with in-memory
+// compression. The implementation uses LRUCache, so it inherits LRUCache's
+// options, except that LRUCacheOptions.secondary_cache is not used and should
+// not be set.
struct CompressedSecondaryCacheOptions : LRUCacheOptions {
// The compression method (if any) that is used to compress data.
CompressionType compression_type = CompressionType::kLZ4Compression;
@@ -264,11 +274,16 @@ struct CompressedSecondaryCacheOptions : LRUCacheOptions {
compress_format_version(_compress_format_version),
enable_custom_split_merge(_enable_custom_split_merge),
do_not_compress_roles(_do_not_compress_roles) {}
+
+ // Construct an instance of CompressedSecondaryCache using these options
+ std::shared_ptr<SecondaryCache> MakeSharedSecondaryCache() const;
+
+ // Avoid confusion with LRUCache
+ std::shared_ptr<Cache> MakeSharedCache() const = delete;
};
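A minimal sketch of how `MakeSharedSecondaryCache()` is meant to pair with a primary cache's `MakeSharedCache()`. It assumes the `CompressedSecondaryCacheOptions` default constructor (not shown in this diff); the capacities are arbitrary example values.

```cpp
#include <memory>

#include "rocksdb/cache.h"

int main() {
  using namespace ROCKSDB_NAMESPACE;

  // Configure the compressed secondary tier.
  CompressedSecondaryCacheOptions sec_opts;
  sec_opts.capacity = 32 << 20;
  sec_opts.compression_type = CompressionType::kLZ4Compression;
  std::shared_ptr<SecondaryCache> secondary =
      sec_opts.MakeSharedSecondaryCache();

  // Attach it to a primary LRU block cache. Note that MakeSharedCache() is
  // deleted on CompressedSecondaryCacheOptions to avoid exactly this mix-up.
  LRUCacheOptions primary_opts;
  primary_opts.capacity = 64 << 20;
  primary_opts.secondary_cache = secondary;
  std::shared_ptr<Cache> block_cache = primary_opts.MakeSharedCache();
  return block_cache != nullptr ? 0 : 1;
}
```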
-// EXPERIMENTAL
-// Create a new Secondary Cache that is implemented on top of LRUCache.
-extern std::shared_ptr<SecondaryCache> NewCompressedSecondaryCache(
+// DEPRECATED wrapper function
+inline std::shared_ptr<SecondaryCache> NewCompressedSecondaryCache(
size_t capacity, int num_shard_bits = -1,
bool strict_capacity_limit = false, double high_pri_pool_ratio = 0.5,
double low_pri_pool_ratio = 0.0,
@@ -280,10 +295,21 @@ extern std::shared_ptr<SecondaryCache> NewCompressedSecondaryCache(
uint32_t compress_format_version = 2,
bool enable_custom_split_merge = false,
const CacheEntryRoleSet& _do_not_compress_roles = {
- CacheEntryRole::kFilterBlock});
-
-extern std::shared_ptr NewCompressedSecondaryCache(
- const CompressedSecondaryCacheOptions& opts);
+ CacheEntryRole::kFilterBlock}) {
+ return CompressedSecondaryCacheOptions(
+ capacity, num_shard_bits, strict_capacity_limit,
+ high_pri_pool_ratio, low_pri_pool_ratio, memory_allocator,
+ use_adaptive_mutex, metadata_charge_policy, compression_type,
+ compress_format_version, enable_custom_split_merge,
+ _do_not_compress_roles)
+ .MakeSharedSecondaryCache();
+}
+
+// DEPRECATED wrapper function
+inline std::shared_ptr<SecondaryCache> NewCompressedSecondaryCache(
+ const CompressedSecondaryCacheOptions& opts) {
+ return opts.MakeSharedSecondaryCache();
+}
// HyperClockCache - A lock-free Cache alternative for RocksDB block cache
// that offers much improved CPU efficiency vs. LRUCache under high parallel