Avoid long parameter lists configuring Caches (#11386)

Summary:
For better clarity, this encourages more options to be specified explicitly via fields rather than positionally via constructor parameter lists. This simplifies code maintenance as new fields are added. Also deprecates some cases of the confusing pattern of NewWhatever() functions returning shared_ptr.

Net reduction of about 70 source code lines (including comments).

Pull Request resolved: https://github.com/facebook/rocksdb/pull/11386

Test Plan: existing tests

Reviewed By: ajkr

Differential Revision: D45059075

Pulled By: pdillinger

fbshipit-source-id: d53fa09b268024f9c55254bb973b6c69feebf41a
oxigraph-8.3.2
Peter Dillinger 2 years ago committed by Facebook GitHub Bot
parent e0e318f370
commit 41a7fbf758
  1. 3
      HISTORY.md
  2. 45
      cache/clock_cache.cc
  3. 5
      cache/clock_cache.h
  4. 47
      cache/compressed_secondary_cache.cc
  5. 14
      cache/compressed_secondary_cache.h
  6. 13
      cache/compressed_secondary_cache_test.cc
  7. 61
      cache/lru_cache.cc
  8. 7
      cache/lru_cache.h
  9. 12
      cache/sharded_cache.cc
  10. 10
      cache/sharded_cache.h
  11. 6
      db/db_block_cache_test.cc
  12. 78
      include/rocksdb/cache.h

@ -4,6 +4,9 @@
* Introduced a new option `block_protection_bytes_per_key`, which can be used to enable per key-value integrity protection for in-memory blocks in block cache (#11287). * Introduced a new option `block_protection_bytes_per_key`, which can be used to enable per key-value integrity protection for in-memory blocks in block cache (#11287).
* Added `JemallocAllocatorOptions::num_arenas`. Setting `num_arenas > 1` may mitigate mutex contention in the allocator, particularly in scenarios where block allocations commonly bypass jemalloc tcache. * Added `JemallocAllocatorOptions::num_arenas`. Setting `num_arenas > 1` may mitigate mutex contention in the allocator, particularly in scenarios where block allocations commonly bypass jemalloc tcache.
### Public API Changes
* Add `MakeSharedCache()` construction functions to various cache Options objects, and deprecated the `NewWhateverCache()` functions with long parameter lists.
## 8.2.0 (04/24/2023) ## 8.2.0 (04/24/2023)
### Public API Changes ### Public API Changes
* `SstFileWriter::DeleteRange()` now returns `Status::InvalidArgument` if the range's end key comes before its start key according to the user comparator. Previously the behavior was undefined. * `SstFileWriter::DeleteRange()` now returns `Status::InvalidArgument` if the range's end key comes before its start key according to the user comparator. Previously the behavior was undefined.

@ -1282,25 +1282,20 @@ size_t ClockCacheShard<Table>::GetTableAddressCount() const {
// Explicit instantiation // Explicit instantiation
template class ClockCacheShard<HyperClockTable>; template class ClockCacheShard<HyperClockTable>;
HyperClockCache::HyperClockCache( HyperClockCache::HyperClockCache(const HyperClockCacheOptions& opts)
size_t capacity, size_t estimated_value_size, int num_shard_bits, : ShardedCache(opts) {
bool strict_capacity_limit, assert(opts.estimated_entry_charge > 0 ||
CacheMetadataChargePolicy metadata_charge_policy, opts.metadata_charge_policy != kDontChargeCacheMetadata);
std::shared_ptr<MemoryAllocator> memory_allocator)
: ShardedCache(capacity, num_shard_bits, strict_capacity_limit,
std::move(memory_allocator)) {
assert(estimated_value_size > 0 ||
metadata_charge_policy != kDontChargeCacheMetadata);
// TODO: should not need to go through two levels of pointer indirection to // TODO: should not need to go through two levels of pointer indirection to
// get to table entries // get to table entries
size_t per_shard = GetPerShardCapacity(); size_t per_shard = GetPerShardCapacity();
MemoryAllocator* alloc = this->memory_allocator(); MemoryAllocator* alloc = this->memory_allocator();
const Cache::EvictionCallback* eviction_callback = &eviction_callback_; InitShards([&](Shard* cs) {
InitShards([=](Shard* cs) { HyperClockTable::Opts table_opts;
HyperClockTable::Opts opts; table_opts.estimated_value_size = opts.estimated_entry_charge;
opts.estimated_value_size = estimated_value_size; new (cs) Shard(per_shard, opts.strict_capacity_limit,
new (cs) Shard(per_shard, strict_capacity_limit, metadata_charge_policy, opts.metadata_charge_policy, alloc, &eviction_callback_,
alloc, eviction_callback, opts); table_opts);
}); });
} }
@ -1460,21 +1455,23 @@ std::shared_ptr<Cache> NewClockCache(
} }
std::shared_ptr<Cache> HyperClockCacheOptions::MakeSharedCache() const { std::shared_ptr<Cache> HyperClockCacheOptions::MakeSharedCache() const {
auto my_num_shard_bits = num_shard_bits; // For sanitized options
if (my_num_shard_bits >= 20) { HyperClockCacheOptions opts = *this;
if (opts.num_shard_bits >= 20) {
return nullptr; // The cache cannot be sharded into too many fine pieces. return nullptr; // The cache cannot be sharded into too many fine pieces.
} }
if (my_num_shard_bits < 0) { if (opts.num_shard_bits < 0) {
// Use larger shard size to reduce risk of large entries clustering // Use larger shard size to reduce risk of large entries clustering
// or skewing individual shards. // or skewing individual shards.
constexpr size_t min_shard_size = 32U * 1024U * 1024U; constexpr size_t min_shard_size = 32U * 1024U * 1024U;
my_num_shard_bits = GetDefaultCacheShardBits(capacity, min_shard_size); opts.num_shard_bits =
GetDefaultCacheShardBits(opts.capacity, min_shard_size);
} }
std::shared_ptr<Cache> cache = std::make_shared<clock_cache::HyperClockCache>( std::shared_ptr<Cache> cache =
capacity, estimated_entry_charge, my_num_shard_bits, std::make_shared<clock_cache::HyperClockCache>(opts);
strict_capacity_limit, metadata_charge_policy, memory_allocator); if (opts.secondary_cache) {
if (secondary_cache) { cache = std::make_shared<CacheWithSecondaryAdapter>(cache,
cache = std::make_shared<CacheWithSecondaryAdapter>(cache, secondary_cache); opts.secondary_cache);
} }
return cache; return cache;
} }

@ -682,10 +682,7 @@ class HyperClockCache
public: public:
using Shard = ClockCacheShard<HyperClockTable>; using Shard = ClockCacheShard<HyperClockTable>;
HyperClockCache(size_t capacity, size_t estimated_value_size, explicit HyperClockCache(const HyperClockCacheOptions& opts);
int num_shard_bits, bool strict_capacity_limit,
CacheMetadataChargePolicy metadata_charge_policy,
std::shared_ptr<MemoryAllocator> memory_allocator);
const char* Name() const override { return "HyperClockCache"; } const char* Name() const override { return "HyperClockCache"; }

@ -17,23 +17,8 @@
namespace ROCKSDB_NAMESPACE { namespace ROCKSDB_NAMESPACE {
CompressedSecondaryCache::CompressedSecondaryCache( CompressedSecondaryCache::CompressedSecondaryCache(
size_t capacity, int num_shard_bits, bool strict_capacity_limit, const CompressedSecondaryCacheOptions& opts)
double high_pri_pool_ratio, double low_pri_pool_ratio, : cache_(opts.LRUCacheOptions::MakeSharedCache()), cache_options_(opts) {}
std::shared_ptr<MemoryAllocator> memory_allocator, bool use_adaptive_mutex,
CacheMetadataChargePolicy metadata_charge_policy,
CompressionType compression_type, uint32_t compress_format_version,
bool enable_custom_split_merge,
const CacheEntryRoleSet& do_not_compress_roles)
: cache_options_(capacity, num_shard_bits, strict_capacity_limit,
high_pri_pool_ratio, low_pri_pool_ratio, memory_allocator,
use_adaptive_mutex, metadata_charge_policy,
compression_type, compress_format_version,
enable_custom_split_merge, do_not_compress_roles) {
cache_ =
NewLRUCache(capacity, num_shard_bits, strict_capacity_limit,
high_pri_pool_ratio, memory_allocator, use_adaptive_mutex,
metadata_charge_policy, low_pri_pool_ratio);
}
CompressedSecondaryCache::~CompressedSecondaryCache() { cache_.reset(); } CompressedSecondaryCache::~CompressedSecondaryCache() { cache_.reset(); }
@ -311,31 +296,9 @@ const Cache::CacheItemHelper* CompressedSecondaryCache::GetHelper(
} }
} }
std::shared_ptr<SecondaryCache> NewCompressedSecondaryCache( std::shared_ptr<SecondaryCache>
size_t capacity, int num_shard_bits, bool strict_capacity_limit, CompressedSecondaryCacheOptions::MakeSharedSecondaryCache() const {
double high_pri_pool_ratio, double low_pri_pool_ratio, return std::make_shared<CompressedSecondaryCache>(*this);
std::shared_ptr<MemoryAllocator> memory_allocator, bool use_adaptive_mutex,
CacheMetadataChargePolicy metadata_charge_policy,
CompressionType compression_type, uint32_t compress_format_version,
bool enable_custom_split_merge,
const CacheEntryRoleSet& do_not_compress_roles) {
return std::make_shared<CompressedSecondaryCache>(
capacity, num_shard_bits, strict_capacity_limit, high_pri_pool_ratio,
low_pri_pool_ratio, memory_allocator, use_adaptive_mutex,
metadata_charge_policy, compression_type, compress_format_version,
enable_custom_split_merge, do_not_compress_roles);
}
std::shared_ptr<SecondaryCache> NewCompressedSecondaryCache(
const CompressedSecondaryCacheOptions& opts) {
// The secondary_cache is disabled for this LRUCache instance.
assert(opts.secondary_cache == nullptr);
return NewCompressedSecondaryCache(
opts.capacity, opts.num_shard_bits, opts.strict_capacity_limit,
opts.high_pri_pool_ratio, opts.low_pri_pool_ratio, opts.memory_allocator,
opts.use_adaptive_mutex, opts.metadata_charge_policy,
opts.compression_type, opts.compress_format_version,
opts.enable_custom_split_merge, opts.do_not_compress_roles);
} }
} // namespace ROCKSDB_NAMESPACE } // namespace ROCKSDB_NAMESPACE

@ -69,18 +69,8 @@ class CompressedSecondaryCacheResultHandle : public SecondaryCacheResultHandle {
class CompressedSecondaryCache : public SecondaryCache { class CompressedSecondaryCache : public SecondaryCache {
public: public:
CompressedSecondaryCache( explicit CompressedSecondaryCache(
size_t capacity, int num_shard_bits, bool strict_capacity_limit, const CompressedSecondaryCacheOptions& opts);
double high_pri_pool_ratio, double low_pri_pool_ratio,
std::shared_ptr<MemoryAllocator> memory_allocator = nullptr,
bool use_adaptive_mutex = kDefaultToAdaptiveMutex,
CacheMetadataChargePolicy metadata_charge_policy =
kDefaultCacheMetadataChargePolicy,
CompressionType compression_type = CompressionType::kLZ4Compression,
uint32_t compress_format_version = 2,
bool enable_custom_split_merge = false,
const CacheEntryRoleSet& do_not_compress_roles = {
CacheEntryRole::kFilterBlock});
~CompressedSecondaryCache() override; ~CompressedSecondaryCache() override;
const char* Name() const override { return "CompressedSecondaryCache"; } const char* Name() const override { return "CompressedSecondaryCache"; }

@ -626,8 +626,9 @@ class CompressedSecondaryCacheTestBase : public testing::Test,
using CacheValueChunk = CompressedSecondaryCache::CacheValueChunk; using CacheValueChunk = CompressedSecondaryCache::CacheValueChunk;
std::unique_ptr<CompressedSecondaryCache> sec_cache = std::unique_ptr<CompressedSecondaryCache> sec_cache =
std::make_unique<CompressedSecondaryCache>(1000, 0, true, 0.5, 0.0, std::make_unique<CompressedSecondaryCache>(
allocator); CompressedSecondaryCacheOptions(1000, 0, true, 0.5, 0.0,
allocator));
Random rnd(301); Random rnd(301);
// 8500 = 8169 + 233 + 98, so there should be 3 chunks after split. // 8500 = 8169 + 233 + 98, so there should be 3 chunks after split.
size_t str_size{8500}; size_t str_size{8500};
@ -678,7 +679,8 @@ class CompressedSecondaryCacheTestBase : public testing::Test,
std::string str = str1 + str2 + str3; std::string str = str1 + str2 + str3;
std::unique_ptr<CompressedSecondaryCache> sec_cache = std::unique_ptr<CompressedSecondaryCache> sec_cache =
std::make_unique<CompressedSecondaryCache>(1000, 0, true, 0.5, 0.0); std::make_unique<CompressedSecondaryCache>(
CompressedSecondaryCacheOptions(1000, 0, true, 0.5, 0.0));
size_t charge{0}; size_t charge{0};
CacheAllocationPtr value = CacheAllocationPtr value =
sec_cache->MergeChunksIntoValue(chunks_head, charge); sec_cache->MergeChunksIntoValue(chunks_head, charge);
@ -708,8 +710,9 @@ class CompressedSecondaryCacheTestBase : public testing::Test,
using CacheValueChunk = CompressedSecondaryCache::CacheValueChunk; using CacheValueChunk = CompressedSecondaryCache::CacheValueChunk;
std::unique_ptr<CompressedSecondaryCache> sec_cache = std::unique_ptr<CompressedSecondaryCache> sec_cache =
std::make_unique<CompressedSecondaryCache>(1000, 0, true, 0.5, 0.0, std::make_unique<CompressedSecondaryCache>(
allocator); CompressedSecondaryCacheOptions(1000, 0, true, 0.5, 0.0,
allocator));
Random rnd(301); Random rnd(301);
// 8500 = 8169 + 233 + 98, so there should be 3 chunks after split. // 8500 = 8169 + 233 + 98, so there should be 3 chunks after split.
size_t str_size{8500}; size_t str_size{8500};

61
cache/lru_cache.cc vendored

@ -646,23 +646,15 @@ void LRUCacheShard::AppendPrintableOptions(std::string& str) const {
str.append(buffer); str.append(buffer);
} }
LRUCache::LRUCache(size_t capacity, int num_shard_bits, LRUCache::LRUCache(const LRUCacheOptions& opts) : ShardedCache(opts) {
bool strict_capacity_limit, double high_pri_pool_ratio,
double low_pri_pool_ratio,
std::shared_ptr<MemoryAllocator> allocator,
bool use_adaptive_mutex,
CacheMetadataChargePolicy metadata_charge_policy)
: ShardedCache(capacity, num_shard_bits, strict_capacity_limit,
std::move(allocator)) {
size_t per_shard = GetPerShardCapacity(); size_t per_shard = GetPerShardCapacity();
MemoryAllocator* alloc = memory_allocator(); MemoryAllocator* alloc = memory_allocator();
const EvictionCallback* eviction_callback = &eviction_callback_; InitShards([&](LRUCacheShard* cs) {
InitShards([=](LRUCacheShard* cs) { new (cs) LRUCacheShard(per_shard, opts.strict_capacity_limit,
new (cs) LRUCacheShard(per_shard, strict_capacity_limit, opts.high_pri_pool_ratio, opts.low_pri_pool_ratio,
high_pri_pool_ratio, low_pri_pool_ratio, opts.use_adaptive_mutex, opts.metadata_charge_policy,
use_adaptive_mutex, metadata_charge_policy, /* max_upper_hash_bits */ 32 - opts.num_shard_bits,
/* max_upper_hash_bits */ 32 - num_shard_bits, alloc, alloc, &eviction_callback_);
eviction_callback);
}); });
} }
@ -692,13 +684,7 @@ double LRUCache::GetHighPriPoolRatio() {
} // namespace lru_cache } // namespace lru_cache
std::shared_ptr<Cache> NewLRUCache( std::shared_ptr<Cache> LRUCacheOptions::MakeSharedCache() const {
size_t capacity, int num_shard_bits, bool strict_capacity_limit,
double high_pri_pool_ratio,
std::shared_ptr<MemoryAllocator> memory_allocator, bool use_adaptive_mutex,
CacheMetadataChargePolicy metadata_charge_policy,
const std::shared_ptr<SecondaryCache>& secondary_cache,
double low_pri_pool_ratio) {
if (num_shard_bits >= 20) { if (num_shard_bits >= 20) {
return nullptr; // The cache cannot be sharded into too many fine pieces. return nullptr; // The cache cannot be sharded into too many fine pieces.
} }
@ -714,36 +700,15 @@ std::shared_ptr<Cache> NewLRUCache(
// Invalid high_pri_pool_ratio and low_pri_pool_ratio combination // Invalid high_pri_pool_ratio and low_pri_pool_ratio combination
return nullptr; return nullptr;
} }
if (num_shard_bits < 0) { // For sanitized options
num_shard_bits = GetDefaultCacheShardBits(capacity); LRUCacheOptions opts = *this;
if (opts.num_shard_bits < 0) {
opts.num_shard_bits = GetDefaultCacheShardBits(capacity);
} }
std::shared_ptr<Cache> cache = std::make_shared<LRUCache>( std::shared_ptr<Cache> cache = std::make_shared<LRUCache>(opts);
capacity, num_shard_bits, strict_capacity_limit, high_pri_pool_ratio,
low_pri_pool_ratio, std::move(memory_allocator), use_adaptive_mutex,
metadata_charge_policy);
if (secondary_cache) { if (secondary_cache) {
cache = std::make_shared<CacheWithSecondaryAdapter>(cache, secondary_cache); cache = std::make_shared<CacheWithSecondaryAdapter>(cache, secondary_cache);
} }
return cache; return cache;
} }
std::shared_ptr<Cache> NewLRUCache(const LRUCacheOptions& cache_opts) {
return NewLRUCache(cache_opts.capacity, cache_opts.num_shard_bits,
cache_opts.strict_capacity_limit,
cache_opts.high_pri_pool_ratio,
cache_opts.memory_allocator, cache_opts.use_adaptive_mutex,
cache_opts.metadata_charge_policy,
cache_opts.secondary_cache, cache_opts.low_pri_pool_ratio);
}
std::shared_ptr<Cache> NewLRUCache(
size_t capacity, int num_shard_bits, bool strict_capacity_limit,
double high_pri_pool_ratio,
std::shared_ptr<MemoryAllocator> memory_allocator, bool use_adaptive_mutex,
CacheMetadataChargePolicy metadata_charge_policy,
double low_pri_pool_ratio) {
return NewLRUCache(capacity, num_shard_bits, strict_capacity_limit,
high_pri_pool_ratio, memory_allocator, use_adaptive_mutex,
metadata_charge_policy, nullptr, low_pri_pool_ratio);
}
} // namespace ROCKSDB_NAMESPACE } // namespace ROCKSDB_NAMESPACE

7
cache/lru_cache.h vendored

@ -446,12 +446,7 @@ class LRUCache
#endif #endif
: public ShardedCache<LRUCacheShard> { : public ShardedCache<LRUCacheShard> {
public: public:
LRUCache(size_t capacity, int num_shard_bits, bool strict_capacity_limit, explicit LRUCache(const LRUCacheOptions& opts);
double high_pri_pool_ratio, double low_pri_pool_ratio,
std::shared_ptr<MemoryAllocator> memory_allocator = nullptr,
bool use_adaptive_mutex = kDefaultToAdaptiveMutex,
CacheMetadataChargePolicy metadata_charge_policy =
kDontChargeCacheMetadata);
const char* Name() const override { return "LRUCache"; } const char* Name() const override { return "LRUCache"; }
ObjectPtr Value(Handle* handle) override; ObjectPtr Value(Handle* handle) override;
size_t GetCharge(Handle* handle) const override; size_t GetCharge(Handle* handle) const override;

@ -19,14 +19,12 @@
namespace ROCKSDB_NAMESPACE { namespace ROCKSDB_NAMESPACE {
ShardedCacheBase::ShardedCacheBase(size_t capacity, int num_shard_bits, ShardedCacheBase::ShardedCacheBase(const ShardedCacheOptions& opts)
bool strict_capacity_limit, : Cache(opts.memory_allocator),
std::shared_ptr<MemoryAllocator> allocator)
: Cache(std::move(allocator)),
last_id_(1), last_id_(1),
shard_mask_((uint32_t{1} << num_shard_bits) - 1), shard_mask_((uint32_t{1} << opts.num_shard_bits) - 1),
strict_capacity_limit_(strict_capacity_limit), strict_capacity_limit_(opts.strict_capacity_limit),
capacity_(capacity) {} capacity_(opts.capacity) {}
size_t ShardedCacheBase::ComputePerShardCapacity(size_t capacity) const { size_t ShardedCacheBase::ComputePerShardCapacity(size_t capacity) const {
uint32_t num_shards = GetNumShards(); uint32_t num_shards = GetNumShards();

@ -89,9 +89,7 @@ class CacheShardBase {
// Portions of ShardedCache that do not depend on the template parameter // Portions of ShardedCache that do not depend on the template parameter
class ShardedCacheBase : public Cache { class ShardedCacheBase : public Cache {
public: public:
ShardedCacheBase(size_t capacity, int num_shard_bits, explicit ShardedCacheBase(const ShardedCacheOptions& opts);
bool strict_capacity_limit,
std::shared_ptr<MemoryAllocator> memory_allocator);
virtual ~ShardedCacheBase() = default; virtual ~ShardedCacheBase() = default;
int GetNumShardBits() const; int GetNumShardBits() const;
@ -134,10 +132,8 @@ class ShardedCache : public ShardedCacheBase {
using HashCref = typename CacheShard::HashCref; using HashCref = typename CacheShard::HashCref;
using HandleImpl = typename CacheShard::HandleImpl; using HandleImpl = typename CacheShard::HandleImpl;
ShardedCache(size_t capacity, int num_shard_bits, bool strict_capacity_limit, explicit ShardedCache(const ShardedCacheOptions& opts)
std::shared_ptr<MemoryAllocator> allocator) : ShardedCacheBase(opts),
: ShardedCacheBase(capacity, num_shard_bits, strict_capacity_limit,
allocator),
shards_(reinterpret_cast<CacheShard*>(port::cacheline_aligned_alloc( shards_(reinterpret_cast<CacheShard*>(port::cacheline_aligned_alloc(
sizeof(CacheShard) * GetNumShards()))), sizeof(CacheShard) * GetNumShards()))),
destroy_shards_in_dtor_(false) {} destroy_shards_in_dtor_(false) {}

@ -620,9 +620,9 @@ class MockCache : public LRUCache {
static uint32_t low_pri_insert_count; static uint32_t low_pri_insert_count;
MockCache() MockCache()
: LRUCache((size_t)1 << 25 /*capacity*/, 0 /*num_shard_bits*/, : LRUCache(LRUCacheOptions(
false /*strict_capacity_limit*/, 0.0 /*high_pri_pool_ratio*/, size_t{1} << 25 /*capacity*/, 0 /*num_shard_bits*/,
0.0 /*low_pri_pool_ratio*/) {} false /*strict_capacity_limit*/, 0.0 /*high_pri_pool_ratio*/)) {}
using ShardedCache::Insert; using ShardedCache::Insert;

@ -151,6 +151,13 @@ struct ShardedCacheOptions {
metadata_charge_policy(_metadata_charge_policy) {} metadata_charge_policy(_metadata_charge_policy) {}
}; };
// LRUCache - A cache using LRU eviction to stay at or below a set capacity.
// The cache is sharded to 2^num_shard_bits shards, by hash of the key.
// The total capacity is divided and evenly assigned to each shard, and each
// shard has its own LRU list for evictions. Each shard also has a mutex for
// exclusive access during operations; even read operations need exclusive
// access in order to update the LRU list. Mutex contention is usually low
// with enough shards.
struct LRUCacheOptions : public ShardedCacheOptions { struct LRUCacheOptions : public ShardedCacheOptions {
// Ratio of cache reserved for high-priority and low-priority entries, // Ratio of cache reserved for high-priority and low-priority entries,
// respectively. (See Cache::Priority below more information on the levels.) // respectively. (See Cache::Priority below more information on the levels.)
@ -158,7 +165,8 @@ struct LRUCacheOptions : public ShardedCacheOptions {
// values cannot exceed 1. // values cannot exceed 1.
// //
// If high_pri_pool_ratio is greater than zero, a dedicated high-priority LRU // If high_pri_pool_ratio is greater than zero, a dedicated high-priority LRU
// list is maintained by the cache. Similarly, if low_pri_pool_ratio is // list is maintained by the cache. A ratio of 0.5 means non-high-priority
// entries will use midpoint insertion. Similarly, if low_pri_pool_ratio is
// greater than zero, a dedicated low-priority LRU list is maintained. // greater than zero, a dedicated low-priority LRU list is maintained.
// There is also a bottom-priority LRU list, which is always enabled and not // There is also a bottom-priority LRU list, which is always enabled and not
// explicitly configurable. Entries are spilled over to the next available // explicitly configurable. Entries are spilled over to the next available
@ -173,9 +181,6 @@ struct LRUCacheOptions : public ShardedCacheOptions {
// otherwise, they are placed in the bottom-priority pool.) This results // otherwise, they are placed in the bottom-priority pool.) This results
// in lower-priority entries without hits getting evicted from the cache // in lower-priority entries without hits getting evicted from the cache
// sooner. // sooner.
//
// Default values: high_pri_pool_ratio = 0.5 (which is referred to as
// "midpoint insertion"), low_pri_pool_ratio = 0
double high_pri_pool_ratio = 0.5; double high_pri_pool_ratio = 0.5;
double low_pri_pool_ratio = 0.0; double low_pri_pool_ratio = 0.0;
@ -199,31 +204,36 @@ struct LRUCacheOptions : public ShardedCacheOptions {
high_pri_pool_ratio(_high_pri_pool_ratio), high_pri_pool_ratio(_high_pri_pool_ratio),
low_pri_pool_ratio(_low_pri_pool_ratio), low_pri_pool_ratio(_low_pri_pool_ratio),
use_adaptive_mutex(_use_adaptive_mutex) {} use_adaptive_mutex(_use_adaptive_mutex) {}
// Construct an instance of LRUCache using these options
std::shared_ptr<Cache> MakeSharedCache() const;
}; };
// Create a new cache with a fixed size capacity. The cache is sharded // DEPRECATED wrapper function
// to 2^num_shard_bits shards, by hash of the key. The total capacity inline std::shared_ptr<Cache> NewLRUCache(
// is divided and evenly assigned to each shard. If strict_capacity_limit
// is set, insert to the cache will fail when cache is full. User can also
// set percentage of the cache reserves for high priority entries via
// high_pri_pool_pct.
// num_shard_bits = -1 means it is automatically determined: every shard
// will be at least 512KB and number of shard bits will not exceed 6.
extern std::shared_ptr<Cache> NewLRUCache(
size_t capacity, int num_shard_bits = -1, size_t capacity, int num_shard_bits = -1,
bool strict_capacity_limit = false, double high_pri_pool_ratio = 0.5, bool strict_capacity_limit = false, double high_pri_pool_ratio = 0.5,
std::shared_ptr<MemoryAllocator> memory_allocator = nullptr, std::shared_ptr<MemoryAllocator> memory_allocator = nullptr,
bool use_adaptive_mutex = kDefaultToAdaptiveMutex, bool use_adaptive_mutex = kDefaultToAdaptiveMutex,
CacheMetadataChargePolicy metadata_charge_policy = CacheMetadataChargePolicy metadata_charge_policy =
kDefaultCacheMetadataChargePolicy, kDefaultCacheMetadataChargePolicy,
double low_pri_pool_ratio = 0.0); double low_pri_pool_ratio = 0.0) {
return LRUCacheOptions(capacity, num_shard_bits, strict_capacity_limit,
extern std::shared_ptr<Cache> NewLRUCache(const LRUCacheOptions& cache_opts); high_pri_pool_ratio, memory_allocator,
use_adaptive_mutex, metadata_charge_policy,
low_pri_pool_ratio)
.MakeSharedCache();
}
// DEPRECATED wrapper function
inline std::shared_ptr<Cache> NewLRUCache(const LRUCacheOptions& cache_opts) {
return cache_opts.MakeSharedCache();
}
// EXPERIMENTAL // EXPERIMENTAL
// Options structure for configuring a SecondaryCache instance based on // Options structure for configuring a SecondaryCache instance with in-memory
// LRUCache. The LRUCacheOptions.secondary_cache is not used and // compression. The implementation uses LRUCache so inherits its options,
// should not be set. // except LRUCacheOptions.secondary_cache is not used and should not be set.
struct CompressedSecondaryCacheOptions : LRUCacheOptions { struct CompressedSecondaryCacheOptions : LRUCacheOptions {
// The compression method (if any) that is used to compress data. // The compression method (if any) that is used to compress data.
CompressionType compression_type = CompressionType::kLZ4Compression; CompressionType compression_type = CompressionType::kLZ4Compression;
@ -264,11 +274,16 @@ struct CompressedSecondaryCacheOptions : LRUCacheOptions {
compress_format_version(_compress_format_version), compress_format_version(_compress_format_version),
enable_custom_split_merge(_enable_custom_split_merge), enable_custom_split_merge(_enable_custom_split_merge),
do_not_compress_roles(_do_not_compress_roles) {} do_not_compress_roles(_do_not_compress_roles) {}
// Construct an instance of CompressedSecondaryCache using these options
std::shared_ptr<SecondaryCache> MakeSharedSecondaryCache() const;
// Avoid confusion with LRUCache
std::shared_ptr<Cache> MakeSharedCache() const = delete;
}; };
// EXPERIMENTAL // DEPRECATED wrapper function
// Create a new Secondary Cache that is implemented on top of LRUCache. inline std::shared_ptr<SecondaryCache> NewCompressedSecondaryCache(
extern std::shared_ptr<SecondaryCache> NewCompressedSecondaryCache(
size_t capacity, int num_shard_bits = -1, size_t capacity, int num_shard_bits = -1,
bool strict_capacity_limit = false, double high_pri_pool_ratio = 0.5, bool strict_capacity_limit = false, double high_pri_pool_ratio = 0.5,
double low_pri_pool_ratio = 0.0, double low_pri_pool_ratio = 0.0,
@ -280,10 +295,21 @@ extern std::shared_ptr<SecondaryCache> NewCompressedSecondaryCache(
uint32_t compress_format_version = 2, uint32_t compress_format_version = 2,
bool enable_custom_split_merge = false, bool enable_custom_split_merge = false,
const CacheEntryRoleSet& _do_not_compress_roles = { const CacheEntryRoleSet& _do_not_compress_roles = {
CacheEntryRole::kFilterBlock}); CacheEntryRole::kFilterBlock}) {
return CompressedSecondaryCacheOptions(
extern std::shared_ptr<SecondaryCache> NewCompressedSecondaryCache( capacity, num_shard_bits, strict_capacity_limit,
const CompressedSecondaryCacheOptions& opts); high_pri_pool_ratio, low_pri_pool_ratio, memory_allocator,
use_adaptive_mutex, metadata_charge_policy, compression_type,
compress_format_version, enable_custom_split_merge,
_do_not_compress_roles)
.MakeSharedSecondaryCache();
}
// DEPRECATED wrapper function
inline std::shared_ptr<SecondaryCache> NewCompressedSecondaryCache(
const CompressedSecondaryCacheOptions& opts) {
return opts.MakeSharedSecondaryCache();
}
// HyperClockCache - A lock-free Cache alternative for RocksDB block cache // HyperClockCache - A lock-free Cache alternative for RocksDB block cache
// that offers much improved CPU efficiency vs. LRUCache under high parallel // that offers much improved CPU efficiency vs. LRUCache under high parallel

Loading…
Cancel
Save