@@ -151,6 +151,13 @@ struct ShardedCacheOptions {
metadata_charge_policy(_metadata_charge_policy) {}
};
// LRUCache - A cache using LRU eviction to stay at or below a set capacity.
// The cache is sharded to 2^num_shard_bits shards, by hash of the key.
// The total capacity is divided and evenly assigned to each shard, and each
// shard has its own LRU list for evictions. Each shard also has a mutex for
// exclusive access during operations; even read operations need exclusive
// access in order to update the LRU list. Mutex contention is usually low
// with enough shards.
struct LRUCacheOptions : public ShardedCacheOptions {
// Ratio of cache reserved for high-priority and low-priority entries,
// respectively. (See Cache::Priority below for more information on the levels.)
@@ -158,7 +165,8 @@ struct LRUCacheOptions : public ShardedCacheOptions {
// values cannot exceed 1.
//
// If high_pri_pool_ratio is greater than zero, a dedicated high-priority LRU
// list is maintained by the cache. Similarly, if low_pri_pool_ratio is
// list is maintained by the cache. A ratio of 0.5 means non-high-priority
// entries will use midpoint insertion. Similarly, if low_pri_pool_ratio is
// greater than zero, a dedicated low-priority LRU list is maintained.
// There is also a bottom-priority LRU list, which is always enabled and not
// explicitly configurable. Entries are spilled over to the next available
@@ -173,9 +181,6 @@ struct LRUCacheOptions : public ShardedCacheOptions {
// otherwise, they are placed in the bottom-priority pool.) This results
// in lower-priority entries without hits getting evicted from the cache
// sooner.
//
// Default values: high_pri_pool_ratio = 0.5 (which is referred to as
// "midpoint insertion"), low_pri_pool_ratio = 0
double high_pri_pool_ratio = 0.5;
double low_pri_pool_ratio = 0.0;
@@ -199,31 +204,36 @@ struct LRUCacheOptions : public ShardedCacheOptions {
high_pri_pool_ratio(_high_pri_pool_ratio),
low_pri_pool_ratio(_low_pri_pool_ratio),
use_adaptive_mutex(_use_adaptive_mutex) {}
// Construct an instance of LRUCache using these options
std::shared_ptr<Cache> MakeSharedCache() const;
};
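// Example (illustrative sketch, not compiled as part of this header): build an
// LRUCache through the options struct and MakeSharedCache(). Assumes the
// RocksDB namespace is in scope, that capacity, num_shard_bits, and
// strict_capacity_limit are inherited from ShardedCacheOptions, and that
// BlockBasedTableOptions::block_cache comes from rocksdb/table.h; all of these
// are outside this hunk.
//
//   #include "rocksdb/cache.h"
//   #include "rocksdb/table.h"
//
//   LRUCacheOptions lru_opts;
//   lru_opts.capacity = 512 << 20;           // 512 MB total across all shards
//   lru_opts.num_shard_bits = -1;            // choose shard count automatically
//   lru_opts.strict_capacity_limit = false;  // full cache fails inserts if true
//   lru_opts.high_pri_pool_ratio = 0.5;      // midpoint insertion (default)
//   lru_opts.low_pri_pool_ratio = 0.0;       // no dedicated low-pri pool
//   std::shared_ptr<Cache> cache = lru_opts.MakeSharedCache();
//
//   BlockBasedTableOptions table_opts;
//   table_opts.block_cache = cache;  // typical use: as the block cache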
// Create a new cache with a fixed size capacity. The cache is sharded
// to 2^num_shard_bits shards, by hash of the key. The total capacity
// is divided and evenly assigned to each shard. If strict_capacity_limit
// is set, inserts into the cache fail when the cache is full. Users can
// also reserve a percentage of the cache for high-priority entries via
// high_pri_pool_ratio.
// num_shard_bits = -1 means it is determined automatically: every shard
// will be at least 512KB and the number of shard bits will not exceed 6.
extern std::shared_ptr<Cache> NewLRUCache(
// DEPRECATED wrapper function
inline std::shared_ptr<Cache> NewLRUCache(
size_t capacity, int num_shard_bits = -1,
bool strict_capacity_limit = false, double high_pri_pool_ratio = 0.5,
std::shared_ptr<MemoryAllocator> memory_allocator = nullptr,
bool use_adaptive_mutex = kDefaultToAdaptiveMutex,
CacheMetadataChargePolicy metadata_charge_policy =
kDefaultCacheMetadataChargePolicy,
double low_pri_pool_ratio = 0.0);
extern std::shared_ptr<Cache> NewLRUCache(const LRUCacheOptions& cache_opts);
double low_pri_pool_ratio = 0.0) {
return LRUCacheOptions(capacity, num_shard_bits, strict_capacity_limit,
high_pri_pool_ratio, memory_allocator,
use_adaptive_mutex, metadata_charge_policy,
low_pri_pool_ratio)
.MakeSharedCache();
}
// DEPRECATED wrapper function
inline std::shared_ptr<Cache> NewLRUCache(const LRUCacheOptions& cache_opts) {
return cache_opts.MakeSharedCache();
}
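// Migration note (illustrative sketch): the deprecated wrappers above only
// forward to LRUCacheOptions::MakeSharedCache(), so a positional NewLRUCache()
// call maps onto option fields one by one. The capacity and num_shard_bits
// fields are assumed to come from ShardedCacheOptions (outside this hunk), and
// the values below are arbitrary examples.
//
//   // Before (deprecated):
//   auto cache = NewLRUCache(/*capacity=*/64 << 20, /*num_shard_bits=*/6);
//
//   // After:
//   LRUCacheOptions opts;
//   opts.capacity = 64 << 20;
//   opts.num_shard_bits = 6;
//   auto cache = opts.MakeSharedCache();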
// EXPERIMENTAL
// Options structure for configuring a SecondaryCache instance based on
// LRUCache. The LRUCacheOptions.secondary_cache is not used and
// should not be set.
// Options structure for configuring a SecondaryCache instance with in-memory
// compression. The implementation uses LRUCache, so it inherits its options,
// except LRUCacheOptions.secondary_cache is not used and should not be set.
struct CompressedSecondaryCacheOptions : LRUCacheOptions {
// The compression method (if any) that is used to compress data.
CompressionType compression_type = CompressionType::kLZ4Compression;
@@ -264,11 +274,16 @@ struct CompressedSecondaryCacheOptions : LRUCacheOptions {
compress_format_version(_compress_format_version),
enable_custom_split_merge(_enable_custom_split_merge),
do_not_compress_roles(_do_not_compress_roles) {}
// Construct an instance of CompressedSecondaryCache using these options
std::shared_ptr<SecondaryCache> MakeSharedSecondaryCache() const;
// Avoid confusion with LRUCache
std::shared_ptr<Cache> MakeSharedCache() const = delete;
};
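// Example (illustrative sketch): configure and build a compressed secondary
// cache with MakeSharedSecondaryCache(). The capacity field is assumed to be
// inherited from ShardedCacheOptions (outside this hunk); compression_type and
// compress_format_version are shown with the defaults declared above.
//
//   CompressedSecondaryCacheOptions sec_opts;
//   sec_opts.capacity = 256 << 20;  // 256 MB of compressed cache space
//   sec_opts.compression_type = CompressionType::kLZ4Compression;
//   sec_opts.compress_format_version = 2;
//   std::shared_ptr<SecondaryCache> sec_cache =
//       sec_opts.MakeSharedSecondaryCache();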
// EXPERIMENTAL
// Create a new Secondary Cache that is implemented on top of LRUCache.
extern std::shared_ptr<SecondaryCache> NewCompressedSecondaryCache(
// DEPRECATED wrapper function
inline std::shared_ptr<SecondaryCache> NewCompressedSecondaryCache(
size_t capacity, int num_shard_bits = -1,
bool strict_capacity_limit = false, double high_pri_pool_ratio = 0.5,
double low_pri_pool_ratio = 0.0,
@@ -280,10 +295,21 @@ extern std::shared_ptr<SecondaryCache> NewCompressedSecondaryCache(
uint32_t compress_format_version = 2,
bool enable_custom_split_merge = false,
const CacheEntryRoleSet& _do_not_compress_roles = {
CacheEntryRole::kFilterBlock});
extern std::shared_ptr<SecondaryCache> NewCompressedSecondaryCache(
const CompressedSecondaryCacheOptions& opts);
CacheEntryRole::kFilterBlock}) {
return CompressedSecondaryCacheOptions(
capacity, num_shard_bits, strict_capacity_limit,
high_pri_pool_ratio, low_pri_pool_ratio, memory_allocator,
use_adaptive_mutex, metadata_charge_policy, compression_type,
compress_format_version, enable_custom_split_merge,
_do_not_compress_roles)
.MakeSharedSecondaryCache();
}
// DEPRECATED wrapper function
inline std::shared_ptr<SecondaryCache> NewCompressedSecondaryCache(
const CompressedSecondaryCacheOptions& opts) {
return opts.MakeSharedSecondaryCache();
}
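// Example (illustrative sketch): placing a compressed secondary cache behind a
// primary LRUCache. Per the comment above, secondary_cache must not be set on
// CompressedSecondaryCacheOptions itself; instead the built SecondaryCache is
// assigned to the primary cache's LRUCacheOptions::secondary_cache field
// (declared outside this hunk). The capacities are arbitrary.
//
//   CompressedSecondaryCacheOptions sec_opts;
//   sec_opts.capacity = 256 << 20;
//
//   LRUCacheOptions lru_opts;
//   lru_opts.capacity = 64 << 20;
//   lru_opts.secondary_cache = sec_opts.MakeSharedSecondaryCache();
//   std::shared_ptr<Cache> cache = lru_opts.MakeSharedCache();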
// HyperClockCache - A lock-free Cache alternative for RocksDB block cache
// that offers much improved CPU efficiency vs. LRUCache under high parallel