From b2943dc133b4efed50d640934451d9b1c17e9d94 Mon Sep 17 00:00:00 2001
From: Niklas Fiekas
Date: Wed, 26 Apr 2023 10:46:42 +0200
Subject: [PATCH] Add Cache::new_hyper_clock_cache() (#775)

---
 src/db_options.rs | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/src/db_options.rs b/src/db_options.rs
index ef0ebe2..c9fb5c5 100644
--- a/src/db_options.rs
+++ b/src/db_options.rs
@@ -57,6 +57,35 @@ impl Cache {
         Cache(Arc::new(CacheWrapper { inner }))
     }
 
+    /// Creates a HyperClockCache with capacity in bytes.
+    ///
+    /// `estimated_entry_charge` is an important tuning parameter. The optimal
+    /// choice at any given time is
+    /// `(cache.get_usage() - 64 * cache.get_table_address_count()) /
+    /// cache.get_occupancy_count()`, or approximately `cache.get_usage() /
+    /// cache.get_occupancy_count()`.
+    ///
+    /// However, the value cannot be changed dynamically, so as the cache
+    /// composition changes at runtime, the following tradeoffs apply:
+    ///
+    /// * If the estimate is substantially too high (e.g., 25% higher),
+    ///   the cache may have to evict entries to prevent load factors that
+    ///   would dramatically affect lookup times.
+    /// * If the estimate is substantially too low (e.g., less than half),
+    ///   then meta data space overhead is substantially higher.
+    ///
+    /// The latter is generally preferable, and picking the larger of
+    /// block size and meta data block size is a reasonable choice that
+    /// errs towards this side.
+    pub fn new_hyper_clock_cache(capacity: size_t, estimated_entry_charge: size_t) -> Cache {
+        Cache(Arc::new(CacheWrapper {
+            inner: NonNull::new(unsafe {
+                ffi::rocksdb_cache_create_hyper_clock(capacity, estimated_entry_charge)
+            })
+            .unwrap(),
+        }))
+    }
+
     /// Returns the cache memory usage in bytes.
     pub fn get_usage(&self) -> usize {
         unsafe { ffi::rocksdb_cache_get_usage(self.0.inner.as_ptr()) }
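
A minimal usage sketch (not part of the patch), showing how the new constructor might be wired into block-based table options. It assumes the crate's existing `BlockBasedOptions::set_block_cache` and `Options::set_block_based_table_factory` APIs; the capacity, entry-charge estimate, and database path are illustrative only, with the charge estimate erring toward a typical data block size as the doc comment above recommends.

    use rocksdb::{BlockBasedOptions, Cache, Options, DB};

    fn main() {
        // 1 GiB HyperClockCache; the per-entry charge estimate (16 KiB here)
        // errs toward the data block size, per the guidance in the doc comment.
        let cache = Cache::new_hyper_clock_cache(1 << 30, 16 * 1024);

        // Route block cache lookups through the new cache.
        let mut block_opts = BlockBasedOptions::default();
        block_opts.set_block_cache(&cache);

        let mut opts = Options::default();
        opts.create_if_missing(true);
        opts.set_block_based_table_factory(&block_opts);

        let db = DB::open(&opts, "/tmp/hyper-clock-example").unwrap();
        db.put(b"key", b"value").unwrap();
        assert_eq!(db.get(b"key").unwrap().unwrap(), b"value");
    }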