@@ -187,12 +187,43 @@ impl Drop for ReadOptions {
}

impl BlockBasedOptions {
    /// Approximate size of user data packed per block. Note that the
    /// block size specified here corresponds to uncompressed data. The
    /// actual size of the unit read from disk may be smaller if
    /// compression is enabled. This parameter can be changed dynamically.
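    ///
    /// A minimal usage sketch; it assumes the crate's `BlockBasedOptions` and
    /// `Options::set_block_based_table_factory`, which are not part of this hunk.
    ///
    /// ```
    /// use rocksdb::{BlockBasedOptions, Options};
    ///
    /// let mut block_opts = BlockBasedOptions::default();
    /// // Pack roughly 16 KiB of uncompressed user data per block.
    /// block_opts.set_block_size(16 * 1024);
    ///
    /// let mut opts = Options::default();
    /// opts.set_block_based_table_factory(&block_opts);
    /// ```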
    pub fn set_block_size(&mut self, size: usize) {
        unsafe {
            ffi::rocksdb_block_based_options_set_block_size(self.inner, size);
        }
    }

    /// Block size for partitioned metadata. Currently applied to indexes when
    /// kTwoLevelIndexSearch is used and to filters when partition_filters is used.
    ///
    /// Note: Since in the current implementation the filters and index partitions
    /// are aligned, an index/filter block is created when either the index or the
    /// filter block size reaches the specified limit.
    ///
    /// Note: this limit is currently applied only to index blocks; a filter
    /// partition is cut right after an index block is cut.
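    ///
    /// A short sketch, assuming the crate's `BlockBasedOptions`; enabling
    /// partitioned indexes/filters themselves is not shown here.
    ///
    /// ```
    /// use rocksdb::BlockBasedOptions;
    ///
    /// let mut block_opts = BlockBasedOptions::default();
    /// // Cut index (and filter) partitions at roughly 4 KiB of metadata.
    /// block_opts.set_metadata_block_size(4096);
    /// ```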
    pub fn set_metadata_block_size(&mut self, size: usize) {
        unsafe {
            ffi::rocksdb_block_based_options_set_metadata_block_size(self.inner, size as u64);
        }
    }

    /// Use partitioned full filters for each SST file. This option is
    /// incompatible with block-based filters.
    ///
    /// Note: currently this option requires kTwoLevelIndexSearch to be set
    /// as well.
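    ///
    /// A short sketch (configuring the required two-level index is not shown
    /// here):
    ///
    /// ```
    /// use rocksdb::BlockBasedOptions;
    ///
    /// let mut block_opts = BlockBasedOptions::default();
    /// // Partitioned full filters; a two-level index must also be enabled (see note above).
    /// block_opts.set_partition_filters(true);
    /// ```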
    pub fn set_partition_filters(&mut self, enabled: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_partition_filters(self.inner, enabled as c_uchar);
        }
    }

    /// When provided: use the specified cache for blocks.
    /// Otherwise rocksdb will automatically create and use an 8MB internal cache.
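    ///
    /// A short sketch, assuming the crate's `BlockBasedOptions`:
    ///
    /// ```
    /// use rocksdb::BlockBasedOptions;
    ///
    /// let mut block_opts = BlockBasedOptions::default();
    /// // Replace the default 8MB internal cache with a 64 MiB LRU block cache.
    /// block_opts.set_lru_cache(64 * 1024 * 1024);
    /// ```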
    pub fn set_lru_cache(&mut self, size: size_t) {
        let cache = new_cache(size);
        unsafe {
@@ -202,6 +233,20 @@ impl BlockBasedOptions {
        }
    }

    /// When configured: use the specified cache for compressed blocks.
    /// Otherwise rocksdb will not use a compressed block cache.
    ///
    /// Note: though it looks similar to `block_cache`, RocksDB doesn't put the
    /// same type of object there.
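    ///
    /// A short sketch, again assuming the crate's `BlockBasedOptions`:
    ///
    /// ```
    /// use rocksdb::BlockBasedOptions;
    ///
    /// let mut block_opts = BlockBasedOptions::default();
    /// // Add a separate 32 MiB cache for compressed blocks.
    /// block_opts.set_lru_cache_compressed(32 * 1024 * 1024);
    /// ```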
    pub fn set_lru_cache_compressed(&mut self, size: size_t) {
        let cache = new_cache(size);
        unsafe {
            // Since cache is wrapped in shared_ptr, we don't need to
            // call rocksdb_cache_destroy explicitly.
            ffi::rocksdb_block_based_options_set_block_cache_compressed(self.inner, cache);
        }
    }

    pub fn disable_cache(&mut self) {
        unsafe {
            ffi::rocksdb_block_based_options_set_no_block_cache(self.inner, true as c_uchar);
@@ -259,6 +304,22 @@ impl BlockBasedOptions {
        }
    }

    /// If `cache_index_and_filter_blocks` is true and this option is also true, then
    /// the top-level index of partitioned filter and index blocks is stored in
    /// the cache, but a reference is held in the "table reader" object so the
    /// blocks are pinned and only evicted from the cache when the table reader is
    /// freed. This is not limited to L0 in the LSM tree.
    ///
    /// Default: false.
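    ///
    /// A short sketch; it assumes `set_cache_index_and_filter_blocks`, which is
    /// not part of this hunk:
    ///
    /// ```
    /// use rocksdb::BlockBasedOptions;
    ///
    /// let mut block_opts = BlockBasedOptions::default();
    /// // Pinning only matters when index/filter blocks go through the block cache.
    /// block_opts.set_cache_index_and_filter_blocks(true);
    /// block_opts.set_pin_top_level_index_and_filter(true);
    /// ```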
    pub fn set_pin_top_level_index_and_filter(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_pin_top_level_index_and_filter(
                self.inner,
                v as c_uchar,
            );
        }
    }

    /// Format version, reserved for backward compatibility.
    /// See https://github.com/facebook/rocksdb/blob/f059c7d9b96300091e07429a60f4ad55dac84859/include/rocksdb/table.h#L249-L274.
    ///
@@ -385,6 +446,31 @@ impl Options {
        }
    }

    /// Optimize universal style compaction.
    ///
    /// Default values for some parameters in `Options` are not optimized for heavy
    /// workloads and big datasets, which means you might observe write stalls under
    /// some conditions.
    ///
    /// This can be used as one of the starting points for tuning RocksDB options in
    /// such cases.
    ///
    /// Internally, it sets `write_buffer_size`, `min_write_buffer_number_to_merge`,
    /// `max_write_buffer_number`, `level0_file_num_compaction_trigger`,
    /// `target_file_size_base`, and `max_bytes_for_level_base`, so it can override
    /// those parameters if they were set before.
    ///
    /// It sets buffer sizes so that memory consumption is constrained by
    /// `memtable_memory_budget`.
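    ///
    /// A short sketch, assuming the crate's `Options`:
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// // Budget roughly 512 MiB of memtable memory for a universal-compaction workload.
    /// opts.optimize_universal_style_compaction(512 * 1024 * 1024);
    /// ```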
    pub fn optimize_universal_style_compaction(&mut self, memtable_memory_budget: usize) {
        unsafe {
            ffi::rocksdb_options_optimize_universal_style_compaction(
                self.inner,
                memtable_memory_budget as u64,
            );
        }
    }

    /// If true, the database will be created if it is missing.
    ///
    /// Default: `false`
@@ -1246,6 +1332,38 @@ impl Options {
        unsafe { ffi::rocksdb_options_set_disable_auto_compactions(self.inner, disable as c_int) }
    }

    /// Sets the page size for huge pages used by the memtable's arena.
    ///
    /// If <= 0, the arena allocates from malloc rather than from huge pages.
    /// Users are responsible for reserving huge pages so they can be allocated, for
    /// example:
    /// sysctl -w vm.nr_hugepages=20
    /// See the Linux kernel documentation in Documentation/vm/hugetlbpage.txt.
    /// If there are not enough free huge pages available, allocation falls back to
    /// malloc.
    ///
    /// Dynamically changeable through the SetOptions() API.
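    ///
    /// A short sketch (the huge pages themselves must still be reserved via
    /// sysctl as described above):
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// // Ask the memtable arena to allocate from 2 MiB huge pages when available.
    /// opts.set_memtable_huge_page_size(2 * 1024 * 1024);
    /// ```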
    pub fn set_memtable_huge_page_size(&mut self, size: size_t) {
        unsafe { ffi::rocksdb_options_set_memtable_huge_page_size(self.inner, size) }
    }

    /// By default, a single write thread queue is maintained. The thread that
    /// gets to the head of the queue becomes the write batch group leader and is
    /// responsible for writing the batch group to the WAL and memtable.
    ///
    /// If `enable_pipelined_write` is true, separate write thread queues are
    /// maintained for WAL writes and memtable writes. A write thread first
    /// enters the WAL writer queue and then the memtable writer queue. A thread
    /// pending on the WAL writer queue therefore only has to wait for previous
    /// writers to finish their WAL writing, not their memtable writing. Enabling
    /// the feature may improve write throughput and reduce latency of the
    /// prepare phase of two-phase commit.
    ///
    /// Default: false
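    ///
    /// A short sketch, assuming the crate's `Options`:
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// // Keep WAL writes and memtable writes in separate writer queues.
    /// opts.set_enable_pipelined_write(true);
    /// ```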
    pub fn set_enable_pipelined_write(&mut self, value: bool) {
        unsafe { ffi::rocksdb_options_set_enable_pipelined_write(self.inner, value as c_uchar) }
    }

    /// Defines the underlying memtable implementation.
    /// See https://github.com/facebook/rocksdb/wiki/MemTable for more information.
    /// Defaults to using a skiplist.
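    ///
    /// A tentative sketch; it assumes the crate's `MemtableFactory` enum and a
    /// corresponding setter on `Options`, both outside the lines shown here:
    ///
    /// ```
    /// use rocksdb::{MemtableFactory, Options};
    ///
    /// let mut opts = Options::default();
    /// // Swap the default skiplist memtable for the vector-based implementation.
    /// opts.set_memtable_factory(MemtableFactory::Vector);
    /// ```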