From 841c23b26549b6a313b66cf9eacb130f8d0d2138 Mon Sep 17 00:00:00 2001
From: Stanislav Tkach
Date: Wed, 12 May 2021 11:37:59 +0300
Subject: [PATCH] Add the DB::key_may_exist_cf_opt method

---
 CHANGELOG.md                  |  7 +++++--
 src/db_options.rs             | 13 +++++++++++++
 tests/test_db.rs              |  4 ++--
 tests/test_rocksdb_options.rs | 11 +++++++++++
 4 files changed, 31 insertions(+), 4 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 03b1ddc..260d6f7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,9 @@
 
 ## [Unreleased]
 
+* Add `DB::key_may_exist_cf_opt` method (stanislav-tkach)
+* Add `Options::set_zstd_max_train_bytes` method (stanislav-tkach)
+
 ## 0.16.0 (2021-04-18)
 
 * Add `DB::cancel_all_background_work` method (stanislav-tkach)
@@ -15,13 +18,13 @@
 * Bump `librocksdb-sys` up to 6.17.3 (ordian)
 * Remove the need for `&mut self` in `create_cf` and `drop_cf` (v2) (ryoqun)
 * Keep Cache and Env alive with Rc (acrrd)
-* Add DB::open_cf_with_ttl method (fdeantoni)
+* Add `DB::open_cf_with_ttl` method (fdeantoni)
 
 ## 0.15.0 (2020-08-25)
 
 * Fix building rocksdb library on windows host (aleksuss)
 * Add github actions CI for windows build (aleksuss)
-* Update doc for Options::set_compression_type (wqfish)
+* Update doc for `Options::set_compression_type` (wqfish)
 * Add clippy linter in CI (aleksuss)
 * Use DBPath for backup_restore test (wqfish)
 * Allow to build RocksDB with a different stdlib (calavera)
diff --git a/src/db_options.rs b/src/db_options.rs
index 50e2d28..53d28bf 100644
--- a/src/db_options.rs
+++ b/src/db_options.rs
@@ -977,6 +977,19 @@ impl Options {
         }
     }
 
+    /// Sets maximum size of training data passed to zstd's dictionary trainer. Using zstd's
+    /// dictionary trainer can achieve even better compression ratio improvements than using
+    /// `max_dict_bytes` alone.
+    ///
+    /// The training data will be used to generate a dictionary of max_dict_bytes.
+    ///
+    /// Default: 0.
+    pub fn set_zstd_max_train_bytes(&mut self, value: c_int) {
+        unsafe {
+            ffi::rocksdb_options_set_compression_options_zstd_max_train_bytes(self.inner, value);
+        }
+    }
+
     /// If non-zero, we perform bigger reads when doing compaction. If you're
     /// running RocksDB on spinning disks, you should set this to at least 2MB.
     /// That way RocksDB's compaction is doing sequential instead of random reads.
diff --git a/tests/test_db.rs b/tests/test_db.rs
index 8aa682f..d808cdc 100644
--- a/tests/test_db.rs
+++ b/tests/test_db.rs
@@ -968,7 +968,7 @@ fn multi_get_cf() {
 
 #[test]
 fn key_may_exist() {
-    let path = DBPath::new("_rust_rocksdb_multi_get");
+    let path = DBPath::new("_rust_key_may_exist");
 
     {
         let db = DB::open_default(&path).unwrap();
@@ -982,7 +982,7 @@ fn key_may_exist() {
 
 #[test]
 fn key_may_exist_cf() {
-    let path = DBPath::new("_rust_rocksdb_multi_get_cf");
+    let path = DBPath::new("_rust_key_may_exist_cf");
 
     {
         let mut opts = Options::default();
diff --git a/tests/test_rocksdb_options.rs b/tests/test_rocksdb_options.rs
index 8089b3d..62a08bb 100644
--- a/tests/test_rocksdb_options.rs
+++ b/tests/test_rocksdb_options.rs
@@ -134,3 +134,14 @@ fn test_set_data_block_index_type() {
         assert!(settings.contains("data_block_hash_table_util_ratio: 0.350000"));
     }
 }
+
+#[test]
+fn set_compression_options_zstd_max_train_bytes() {
+    let path = DBPath::new("_rust_set_compression_options_zstd_max_train_bytes");
+    {
+        let mut opts = Options::default();
+        opts.create_if_missing(true);
+        opts.set_zstd_max_train_bytes(100);
+        let _db = DB::open(&opts, &path).unwrap();
+    }
+}
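
A minimal usage sketch for the new setter (not part of the patch), assuming the published `rocksdb` crate built with zstd support and using the existing `set_compression_type` and `set_compression_options` setters; the database path and the byte sizes below are illustrative choices, not values from this change. The trainer only comes into play when zstd compression is selected and a non-zero `max_dict_bytes` (the last argument of `set_compression_options`) bounds the dictionary it produces.

use rocksdb::{DBCompressionType, Options, DB};

fn main() {
    let mut opts = Options::default();
    opts.create_if_missing(true);

    // Dictionary compression needs zstd plus a non-zero dictionary size;
    // the last argument of set_compression_options is max_dict_bytes.
    opts.set_compression_type(DBCompressionType::Zstd);
    opts.set_compression_options(-14, 3, 0, 16 * 1024);

    // The setter added by this patch: cap the sample data passed to zstd's
    // dictionary trainer (sizes here are illustrative, not recommendations).
    opts.set_zstd_max_train_bytes(100 * 16 * 1024);

    let db = DB::open(&opts, "_zstd_train_example").unwrap();
    db.put(b"key", b"value").unwrap();
}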