Release 0.17.0 (#539)

Branch: master
Oleksandr Anyshchenko authored 3 years ago, committed by GitHub
parent abf121f20c, commit 6e3d781c15
Changed files (lines changed in parentheses):

 1. .github/workflows/rust.yml (2)
 2. CHANGELOG.md (12)
 3. Cargo.toml (4)
 4. librocksdb-sys/Cargo.toml (2)
 5. src/column_family.rs (2)
 6. src/compaction_filter.rs (10)
 7. src/compaction_filter_factory.rs (8)
 8. src/db.rs (4)
 9. src/db_iterator.rs (2)
 10. tests/test_db.rs (13)
 11. tests/test_merge_operator.rs (99)

.github/workflows/rust.yml:

@@ -41,7 +41,7 @@ jobs:
       uses: actions-rs/clippy-check@v1
       with:
         token: ${{ secrets.GITHUB_TOKEN }}
-        args: -- -D warnings
+        args: --all-targets -- -D warnings
 
   audit:
     name: Security audit
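Passing `--all-targets` makes clippy lint tests, examples, and benches in addition to the library code, so the test refactoring later in this commit is held to the same `-D warnings` bar.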

CHANGELOG.md:

@@ -2,12 +2,20 @@
 ## [Unreleased]
 
+## 0.17.0 (2021-07-22)
+
+* Fix `multi_get` method (mikhailOK)
 * Bump `librocksdb-sys` up to 6.19.3 (olegnn)
-* Make SSE inclusion conditional for target features.
-  RocksDB is not compiled with SSE4 instructions anymore unless the corresponding features are enabled in rustc (mbargull)
+* Add support for the cuckoo table format (rbost)
+* RocksDB is not compiled with SSE4 instructions anymore unless the corresponding features are enabled in rustc (mbargull)
 * Bump `librocksdb-sys` up to 6.20.3 (olegnn, akrylysov)
 * Add `DB::key_may_exist_cf_opt` method (stanislav-tkach)
 * Add `Options::set_zstd_max_train_bytes` method (stanislav-tkach)
+* Mark Cache and Env as Send and Sync (akrylysov)
+* Allow cloning the Cache and Env (duarten)
+* Make SSE inclusion conditional for target features (mbargull)
+* Use Self where possible (adamnemecek)
+* Don't leak dropped column families (ryoqun)
 
 ## 0.16.0 (2021-04-18)
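Two of the additions above are easiest to see in code. A minimal sketch, assuming the 0.17.0 signatures (`key_may_exist_cf_opt` as a fast, possibly-false-positive existence check, and `set_zstd_max_train_bytes` capping zstd dictionary-training input); the path and option values are made up for illustration:

    use rocksdb::{Options, ReadOptions, DB};

    fn main() {
        let mut opts = Options::default();
        opts.create_if_missing(true);
        opts.create_missing_column_families(true);
        // New in 0.17.0: limit how many bytes zstd may use to train a
        // compression dictionary (the value here is arbitrary).
        opts.set_zstd_max_train_bytes(100 * 1024);

        let db = DB::open_cf(&opts, "_rocksdb_0_17_demo", vec!["cf1"]).unwrap();
        let cf = db.cf_handle("cf1").unwrap();
        db.put_cf(cf, b"key", b"value").unwrap();

        // New in 0.17.0: existence check with explicit read options.
        // `false` means definitely absent; `true` means "possibly present".
        let read_opts = ReadOptions::default();
        assert!(db.key_may_exist_cf_opt(cf, b"key", &read_opts));
    }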

Cargo.toml:

@@ -28,9 +28,11 @@ multi-threaded-cf = []
 
 [dependencies]
 libc = "0.2"
-librocksdb-sys = { path = "librocksdb-sys", version = "6.19.3" }
+librocksdb-sys = { path = "librocksdb-sys", version = "6.20.3" }
 
 [dev-dependencies]
 trybuild = "1.0"
 tempfile = "3.1"
 pretty_assertions = "0.7"
+bincode = "1.3"
+serde = { version = "1", features = [ "derive" ] }

librocksdb-sys/Cargo.toml:

@@ -29,5 +29,5 @@ uuid = { version = "0.8", features = ["v4"] }
 
 [build-dependencies]
 cc = { version = "1.0", features = ["parallel"] }
-bindgen = { version = "0.58.1", default-features = false, features = ["runtime"] }
+bindgen = { version = "0.59", default-features = false, features = ["runtime"] }
 glob = "0.3"

src/column_family.rs:

@@ -130,7 +130,7 @@ impl<'a> AsColumnFamilyRef for &'a ColumnFamily {
 }
 
 // Only implement for Arc-ed BoundColumnFamily as this tightly coupled and
-// implmetation detail, considering use of std::mem::transmute. BoundColumnFamily
+// implementation detail, considering use of std::mem::transmute. BoundColumnFamily
 // isn't expected to be used as naked.
 // Also, ColumnFamilyRef might not be Arc<BoundColumnFamily<'a>> depending crate
 // feature flags so, we can't use the type alias here.

src/compaction_filter.rs:

@@ -49,7 +49,7 @@ pub trait CompactionFilter {
     ///
     /// Note that RocksDB snapshots (i.e. call GetSnapshot() API on a
     /// DB* object) will not guarantee to preserve the state of the DB with
-    /// CompactionFilter. Data seen from a snapshot might disppear after a
+    /// CompactionFilter. Data seen from a snapshot might disappear after a
     /// compaction finishes. If you use snapshots, think twice about whether you
     /// want to use compaction filter and whether you are using it in a safe way.
     ///
/// ///
@@ -158,15 +158,15 @@ fn test_filter(level: u32, key: &[u8], value: &[u8]) -> Decision {
 fn compaction_filter_test() {
     use crate::{Options, DB};
 
-    let path = "_rust_rocksdb_filtertest";
+    let path = "_rust_rocksdb_filter_test";
     let mut opts = Options::default();
     opts.create_if_missing(true);
     opts.set_compaction_filter("test", test_filter);
     {
         let db = DB::open(&opts, path).unwrap();
-        let _ = db.put(b"k1", b"a");
-        let _ = db.put(b"_k", b"b");
-        let _ = db.put(b"%k", b"c");
+        let _r = db.put(b"k1", b"a");
+        let _r = db.put(b"_k", b"b");
+        let _r = db.put(b"%k", b"c");
         db.compact_range(None::<&[u8]>, None::<&[u8]>);
         assert_eq!(&*db.get(b"k1").unwrap().unwrap(), b"a");
         assert!(db.get(b"_k").unwrap().is_none());

src/compaction_filter_factory.rs:

@@ -122,15 +122,15 @@ mod tests {
     #[test]
     fn compaction_filter_factory_test() {
-        let path = "_rust_rocksdb_filterfactorytest";
+        let path = "_rust_rocksdb_filter_factory_test";
         let mut opts = Options::default();
         opts.create_if_missing(true);
         opts.set_compaction_filter_factory(TestFactory(CString::new("TestFactory").unwrap()));
         {
             let db = DB::open(&opts, path).unwrap();
-            let _ = db.put(b"k1", b"a");
-            let _ = db.put(b"_k", b"b");
-            let _ = db.put(b"%k", b"c");
+            let _r = db.put(b"k1", b"a");
+            let _r = db.put(b"_rk", b"b");
+            let _r = db.put(b"%k", b"c");
             db.compact_range(None::<&[u8]>, None::<&[u8]>);
             assert_eq!(db.get(b"%k1").unwrap(), None);
         }

src/db.rs:

@@ -184,7 +184,7 @@ impl<T: ThreadMode> DBAccess for DBWithThreadMode<T> {
 /// Even with [`SingleThreaded`], almost all of RocksDB operations is
 /// multi-threaded unless the underlying RocksDB instance is
 /// specifically configured otherwise. `SingleThreaded` only forces
-/// serialization of column family alternations by requring `&mut self` of DB
+/// serialization of column family alternations by requiring `&mut self` of DB
 /// instance due to its wrapper implementation details.
 ///
 /// # Multi-threaded mode
@@ -1779,7 +1779,7 @@ impl DBWithThreadMode<SingleThreaded> {
     }
 
     /// Returns the underlying column family handle
-    pub fn cf_handle<'a>(&'a self, name: &str) -> Option<&'a ColumnFamily> {
+    pub fn cf_handle(&self, name: &str) -> Option<&ColumnFamily> {
         self.cfs.cfs.get(name)
     }
 }
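The new signature relies on lifetime elision: with a single `&self` input, the returned reference is automatically tied to `self`, so the explicit `'a` added nothing. The two forms desugar identically (`Registry` below is a stand-in type, not part of the crate):

    use std::collections::HashMap;

    struct Registry<T> {
        items: HashMap<String, T>,
    }

    impl<T> Registry<T> {
        // Explicit lifetime, as in the old signature.
        fn get_explicit<'a>(&'a self, name: &str) -> Option<&'a T> {
            self.items.get(name)
        }

        // Elided, as in the new signature; same meaning.
        fn get_elided(&self, name: &str) -> Option<&T> {
            self.items.get(name)
        }
    }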

src/db_iterator.rs:

@@ -471,7 +471,7 @@ impl<'a, D: DBAccess> Iterator for DBIteratorWithThreadMode<'a, D> {
         }
 
         if self.raw.valid() {
-            // .key() and .value() only ever return None if valid == false, which we've just cheked
+            // .key() and .value() only ever return None if valid == false, which we've just checked
             Some((
                 Box::from(self.raw.key().unwrap()),
                 Box::from(self.raw.value().unwrap()),

tests/test_db.rs:

@@ -72,9 +72,9 @@ fn errors_do_stuff() {
     match DB::destroy(&opts, &path) {
         Err(s) => {
             let message = s.to_string();
-            assert!(message.find("IO error:").is_some());
-            assert!(message.find("_rust_rocksdb_error").is_some());
-            assert!(message.find("/LOCK:").is_some());
+            assert!(message.contains("IO error:"));
+            assert!(message.contains("_rust_rocksdb_error"));
+            assert!(message.contains("/LOCK:"));
         }
         Ok(_) => panic!("should fail"),
     }
@@ -680,9 +680,10 @@ fn env_and_dbpaths_test() {
     }
 
     {
-        let mut paths = Vec::new();
-        paths.push(rocksdb::DBPath::new(&path1, 20 << 20).unwrap());
-        paths.push(rocksdb::DBPath::new(&path2, 30 << 20).unwrap());
+        let paths = vec![
+            rocksdb::DBPath::new(&path1, 20 << 20).unwrap(),
+            rocksdb::DBPath::new(&path2, 30 << 20).unwrap(),
+        ];
         opts.set_db_paths(&paths);
     }

tests/test_merge_operator.rs:

@@ -15,9 +15,8 @@
 mod util;
 
 use pretty_assertions::assert_eq;
-use rocksdb::merge_operator::MergeFn;
-use rocksdb::{DBCompactionStyle, MergeOperands, Options, DB};
+use rocksdb::{merge_operator::MergeFn, DBCompactionStyle, MergeOperands, Options, DB};
+use serde::{Deserialize, Serialize};
 use util::DBPath;
 
 fn test_provided_merge(
@@ -77,26 +76,7 @@ fn merge_test() {
     assert!(db.get(b"k1").unwrap().is_none());
 }
 
-unsafe fn to_slice<T: Sized>(p: &T) -> &[u8] {
-    ::std::slice::from_raw_parts((p as *const T) as *const u8, ::std::mem::size_of::<T>())
-}
-
-fn from_slice<T: Sized>(s: &[u8]) -> Option<&T> {
-    if std::mem::size_of::<T>() == s.len() {
-        unsafe { Some(&*(s.as_ptr() as *const T)) }
-    } else {
-        println!(
-            "slice {:?} is len {}, but T is size {}",
-            s,
-            s.len(),
-            std::mem::size_of::<T>()
-        );
-        None
-    }
-}
-
-#[repr(packed)]
-#[derive(Copy, Clone, Debug, Default)]
+#[derive(Serialize, Deserialize, Copy, Clone, Debug, Default)]
 struct ValueCounts {
     num_a: u32,
     num_b: u32,
@@ -104,6 +84,16 @@ struct ValueCounts {
     num_d: u32,
 }
 
+impl ValueCounts {
+    fn from_slice(slice: &[u8]) -> Option<Self> {
+        bincode::deserialize::<Self>(slice).ok()
+    }
+
+    fn as_bytes(&self) -> Option<Vec<u8>> {
+        bincode::serialize(self).ok()
+    }
+}
+
 fn test_counting_partial_merge(
     _new_key: &[u8],
     _existing_val: Option<&[u8]>,
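These two hunks replace the old `#[repr(packed)]` struct plus raw-pointer casts with serde/bincode. The raw version depended on the struct's exact in-memory layout and was easy to misuse (taking references into a packed struct is unsound); bincode gives a defined, portable byte encoding. A self-contained round-trip sketch using the newly added dev-dependencies (the struct is redeclared here for illustration):

    use serde::{Deserialize, Serialize};

    #[derive(Serialize, Deserialize, Copy, Clone, Debug, Default, PartialEq)]
    struct ValueCounts {
        num_a: u32,
        num_b: u32,
        num_c: u32,
        num_d: u32,
    }

    fn main() {
        let counts = ValueCounts { num_a: 3, num_b: 2, num_c: 0, num_d: 1 };
        // bincode 1.x uses a fixed-width little-endian encoding by default,
        // so four u32 fields serialize to exactly 16 bytes.
        let bytes = bincode::serialize(&counts).unwrap();
        assert_eq!(bytes.len(), 16);
        let decoded: ValueCounts = bincode::deserialize(&bytes).unwrap();
        assert_eq!(decoded, counts);
    }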
@@ -124,11 +114,10 @@ fn test_counting_full_merge(
     existing_val: Option<&[u8]>,
     operands: &mut MergeOperands,
 ) -> Option<Vec<u8>> {
-    let mut counts = if let Some(v) = existing_val {
-        *from_slice::<ValueCounts>(v).unwrap_or(&ValueCounts::default())
-    } else {
-        ValueCounts::default()
-    };
+    let mut counts = existing_val
+        .map(|v| ValueCounts::from_slice(v))
+        .flatten()
+        .unwrap_or_default();
 
     for op in operands {
         for e in op {
@@ -141,15 +130,13 @@ fn test_counting_full_merge(
             }
         }
     }
-    let slc = unsafe { to_slice(&counts) };
-    Some(slc.to_vec())
+    counts.as_bytes()
 }
 
 #[test]
+#[allow(clippy::too_many_lines)]
 fn counting_merge_test() {
-    use std::sync::Arc;
-    use std::thread;
+    use std::{sync::Arc, thread};
 
     let db_path = DBPath::new("_rust_rocksdb_partial_merge_test");
     let mut opts = Options::default();
@@ -234,35 +221,29 @@ fn counting_merge_test() {
         }
     });
 
     let m = db.merge(b"k1", b"b");
     assert!(m.is_ok());
     h3.join().unwrap();
     h1.join().unwrap();
-    match db.get(b"k2") {
-        Ok(Some(value)) => match from_slice::<ValueCounts>(&*value) {
-            Some(v) => unsafe {
-                assert_eq!(v.num_a, 1000);
-                assert_eq!(v.num_b, 500);
-                assert_eq!(v.num_c, 2000);
-                assert_eq!(v.num_d, 500);
-            },
-            None => panic!("Failed to get ValueCounts from db"),
-        },
-        Err(e) => panic!("error reading value {:?}", e),
-        _ => panic!("value not present"),
-    }
-    match db.get(b"k1") {
-        Ok(Some(value)) => match from_slice::<ValueCounts>(&*value) {
-            Some(v) => unsafe {
-                assert_eq!(v.num_a, 3);
-                assert_eq!(v.num_b, 2);
-                assert_eq!(v.num_c, 0);
-                assert_eq!(v.num_d, 1);
-            },
-            None => panic!("Failed to get ValueCounts from db"),
-        },
-        Err(e) => panic!("error reading value {:?}", e),
-        _ => panic!("value not present"),
-    }
+
+    let value_getter = |key| match db.get(key) {
+        Ok(Some(value)) => ValueCounts::from_slice(&value)
+            .map_or_else(|| panic!("unable to create ValueCounts from bytes"), |v| v),
+        Ok(None) => panic!("value not present"),
+        Err(e) => panic!("error reading value {:?}", e),
+    };
+
+    let counts = value_getter(b"k2");
+    assert_eq!(counts.num_a, 1000);
+    assert_eq!(counts.num_b, 500);
+    assert_eq!(counts.num_c, 2000);
+    assert_eq!(counts.num_d, 500);
+
+    let counts = value_getter(b"k1");
+    assert_eq!(counts.num_a, 3);
+    assert_eq!(counts.num_b, 2);
+    assert_eq!(counts.num_c, 0);
+    assert_eq!(counts.num_d, 1);
 }
 
 #[test]
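For readers new to this API: a full merge function receives the key, the current value (if any), and the queued merge operands, and must fold them into a single new value. A minimal self-contained sketch, assuming 0.17.0's `set_merge_operator_associative` (the same registration call that appears in the last hunk of this file); the path is made up:

    use rocksdb::{MergeOperands, Options, DB};

    // Associative merge that appends each operand to the existing value.
    fn concat_merge(
        _key: &[u8],
        existing_val: Option<&[u8]>,
        operands: &mut MergeOperands,
    ) -> Option<Vec<u8>> {
        let mut result = existing_val.map(<[u8]>::to_vec).unwrap_or_default();
        for op in operands {
            result.extend_from_slice(op);
        }
        Some(result)
    }

    fn main() {
        let mut opts = Options::default();
        opts.create_if_missing(true);
        opts.set_merge_operator_associative("concat", concat_merge);
        let db = DB::open(&opts, "_merge_demo").unwrap();
        db.merge(b"k", b"a").unwrap();
        db.merge(b"k", b"b").unwrap();
        // Operands are folded together when the key is read.
        assert_eq!(db.get(b"k").unwrap().unwrap(), b"ab");
    }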
@@ -311,7 +292,7 @@ fn make_merge_max_with_limit(limit: u64) -> impl MergeFn + Clone {
 #[test]
 fn test_merge_state() {
     use {Options, DB};
-    let path = "_rust_rocksdb_mergetest_state";
+    let path = "_rust_rocksdb_merge_test_state";
     let mut opts = Options::default();
     opts.create_if_missing(true);
     opts.set_merge_operator_associative("max-limit-12", make_merge_max_with_limit(12));
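The `make_merge_max_with_limit` factory referenced here returns a closure, which is why its signature (visible in the hunk header) is `impl MergeFn + Clone`. A plausible shape for such a factory, sketched under the assumption that values are 8-byte little-endian `u64`s; the repo's actual helper may differ:

    use rocksdb::{merge_operator::MergeFn, MergeOperands};

    fn make_merge_max_with_limit(limit: u64) -> impl MergeFn + Clone {
        move |_key: &[u8], existing: Option<&[u8]>, operands: &mut MergeOperands| {
            // Decode an 8-byte little-endian u64 (assumes well-formed input).
            let decode = |bytes: &[u8]| -> u64 {
                let mut buf = [0u8; 8];
                buf.copy_from_slice(&bytes[..8]);
                u64::from_le_bytes(buf)
            };
            let mut max = existing.map(|b| decode(b)).unwrap_or(0);
            for op in operands {
                max = max.max(decode(op));
            }
            // Clamp the running maximum to the captured limit.
            Some(max.min(limit).to_le_bytes().to_vec())
        }
    }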
