From e565d68852457446390c4f76b7f3b30219224bef Mon Sep 17 00:00:00 2001 From: Jordan Terrell Date: Wed, 28 Nov 2018 17:36:58 -0600 Subject: [PATCH 1/3] Fixing rustfmt.toml and applying formatting... --- rustfmt.toml | 5 +- src/backup.rs | 8 +- src/checkpoint.rs | 22 +- src/compaction_filter.rs | 7 +- src/comparator.rs | 1 - src/db.rs | 96 +++-- src/db_options.rs | 63 +-- src/lib.rs | 22 +- src/merge_operator.rs | 722 +++++++++++++++++----------------- src/slice_transform.rs | 64 ++- tests/test_checkpoint.rs | 2 +- tests/test_column_family.rs | 55 +-- tests/test_iterator.rs | 28 +- tests/test_multithreaded.rs | 3 +- tests/test_raw_iterator.rs | 15 +- tests/test_rocksdb_options.rs | 2 +- tests/test_slice_transform.rs | 6 +- tests/util/mod.rs | 11 +- 18 files changed, 578 insertions(+), 554 deletions(-) diff --git a/rustfmt.toml b/rustfmt.toml index 6582e8c..4e727a0 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -1,4 +1 @@ -reorder_imports = true -max_width = 100 -ideal_width = 100 -trailing_comma = always +reorder_imports = true \ No newline at end of file diff --git a/src/backup.rs b/src/backup.rs index 9319f4d..3ede767 100644 --- a/src/backup.rs +++ b/src/backup.rs @@ -13,9 +13,8 @@ // limitations under the License. // - -use {DB, Error}; use ffi; +use {Error, DB}; use libc::{c_int, uint32_t}; use std::ffi::CString; @@ -45,7 +44,7 @@ impl BackupEngine { Err(_) => { return Err(Error::new( "Failed to convert path to CString \ - when opening backup engine" + when opening backup engine" .to_owned(), )) } @@ -64,8 +63,7 @@ impl BackupEngine { pub fn create_new_backup(&mut self, db: &DB) -> Result<(), Error> { unsafe { ffi_try!(ffi::rocksdb_backup_engine_create_new_backup( - self.inner, - db.inner, + self.inner, db.inner, )); Ok(()) } diff --git a/src/checkpoint.rs b/src/checkpoint.rs index 68af220..424a5a3 100644 --- a/src/checkpoint.rs +++ b/src/checkpoint.rs @@ -13,14 +13,13 @@ // limitations under the License. // -///! Implementation of bindings to RocksDB Checkpoint[1] API -/// -/// [1]: https://github.com/facebook/rocksdb/wiki/Checkpoints - -use {DB, Error}; use ffi; use std::ffi::CString; use std::path::Path; +///! Implementation of bindings to RocksDB Checkpoint[1] API +/// +/// [1]: https://github.com/facebook/rocksdb/wiki/Checkpoints +use {Error, DB}; /// Undocumented parameter for `ffi::rocksdb_checkpoint_create` function. Zero by default. const LOG_SIZE_FOR_FLUSH: u64 = 0_u64; @@ -45,9 +44,7 @@ impl Checkpoint { return Err(Error::new("Could not create checkpoint object.".to_owned())); } - Ok(Checkpoint { - inner: checkpoint, - }) + Ok(Checkpoint { inner: checkpoint }) } /// Creates new physical DB checkpoint in directory specified by `path`. @@ -57,14 +54,17 @@ impl Checkpoint { Ok(c) => c, Err(_) => { return Err(Error::new( - "Failed to convert path to CString when creating DB checkpoint" - .to_owned(), + "Failed to convert path to CString when creating DB checkpoint".to_owned(), )); } }; unsafe { - ffi_try!(ffi::rocksdb_checkpoint_create(self.inner, cpath.as_ptr(), LOG_SIZE_FOR_FLUSH,)); + ffi_try!(ffi::rocksdb_checkpoint_create( + self.inner, + cpath.as_ptr(), + LOG_SIZE_FOR_FLUSH, + )); Ok(()) } diff --git a/src/compaction_filter.rs b/src/compaction_filter.rs index b846699..0c63b2a 100644 --- a/src/compaction_filter.rs +++ b/src/compaction_filter.rs @@ -32,7 +32,6 @@ pub enum Decision { Change(&'static [u8]), } - /// Function to filter compaction with. 
 ///
 /// This function takes the level of compaction, the key, and the existing value
@@ -46,8 +45,7 @@ impl<F> CompactionFilterFn for F
 where
     F: FnMut(u32, &[u8], &[u8]) -> Decision,
     F: Send + 'static,
-{
-}
+{}
 
 pub struct CompactionFilterCallback<F>
 where
@@ -117,7 +115,7 @@ fn test_filter(level: u32, key: &[u8], value: &[u8]) -> Decision {
 
 #[test]
 fn compaction_filter_test() {
-    use {DB, Options};
+    use {Options, DB};
 
     let path = "_rust_rocksdb_filtertest";
     let mut opts = Options::default();
@@ -133,5 +131,4 @@ fn compaction_filter_test() {
         assert!(db.get(b"_k").unwrap().is_none());
         assert_eq!(&*db.get(b"%k").unwrap().unwrap(), b"secret");
     }
-
 }
diff --git a/src/comparator.rs b/src/comparator.rs
index 459a47c..7c607a0 100644
--- a/src/comparator.rs
+++ b/src/comparator.rs
@@ -13,7 +13,6 @@
 // limitations under the License.
 //
 
-
 use libc::{c_char, c_int, c_void, size_t};
 use std::cmp::Ordering;
 use std::ffi::CString;
diff --git a/src/db.rs b/src/db.rs
index 4839189..e264f4c 100644
--- a/src/db.rs
+++ b/src/db.rs
@@ -13,13 +13,13 @@
 // limitations under the License.
 //
 
-
-use {DB, Error, Options, WriteOptions, ColumnFamily, ColumnFamilyDescriptor};
 use ffi;
 use ffi_util::opt_bytes_to_ptr;
+use {ColumnFamily, ColumnFamilyDescriptor, Error, Options, WriteOptions, DB};
 
 use libc::{self, c_char, c_int, c_uchar, c_void, size_t};
 use std::collections::BTreeMap;
+use std::ffi::CStr;
 use std::ffi::CString;
 use std::fmt;
 use std::fs;
@@ -28,7 +28,6 @@ use std::path::Path;
 use std::ptr;
 use std::slice;
 use std::str;
-use std::ffi::CStr;
 
 pub fn new_bloom_filter(bits: c_int) -> *mut ffi::rocksdb_filterpolicy_t {
     unsafe { ffi::rocksdb_filterpolicy_create_bloom(bits) }
@@ -150,7 +149,6 @@ pub struct DBRawIterator {
     inner: *mut ffi::rocksdb_iterator_t,
 }
 
-
 /// An iterator over a database or column family, with specifiable
 /// ranges and direction.
 ///
@@ -201,7 +199,11 @@ pub enum IteratorMode<'a> {
 
 impl DBRawIterator {
     fn new(db: &DB, readopts: &ReadOptions) -> DBRawIterator {
-        unsafe { DBRawIterator { inner: ffi::rocksdb_create_iterator(db.inner, readopts.inner) } }
+        unsafe {
+            DBRawIterator {
+                inner: ffi::rocksdb_create_iterator(db.inner, readopts.inner),
+            }
+        }
     }
 
     fn new_cf(
@@ -581,10 +583,13 @@ impl<'a> Drop for Snapshot<'a> {
 
 impl ColumnFamilyDescriptor {
     // Create a new column family descriptor with the specified name and options.
-    pub fn new<S>(name: S, options: Options) -> Self where S: Into<String> {
+    pub fn new<S>(name: S, options: Options) -> Self
+    where
+        S: Into<String>,
+    {
         ColumnFamilyDescriptor {
             name: name.into(),
-            options
+            options,
         }
     }
 }
@@ -606,20 +611,28 @@ impl DB {
     ///
     /// Column families opened using this function will be created with default `Options`.
     pub fn open_cf<P: AsRef<Path>>(opts: &Options, path: P, cfs: &[&str]) -> Result<DB, Error> {
-        let cfs_v = cfs.to_vec().iter().map(|name| ColumnFamilyDescriptor::new(*name, Options::default())).collect();
+        let cfs_v = cfs
+            .to_vec()
+            .iter()
+            .map(|name| ColumnFamilyDescriptor::new(*name, Options::default()))
+            .collect();
 
         DB::open_cf_descriptors(opts, path, cfs_v)
     }
 
     /// Open a database with the given database options and column family names/options.
-    pub fn open_cf_descriptors<P: AsRef<Path>>(opts: &Options, path: P, cfs: Vec<ColumnFamilyDescriptor>) -> Result<DB, Error> {
+    pub fn open_cf_descriptors<P: AsRef<Path>>(
+        opts: &Options,
+        path: P,
+        cfs: Vec<ColumnFamilyDescriptor>,
+    ) -> Result<DB, Error> {
         let path = path.as_ref();
         let cpath = match CString::new(path.to_string_lossy().as_bytes()) {
             Ok(c) => c,
             Err(_) => {
                 return Err(Error::new(
                     "Failed to convert path to CString \
-                                       when opening DB."
+                     when opening DB."
.to_owned(), )) } @@ -628,7 +641,7 @@ impl DB { if let Err(e) = fs::create_dir_all(&path) { return Err(Error::new(format!( "Failed to create RocksDB\ - directory: `{:?}`.", + directory: `{:?}`.", e ))); } @@ -646,7 +659,7 @@ impl DB { if !cfs_v.iter().any(|cf| cf.name == "default") { cfs_v.push(ColumnFamilyDescriptor { name: String::from("default"), - options: Options::default() + options: Options::default(), }); } // We need to store our CStrings in an intermediate vector @@ -661,7 +674,8 @@ impl DB { // These handles will be populated by DB. let mut cfhandles: Vec<_> = cfs_v.iter().map(|_| ptr::null_mut()).collect(); - let mut cfopts: Vec<_> = cfs_v.iter() + let mut cfopts: Vec<_> = cfs_v + .iter() .map(|cf| cf.options.inner as *const _) .collect(); @@ -672,14 +686,15 @@ impl DB { cfs_v.len() as c_int, cfnames.as_mut_ptr(), cfopts.as_mut_ptr(), - cfhandles.as_mut_ptr(),)); + cfhandles.as_mut_ptr(), + )); } for handle in &cfhandles { if handle.is_null() { return Err(Error::new( "Received null column family \ - handle from DB." + handle from DB." .to_owned(), )); } @@ -707,7 +722,7 @@ impl DB { Err(_) => { return Err(Error::new( "Failed to convert path to CString \ - when opening DB." + when opening DB." .to_owned(), )) } @@ -731,7 +746,6 @@ impl DB { } } - pub fn destroy>(opts: &Options, path: P) -> Result<(), Error> { let cpath = CString::new(path.as_ref().to_string_lossy().as_bytes()).unwrap(); unsafe { @@ -773,10 +787,10 @@ impl DB { if readopts.inner.is_null() { return Err(Error::new( "Unable to create RocksDB read options. \ - This is a fairly trivial call, and its \ - failure may be indicative of a \ - mis-compiled or mis-loaded RocksDB \ - library." + This is a fairly trivial call, and its \ + failure may be indicative of a \ + mis-compiled or mis-loaded RocksDB \ + library." .to_owned(), )); } @@ -812,10 +826,10 @@ impl DB { if readopts.inner.is_null() { return Err(Error::new( "Unable to create RocksDB read options. \ - This is a fairly trivial call, and its \ - failure may be indicative of a \ - mis-compiled or mis-loaded RocksDB \ - library." + This is a fairly trivial call, and its \ + failure may be indicative of a \ + mis-compiled or mis-loaded RocksDB \ + library." 
.to_owned(), )); } @@ -848,7 +862,7 @@ impl DB { Err(_) => { return Err(Error::new( "Failed to convert path to CString \ - when opening rocksdb" + when opening rocksdb" .to_owned(), )) } @@ -929,11 +943,16 @@ impl DB { pub fn prefix_iterator_cf<'a>( &self, cf_handle: ColumnFamily, - prefix: &'a [u8] + prefix: &'a [u8], ) -> Result { let mut opts = ReadOptions::default(); opts.set_prefix_same_as_start(true); - DBIterator::new_cf(self, cf_handle, &opts, IteratorMode::From(prefix, Direction::Forward)) + DBIterator::new_cf( + self, + cf_handle, + &opts, + IteratorMode::From(prefix, Direction::Forward), + ) } pub fn raw_iterator(&self) -> DBRawIterator { @@ -1207,7 +1226,9 @@ impl WriteBatch { impl Default for WriteBatch { fn default() -> WriteBatch { - WriteBatch { inner: unsafe { ffi::rocksdb_writebatch_create() } } + WriteBatch { + inner: unsafe { ffi::rocksdb_writebatch_create() }, + } } } @@ -1268,21 +1289,21 @@ impl ReadOptions { } pub fn set_prefix_same_as_start(&mut self, v: bool) { - unsafe { - ffi::rocksdb_readoptions_set_prefix_same_as_start(self.inner, v as c_uchar) - } + unsafe { ffi::rocksdb_readoptions_set_prefix_same_as_start(self.inner, v as c_uchar) } } - pub fn set_total_order_seek(&mut self, v:bool) { - unsafe { - ffi::rocksdb_readoptions_set_total_order_seek(self.inner, v as c_uchar) - } + pub fn set_total_order_seek(&mut self, v: bool) { + unsafe { ffi::rocksdb_readoptions_set_total_order_seek(self.inner, v as c_uchar) } } } impl Default for ReadOptions { fn default() -> ReadOptions { - unsafe { ReadOptions { inner: ffi::rocksdb_readoptions_create() } } + unsafe { + ReadOptions { + inner: ffi::rocksdb_readoptions_create(), + } + } } } @@ -1352,7 +1373,6 @@ fn test_db_vector() { assert_eq!(&*v, &ctrl[..]); } - #[test] fn external() { let path = "_rust_rocksdb_externaltest"; diff --git a/src/db_options.rs b/src/db_options.rs index e1ef2fd..925eb5a 100644 --- a/src/db_options.rs +++ b/src/db_options.rs @@ -18,14 +18,17 @@ use std::path::Path; use libc::{self, c_int, c_uchar, c_uint, c_void, size_t, uint64_t}; -use ffi; -use {BlockBasedOptions, BlockBasedIndexType, DBCompactionStyle, DBCompressionType, DBRecoveryMode, MemtableFactory, - Options, WriteOptions}; -use compaction_filter::{self, CompactionFilterCallback, CompactionFilterFn, filter_callback}; +use compaction_filter::{self, filter_callback, CompactionFilterCallback, CompactionFilterFn}; use comparator::{self, ComparatorCallback, CompareFn}; -use merge_operator::{self, MergeFn, MergeOperatorCallback, full_merge_callback, - partial_merge_callback}; +use ffi; +use merge_operator::{ + self, full_merge_callback, partial_merge_callback, MergeFn, MergeOperatorCallback, +}; use slice_transform::SliceTransform; +use { + BlockBasedIndexType, BlockBasedOptions, DBCompactionStyle, DBCompressionType, DBRecoveryMode, + MemtableFactory, Options, WriteOptions, +}; pub fn new_cache(capacity: size_t) -> *mut ffi::rocksdb_cache_t { unsafe { ffi::rocksdb_cache_create_lru(capacity) } @@ -189,7 +192,10 @@ impl Options { /// ``` pub fn create_missing_column_families(&mut self, create_missing_cfs: bool) { unsafe { - ffi::rocksdb_options_set_create_missing_column_families(self.inner, create_missing_cfs as c_uchar); + ffi::rocksdb_options_set_create_missing_column_families( + self.inner, + create_missing_cfs as c_uchar, + ); } } @@ -256,14 +262,19 @@ impl Options { /// Default: `0` pub fn set_compaction_readahead_size(&mut self, compaction_readahead_size: usize) { unsafe { - ffi::rocksdb_options_compaction_readahead_size(self.inner, 
compaction_readahead_size as usize); + ffi::rocksdb_options_compaction_readahead_size( + self.inner, + compaction_readahead_size as usize, + ); } } - pub fn set_merge_operator(&mut self, name: &str, - full_merge_fn: MergeFn, - partial_merge_fn: Option) { - + pub fn set_merge_operator( + &mut self, + name: &str, + full_merge_fn: MergeFn, + partial_merge_fn: Option, + ) { let cb = Box::new(MergeOperatorCallback { name: CString::new(name.as_bytes()).unwrap(), full_merge_fn: full_merge_fn, @@ -283,8 +294,10 @@ impl Options { } } - #[deprecated(since = "0.5.0", - note = "add_merge_operator has been renamed to set_merge_operator")] + #[deprecated( + since = "0.5.0", + note = "add_merge_operator has been renamed to set_merge_operator" + )] pub fn add_merge_operator(&mut self, name: &str, merge_fn: MergeFn) { self.set_merge_operator(name, merge_fn, None); } @@ -343,14 +356,13 @@ impl Options { } pub fn set_prefix_extractor(&mut self, prefix_extractor: SliceTransform) { - unsafe { - ffi::rocksdb_options_set_prefix_extractor( - self.inner, prefix_extractor.inner - ) - } + unsafe { ffi::rocksdb_options_set_prefix_extractor(self.inner, prefix_extractor.inner) } } - #[deprecated(since = "0.5.0", note = "add_comparator has been renamed to set_comparator")] + #[deprecated( + since = "0.5.0", + note = "add_comparator has been renamed to set_comparator" + )] pub fn add_comparator(&mut self, name: &str, compare_fn: CompareFn) { self.set_comparator(name, compare_fn); } @@ -532,8 +544,10 @@ impl Options { /// let mut opts = Options::default(); /// opts.set_allow_os_buffer(false); /// ``` - #[deprecated(since = "0.7.0", - note = "replaced with set_use_direct_reads/set_use_direct_io_for_flush_and_compaction methods")] + #[deprecated( + since = "0.7.0", + note = "replaced with set_use_direct_reads/set_use_direct_io_for_flush_and_compaction methods" + )] pub fn set_allow_os_buffer(&mut self, is_allow: bool) { self.set_use_direct_reads(!is_allow); self.set_use_direct_io_for_flush_and_compaction(!is_allow); @@ -844,7 +858,6 @@ impl Options { } } - /// Sets the maximum number of concurrent background compaction jobs, submitted to /// the default LOW priority thread pool. /// We first try to schedule compactions based on @@ -1049,9 +1062,7 @@ impl Options { /// /// Default: `true` pub fn set_advise_random_on_open(&mut self, advise: bool) { - unsafe { - ffi::rocksdb_options_set_advise_random_on_open(self.inner, advise as c_uchar) - } + unsafe { ffi::rocksdb_options_set_advise_random_on_open(self.inner, advise as c_uchar) } } /// Sets the number of levels for this database. diff --git a/src/lib.rs b/src/lib.rs index d0c0a1f..baf1520 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -54,17 +54,18 @@ mod ffi_util; pub mod backup; pub mod checkpoint; -mod comparator; -pub mod merge_operator; pub mod compaction_filter; +mod comparator; mod db; mod db_options; +pub mod merge_operator; mod slice_transform; pub use compaction_filter::Decision as CompactionDecision; -pub use db::{DBCompactionStyle, DBCompressionType, DBIterator, DBRawIterator, DBRecoveryMode, - DBVector, ReadOptions, Direction, IteratorMode, Snapshot, WriteBatch, - new_bloom_filter}; +pub use db::{ + new_bloom_filter, DBCompactionStyle, DBCompressionType, DBIterator, DBRawIterator, + DBRecoveryMode, DBVector, Direction, IteratorMode, ReadOptions, Snapshot, WriteBatch, +}; pub use slice_transform::SliceTransform; @@ -155,8 +156,14 @@ pub enum BlockBasedIndexType { /// See https://github.com/facebook/rocksdb/wiki/MemTable for more information. 
pub enum MemtableFactory { Vector, - HashSkipList { bucket_count: usize, height: i32, branching_factor: i32 }, - HashLinkList { bucket_count: usize } + HashSkipList { + bucket_count: usize, + height: i32, + branching_factor: i32, + }, + HashLinkList { + bucket_count: usize, + }, } /// Database-wide options around performance and behavior. @@ -222,7 +229,6 @@ pub struct WriteOptions { inner: *mut ffi::rocksdb_writeoptions_t, } - /// An opaque type used to represent a column family. Returned from some functions, and used /// in others #[derive(Copy, Clone)] diff --git a/src/merge_operator.rs b/src/merge_operator.rs index 70a202c..69551d8 100644 --- a/src/merge_operator.rs +++ b/src/merge_operator.rs @@ -53,7 +53,6 @@ //! } //! ``` - use libc::{self, c_char, c_int, c_void, size_t}; use std::ffi::CString; use std::mem; @@ -63,394 +62,391 @@ use std::slice; pub type MergeFn = fn(&[u8], Option<&[u8]>, &mut MergeOperands) -> Option>; pub struct MergeOperatorCallback { - pub name: CString, - pub full_merge_fn: MergeFn, - pub partial_merge_fn: MergeFn, + pub name: CString, + pub full_merge_fn: MergeFn, + pub partial_merge_fn: MergeFn, } pub unsafe extern "C" fn destructor_callback(raw_cb: *mut c_void) { - let _: Box = mem::transmute(raw_cb); + let _: Box = mem::transmute(raw_cb); } pub unsafe extern "C" fn name_callback(raw_cb: *mut c_void) -> *const c_char { - let cb = &mut *(raw_cb as *mut MergeOperatorCallback); - cb.name.as_ptr() + let cb = &mut *(raw_cb as *mut MergeOperatorCallback); + cb.name.as_ptr() } pub unsafe extern "C" fn full_merge_callback( - raw_cb: *mut c_void, - raw_key: *const c_char, - key_len: size_t, - existing_value: *const c_char, - existing_value_len: size_t, - operands_list: *const *const c_char, - operands_list_len: *const size_t, - num_operands: c_int, - success: *mut u8, - new_value_length: *mut size_t, - ) -> *mut c_char { - let cb = &mut *(raw_cb as *mut MergeOperatorCallback); - let operands = &mut MergeOperands::new(operands_list, operands_list_len, num_operands); - let key = slice::from_raw_parts(raw_key as *const u8, key_len as usize); - let oldval = - if existing_value == ptr::null() { - None - } else { - Some(slice::from_raw_parts(existing_value as *const u8, existing_value_len as usize)) - }; - if let Some(mut result) = (cb.full_merge_fn)(key, oldval, operands) { - result.shrink_to_fit(); - // TODO(tan) investigate zero-copy techniques to improve performance - let buf = libc::malloc(result.len() as size_t); - assert!(!buf.is_null()); - *new_value_length = result.len() as size_t; - *success = 1 as u8; - ptr::copy(result.as_ptr() as *mut c_void, &mut *buf, result.len()); - buf as *mut c_char - } else { - *success = 0 as u8; - ptr::null_mut() as *mut c_char - } + raw_cb: *mut c_void, + raw_key: *const c_char, + key_len: size_t, + existing_value: *const c_char, + existing_value_len: size_t, + operands_list: *const *const c_char, + operands_list_len: *const size_t, + num_operands: c_int, + success: *mut u8, + new_value_length: *mut size_t, +) -> *mut c_char { + let cb = &mut *(raw_cb as *mut MergeOperatorCallback); + let operands = &mut MergeOperands::new(operands_list, operands_list_len, num_operands); + let key = slice::from_raw_parts(raw_key as *const u8, key_len as usize); + let oldval = if existing_value == ptr::null() { + None + } else { + Some(slice::from_raw_parts( + existing_value as *const u8, + existing_value_len as usize, + )) + }; + if let Some(mut result) = (cb.full_merge_fn)(key, oldval, operands) { + result.shrink_to_fit(); + // TODO(tan) investigate 
zero-copy techniques to improve performance + let buf = libc::malloc(result.len() as size_t); + assert!(!buf.is_null()); + *new_value_length = result.len() as size_t; + *success = 1 as u8; + ptr::copy(result.as_ptr() as *mut c_void, &mut *buf, result.len()); + buf as *mut c_char + } else { + *success = 0 as u8; + ptr::null_mut() as *mut c_char + } } pub unsafe extern "C" fn partial_merge_callback( - raw_cb: *mut c_void, - raw_key: *const c_char, - key_len: size_t, - operands_list: *const *const c_char, - operands_list_len: *const size_t, - num_operands: c_int, - success: *mut u8, - new_value_length: *mut size_t, - ) -> *mut c_char { - let cb = &mut *(raw_cb as *mut MergeOperatorCallback); - let operands = &mut MergeOperands::new(operands_list, operands_list_len, num_operands); - let key = slice::from_raw_parts(raw_key as *const u8, key_len as usize); - if let Some(mut result) = (cb.partial_merge_fn)(key, None, operands) { - result.shrink_to_fit(); - // TODO(tan) investigate zero-copy techniques to improve performance - let buf = libc::malloc(result.len() as size_t); - assert!(!buf.is_null()); - *new_value_length = result.len() as size_t; - *success = 1 as u8; - ptr::copy(result.as_ptr() as *mut c_void, &mut *buf, result.len()); - buf as *mut c_char - } else { - *success = 0 as u8; - ptr::null_mut::() - } + raw_cb: *mut c_void, + raw_key: *const c_char, + key_len: size_t, + operands_list: *const *const c_char, + operands_list_len: *const size_t, + num_operands: c_int, + success: *mut u8, + new_value_length: *mut size_t, +) -> *mut c_char { + let cb = &mut *(raw_cb as *mut MergeOperatorCallback); + let operands = &mut MergeOperands::new(operands_list, operands_list_len, num_operands); + let key = slice::from_raw_parts(raw_key as *const u8, key_len as usize); + if let Some(mut result) = (cb.partial_merge_fn)(key, None, operands) { + result.shrink_to_fit(); + // TODO(tan) investigate zero-copy techniques to improve performance + let buf = libc::malloc(result.len() as size_t); + assert!(!buf.is_null()); + *new_value_length = result.len() as size_t; + *success = 1 as u8; + ptr::copy(result.as_ptr() as *mut c_void, &mut *buf, result.len()); + buf as *mut c_char + } else { + *success = 0 as u8; + ptr::null_mut::() + } } - pub struct MergeOperands { - operands_list: *const *const c_char, - operands_list_len: *const size_t, - num_operands: usize, - cursor: usize, + operands_list: *const *const c_char, + operands_list_len: *const size_t, + num_operands: usize, + cursor: usize, } impl MergeOperands { - fn new( - operands_list: *const *const c_char, - operands_list_len: *const size_t, - num_operands: c_int, - ) -> MergeOperands { - assert!(num_operands >= 0); - MergeOperands { - operands_list: operands_list, - operands_list_len: operands_list_len, - num_operands: num_operands as usize, - cursor: 0, - } - } + fn new( + operands_list: *const *const c_char, + operands_list_len: *const size_t, + num_operands: c_int, + ) -> MergeOperands { + assert!(num_operands >= 0); + MergeOperands { + operands_list: operands_list, + operands_list_len: operands_list_len, + num_operands: num_operands as usize, + cursor: 0, + } + } } impl<'a> Iterator for &'a mut MergeOperands { - type Item = &'a [u8]; - - fn next(&mut self) -> Option<&'a [u8]> { - if self.cursor == self.num_operands { - None - } else { - unsafe { - let base = self.operands_list as usize; - let base_len = self.operands_list_len as usize; - let spacing = mem::size_of::<*const *const u8>(); - let spacing_len = mem::size_of::<*const size_t>(); - let len_ptr = 
(base_len + (spacing_len * self.cursor)) as *const size_t; - let len = *len_ptr as usize; - let ptr = base + (spacing * self.cursor); - self.cursor += 1; - Some(mem::transmute(slice::from_raw_parts( - *(ptr as *const *const u8) as *const u8, - len, - ))) - } - } - } - - fn size_hint(&self) -> (usize, Option) { - let remaining = self.num_operands - self.cursor; - (remaining, Some(remaining)) - } + type Item = &'a [u8]; + + fn next(&mut self) -> Option<&'a [u8]> { + if self.cursor == self.num_operands { + None + } else { + unsafe { + let base = self.operands_list as usize; + let base_len = self.operands_list_len as usize; + let spacing = mem::size_of::<*const *const u8>(); + let spacing_len = mem::size_of::<*const size_t>(); + let len_ptr = (base_len + (spacing_len * self.cursor)) as *const size_t; + let len = *len_ptr as usize; + let ptr = base + (spacing * self.cursor); + self.cursor += 1; + Some(mem::transmute(slice::from_raw_parts( + *(ptr as *const *const u8) as *const u8, + len, + ))) + } + } + } + + fn size_hint(&self) -> (usize, Option) { + let remaining = self.num_operands - self.cursor; + (remaining, Some(remaining)) + } } #[cfg(test)] mod test { - use super::*; - - fn test_provided_merge( - _new_key: &[u8], - existing_val: Option<&[u8]>, - operands: &mut MergeOperands, - ) -> Option> { - let nops = operands.size_hint().0; - let mut result: Vec = Vec::with_capacity(nops); - if let Some(v) = existing_val { - for e in v { - result.push(*e); - } - } - for op in operands { - for e in op { - result.push(*e); - } - } - Some(result) - } - -#[test] - fn mergetest() { - use {DB, Options}; - - let path = "_rust_rocksdb_mergetest"; - let mut opts = Options::default(); - opts.create_if_missing(true); - opts.set_merge_operator("test operator", test_provided_merge, None); - { - let db = DB::open(&opts, path).unwrap(); - let p = db.put(b"k1", b"a"); - assert!(p.is_ok()); - let _ = db.merge(b"k1", b"b"); - let _ = db.merge(b"k1", b"c"); - let _ = db.merge(b"k1", b"d"); - let _ = db.merge(b"k1", b"efg"); - let m = db.merge(b"k1", b"h"); - assert!(m.is_ok()); - match db.get(b"k1") { - Ok(Some(value)) => { - match value.to_utf8() { - Some(v) => println!("retrieved utf8 value: {}", v), - None => println!("did not read valid utf-8 out of the db"), - } - } - Err(_) => println!("error reading value"), - _ => panic!("value not present"), - } - - assert!(m.is_ok()); - let r = db.get(b"k1"); - assert!(r.unwrap().unwrap().to_utf8().unwrap() == "abcdefgh"); - assert!(db.delete(b"k1").is_ok()); - assert!(db.get(b"k1").unwrap().is_none()); - } - assert!(DB::destroy(&opts, path).is_ok()); - } - - unsafe fn to_slice(p: &T) -> &[u8] { - ::std::slice::from_raw_parts( - (p as *const T) as *const u8, - ::std::mem::size_of::(), - ) - } - - fn from_slice(s: &[u8]) -> Option<&T> { - if ::std::mem::size_of::() != s.len() { - println!("slice {:?} is len {}, but T is size {}", s, s.len(), ::std::mem::size_of::()); - None - } else { - unsafe { - Some(::std::mem::transmute(s.as_ptr())) - } - } - } - -#[repr(packed)] - - #[derive(Copy, Clone, Debug)] - struct ValueCounts { - num_a: u32, - num_b: u32, - num_c: u32, - num_d: u32, - } - - fn test_counting_partial_merge( - _new_key: &[u8], - _existing_val: Option<&[u8]>, - operands: &mut MergeOperands, - ) -> Option> { - let nops = operands.size_hint().0; - let mut result: Vec = Vec::with_capacity(nops); - for op in operands { - for e in op { - result.push(*e); - } - } - Some(result) - } - - fn test_counting_full_merge( - _new_key: &[u8], - existing_val: Option<&[u8]>, - 
operands: &mut MergeOperands, - ) -> Option> { - - let mut counts : ValueCounts = - if let Some(v) = existing_val { - from_slice::(v).unwrap().clone() - } else { - ValueCounts { - num_a: 0, - num_b: 0, - num_c: 0, - num_d: 0 } - }; - - for op in operands { - for e in op { - match *e { - b'a' => counts.num_a += 1, - b'b' => counts.num_b += 1, - b'c' => counts.num_c += 1, - b'd' => counts.num_d += 1, - _ => {} - } - } - } - let slc = unsafe { to_slice(&counts) }; - Some(slc.to_vec()) - } - -#[test] - fn counting_mergetest() { - use std::thread; - use std::sync::Arc; - use {DB, Options, DBCompactionStyle}; - - let path = "_rust_rocksdb_partial_mergetest"; - let mut opts = Options::default(); - opts.create_if_missing(true); - opts.set_compaction_style(DBCompactionStyle::Universal); - opts.set_min_write_buffer_number_to_merge(10); - - opts.set_merge_operator("sort operator", test_counting_full_merge, Some(test_counting_partial_merge)); - { - let db = Arc::new(DB::open(&opts, path).unwrap()); - let _ = db.delete(b"k1"); - let _ = db.delete(b"k2"); - let _ = db.merge(b"k1", b"a"); - let _ = db.merge(b"k1", b"b"); - let _ = db.merge(b"k1", b"d"); - let _ = db.merge(b"k1", b"a"); - let _ = db.merge(b"k1", b"a"); - let _ = db.merge(b"k1", b"efg"); - for i in 0..500 { - let _ = db.merge(b"k2", b"c"); - if i % 20 == 0 { + use super::*; + + fn test_provided_merge( + _new_key: &[u8], + existing_val: Option<&[u8]>, + operands: &mut MergeOperands, + ) -> Option> { + let nops = operands.size_hint().0; + let mut result: Vec = Vec::with_capacity(nops); + if let Some(v) = existing_val { + for e in v { + result.push(*e); + } + } + for op in operands { + for e in op { + result.push(*e); + } + } + Some(result) + } + + #[test] + fn mergetest() { + use {Options, DB}; + + let path = "_rust_rocksdb_mergetest"; + let mut opts = Options::default(); + opts.create_if_missing(true); + opts.set_merge_operator("test operator", test_provided_merge, None); + { + let db = DB::open(&opts, path).unwrap(); + let p = db.put(b"k1", b"a"); + assert!(p.is_ok()); + let _ = db.merge(b"k1", b"b"); + let _ = db.merge(b"k1", b"c"); + let _ = db.merge(b"k1", b"d"); + let _ = db.merge(b"k1", b"efg"); + let m = db.merge(b"k1", b"h"); + assert!(m.is_ok()); + match db.get(b"k1") { + Ok(Some(value)) => match value.to_utf8() { + Some(v) => println!("retrieved utf8 value: {}", v), + None => println!("did not read valid utf-8 out of the db"), + }, + Err(_) => println!("error reading value"), + _ => panic!("value not present"), + } + + assert!(m.is_ok()); + let r = db.get(b"k1"); + assert!(r.unwrap().unwrap().to_utf8().unwrap() == "abcdefgh"); + assert!(db.delete(b"k1").is_ok()); + assert!(db.get(b"k1").unwrap().is_none()); + } + assert!(DB::destroy(&opts, path).is_ok()); + } + + unsafe fn to_slice(p: &T) -> &[u8] { + ::std::slice::from_raw_parts((p as *const T) as *const u8, ::std::mem::size_of::()) + } + + fn from_slice(s: &[u8]) -> Option<&T> { + if ::std::mem::size_of::() != s.len() { + println!( + "slice {:?} is len {}, but T is size {}", + s, + s.len(), + ::std::mem::size_of::() + ); + None + } else { + unsafe { Some(::std::mem::transmute(s.as_ptr())) } + } + } + + #[repr(packed)] + #[derive(Copy, Clone, Debug)] + struct ValueCounts { + num_a: u32, + num_b: u32, + num_c: u32, + num_d: u32, + } + + fn test_counting_partial_merge( + _new_key: &[u8], + _existing_val: Option<&[u8]>, + operands: &mut MergeOperands, + ) -> Option> { + let nops = operands.size_hint().0; + let mut result: Vec = Vec::with_capacity(nops); + for op in operands { + for 
e in op { + result.push(*e); + } + } + Some(result) + } + + fn test_counting_full_merge( + _new_key: &[u8], + existing_val: Option<&[u8]>, + operands: &mut MergeOperands, + ) -> Option> { + let mut counts: ValueCounts = if let Some(v) = existing_val { + from_slice::(v).unwrap().clone() + } else { + ValueCounts { + num_a: 0, + num_b: 0, + num_c: 0, + num_d: 0, + } + }; + + for op in operands { + for e in op { + match *e { + b'a' => counts.num_a += 1, + b'b' => counts.num_b += 1, + b'c' => counts.num_c += 1, + b'd' => counts.num_d += 1, + _ => {} + } + } + } + let slc = unsafe { to_slice(&counts) }; + Some(slc.to_vec()) + } + + #[test] + fn counting_mergetest() { + use std::sync::Arc; + use std::thread; + use {DBCompactionStyle, Options, DB}; + + let path = "_rust_rocksdb_partial_mergetest"; + let mut opts = Options::default(); + opts.create_if_missing(true); + opts.set_compaction_style(DBCompactionStyle::Universal); + opts.set_min_write_buffer_number_to_merge(10); + + opts.set_merge_operator( + "sort operator", + test_counting_full_merge, + Some(test_counting_partial_merge), + ); + { + let db = Arc::new(DB::open(&opts, path).unwrap()); + let _ = db.delete(b"k1"); + let _ = db.delete(b"k2"); + let _ = db.merge(b"k1", b"a"); + let _ = db.merge(b"k1", b"b"); + let _ = db.merge(b"k1", b"d"); + let _ = db.merge(b"k1", b"a"); + let _ = db.merge(b"k1", b"a"); + let _ = db.merge(b"k1", b"efg"); + for i in 0..500 { + let _ = db.merge(b"k2", b"c"); + if i % 20 == 0 { let _ = db.get(b"k2"); - } - } - for i in 0..500 { - let _ = db.merge(b"k2", b"c"); - if i % 20 == 0 { + } + } + for i in 0..500 { + let _ = db.merge(b"k2", b"c"); + if i % 20 == 0 { let _ = db.get(b"k2"); - } - } - db.compact_range(None, None); - let d1 = db.clone(); - let d2 = db.clone(); - let d3 = db.clone(); - let h1 = thread::spawn(move || { - for i in 0..500 { - let _ = d1.merge(b"k2", b"c"); - if i % 20 == 0 { - let _ = d1.get(b"k2"); - } - } - for i in 0..500 { - let _ = d1.merge(b"k2", b"a"); - if i % 20 == 0 { - let _ = d1.get(b"k2"); - } - } - }); - let h2 = thread::spawn(move || { - for i in 0..500 { - let _ = d2.merge(b"k2", b"b"); - if i % 20 == 0 { - let _ = d2.get(b"k2"); - } - } - for i in 0..500 { - let _ = d2.merge(b"k2", b"d"); - if i % 20 == 0 { - let _ = d2.get(b"k2"); - } - } - d2.compact_range(None, None); - }); - h2.join().unwrap(); - let h3 = thread::spawn(move || { - for i in 0..500 { - let _ = d3.merge(b"k2", b"a"); - if i % 20 == 0 { - let _ = d3.get(b"k2"); - } - } - for i in 0..500 { - let _ = d3.merge(b"k2", b"c"); - if i % 20 == 0 { - let _ = d3.get(b"k2"); - } - } - }); - let m = db.merge(b"k1", b"b"); - assert!(m.is_ok()); - h3.join().unwrap(); - h1.join().unwrap(); - match db.get(b"k2") { - Ok(Some(value)) => { - match from_slice::(&*value) { - Some(v) => unsafe { - assert_eq!(v.num_a, 1000); - assert_eq!(v.num_b, 500); - assert_eq!(v.num_c, 2000); - assert_eq!(v.num_d, 500); - }, - None => panic!("Failed to get ValueCounts from db"), - } - } - Err(e) => panic!("error reading value {:?}", e), - _ => panic!("value not present"), - } - match db.get(b"k1") { - Ok(Some(value)) => { - match from_slice::(&*value) { - Some(v) => unsafe { - assert_eq!(v.num_a, 3); - assert_eq!(v.num_b, 2); - assert_eq!(v.num_c, 0); - assert_eq!(v.num_d, 1); - }, - None => panic!("Failed to get ValueCounts from db"), - } - } - Err(e) => panic!("error reading value {:?}", e), - _ => panic!("value not present"), - } - } - assert!(DB::destroy(&opts, path).is_ok()); - } + } + } + db.compact_range(None, None); + let d1 = 
db.clone(); + let d2 = db.clone(); + let d3 = db.clone(); + let h1 = thread::spawn(move || { + for i in 0..500 { + let _ = d1.merge(b"k2", b"c"); + if i % 20 == 0 { + let _ = d1.get(b"k2"); + } + } + for i in 0..500 { + let _ = d1.merge(b"k2", b"a"); + if i % 20 == 0 { + let _ = d1.get(b"k2"); + } + } + }); + let h2 = thread::spawn(move || { + for i in 0..500 { + let _ = d2.merge(b"k2", b"b"); + if i % 20 == 0 { + let _ = d2.get(b"k2"); + } + } + for i in 0..500 { + let _ = d2.merge(b"k2", b"d"); + if i % 20 == 0 { + let _ = d2.get(b"k2"); + } + } + d2.compact_range(None, None); + }); + h2.join().unwrap(); + let h3 = thread::spawn(move || { + for i in 0..500 { + let _ = d3.merge(b"k2", b"a"); + if i % 20 == 0 { + let _ = d3.get(b"k2"); + } + } + for i in 0..500 { + let _ = d3.merge(b"k2", b"c"); + if i % 20 == 0 { + let _ = d3.get(b"k2"); + } + } + }); + let m = db.merge(b"k1", b"b"); + assert!(m.is_ok()); + h3.join().unwrap(); + h1.join().unwrap(); + match db.get(b"k2") { + Ok(Some(value)) => match from_slice::(&*value) { + Some(v) => unsafe { + assert_eq!(v.num_a, 1000); + assert_eq!(v.num_b, 500); + assert_eq!(v.num_c, 2000); + assert_eq!(v.num_d, 500); + }, + None => panic!("Failed to get ValueCounts from db"), + }, + Err(e) => panic!("error reading value {:?}", e), + _ => panic!("value not present"), + } + match db.get(b"k1") { + Ok(Some(value)) => match from_slice::(&*value) { + Some(v) => unsafe { + assert_eq!(v.num_a, 3); + assert_eq!(v.num_b, 2); + assert_eq!(v.num_c, 0); + assert_eq!(v.num_d, 1); + }, + None => panic!("Failed to get ValueCounts from db"), + }, + Err(e) => panic!("error reading value {:?}", e), + _ => panic!("value not present"), + } + } + assert!(DB::destroy(&opts, path).is_ok()); + } } diff --git a/src/slice_transform.rs b/src/slice_transform.rs index f834be3..d3dcb23 100644 --- a/src/slice_transform.rs +++ b/src/slice_transform.rs @@ -33,14 +33,14 @@ pub struct SliceTransform { // through to rocksdb_slicetransform_destroy because // this is currently only used (to my knowledge) // by people passing it as a prefix extractor when -// opening a DB. +// opening a DB. impl SliceTransform { pub fn create( name: &str, transform_fn: TransformFn, in_domain_fn: Option, - ) -> SliceTransform{ + ) -> SliceTransform { let cb = Box::new(TransformCallback { name: CString::new(name.as_bytes()).unwrap(), transform_fn: transform_fn, @@ -48,11 +48,10 @@ impl SliceTransform { }); let st = unsafe { - ffi::rocksdb_slicetransform_create( + ffi::rocksdb_slicetransform_create( mem::transmute(cb), Some(slice_transform_destructor_callback), Some(transform_callback), - // this is ugly, but I can't get the compiler // not to barf with "expected fn pointer, found fn item" // without this. sorry. 
@@ -61,31 +60,24 @@ impl SliceTransform { } else { None }, - // this None points to the deprecated InRange callback None, Some(slice_transform_name_callback), ) }; - SliceTransform { - inner: st - } + SliceTransform { inner: st } } pub fn create_fixed_prefix(len: size_t) -> SliceTransform { SliceTransform { - inner: unsafe { - ffi::rocksdb_slicetransform_create_fixed_prefix(len) - }, + inner: unsafe { ffi::rocksdb_slicetransform_create_fixed_prefix(len) }, } } pub fn create_noop() -> SliceTransform { SliceTransform { - inner: unsafe { - ffi::rocksdb_slicetransform_create_noop() - }, + inner: unsafe { ffi::rocksdb_slicetransform_create_noop() }, } } } @@ -94,34 +86,30 @@ pub type TransformFn = fn(&[u8]) -> Vec; pub type InDomainFn = fn(&[u8]) -> bool; pub struct TransformCallback { - pub name: CString, - pub transform_fn: TransformFn, - pub in_domain_fn: Option, + pub name: CString, + pub transform_fn: TransformFn, + pub in_domain_fn: Option, } -pub unsafe extern "C" fn slice_transform_destructor_callback( - raw_cb: *mut c_void -) { - let transform: Box = mem::transmute(raw_cb); - drop(transform); +pub unsafe extern "C" fn slice_transform_destructor_callback(raw_cb: *mut c_void) { + let transform: Box = mem::transmute(raw_cb); + drop(transform); } -pub unsafe extern "C" fn slice_transform_name_callback( - raw_cb: *mut c_void -) -> *const c_char { - let cb = &mut *(raw_cb as *mut TransformCallback); - cb.name.as_ptr() +pub unsafe extern "C" fn slice_transform_name_callback(raw_cb: *mut c_void) -> *const c_char { + let cb = &mut *(raw_cb as *mut TransformCallback); + cb.name.as_ptr() } pub unsafe extern "C" fn transform_callback( - raw_cb: *mut c_void, - raw_key: *const c_char, - key_len: size_t, - dst_length: *mut size_t, + raw_cb: *mut c_void, + raw_key: *const c_char, + key_len: size_t, + dst_length: *mut size_t, ) -> *mut c_char { - let cb = &mut *(raw_cb as *mut TransformCallback); - let key = slice::from_raw_parts(raw_key as *const u8, key_len as usize); - let mut result = (cb.transform_fn)(key); + let cb = &mut *(raw_cb as *mut TransformCallback); + let key = slice::from_raw_parts(raw_key as *const u8, key_len as usize); + let mut result = (cb.transform_fn)(key); result.shrink_to_fit(); // copy the result into a C++ destroyable buffer @@ -135,11 +123,11 @@ pub unsafe extern "C" fn transform_callback( pub unsafe extern "C" fn in_domain_callback( raw_cb: *mut c_void, - raw_key: *const c_char, - key_len: size_t, + raw_key: *const c_char, + key_len: size_t, ) -> u8 { - let cb = &mut *(raw_cb as *mut TransformCallback); - let key = slice::from_raw_parts(raw_key as *const u8, key_len as usize); + let cb = &mut *(raw_cb as *mut TransformCallback); + let key = slice::from_raw_parts(raw_key as *const u8, key_len as usize); if (cb.in_domain_fn.unwrap())(key) { 1 diff --git a/tests/test_checkpoint.rs b/tests/test_checkpoint.rs index cfddb03..854a1d1 100644 --- a/tests/test_checkpoint.rs +++ b/tests/test_checkpoint.rs @@ -14,7 +14,7 @@ // extern crate rocksdb; -use rocksdb::{checkpoint::Checkpoint, DB, Options}; +use rocksdb::{checkpoint::Checkpoint, Options, DB}; use std::fs::remove_dir_all; #[test] diff --git a/tests/test_column_family.rs b/tests/test_column_family.rs index dba89da..7525116 100644 --- a/tests/test_column_family.rs +++ b/tests/test_column_family.rs @@ -15,7 +15,7 @@ extern crate rocksdb; mod util; -use rocksdb::{DB, MergeOperands, Options, ColumnFamilyDescriptor}; +use rocksdb::{ColumnFamilyDescriptor, MergeOperands, Options, DB}; use util::DBPath; #[test] @@ -42,16 +42,15 @@ pub 
fn test_column_family() {
         let mut opts = Options::default();
         opts.set_merge_operator("test operator", test_provided_merge, None);
         match DB::open(&opts, &n) {
-            Ok(_db) => {
-                panic!("should not have opened DB successfully without \
-                        specifying column
-                        families")
-            }
-            Err(e) => {
-                assert!(e.to_string()
-                    .starts_with("Invalid argument: You have to open all \
-                                  column families."))
-            }
+            Ok(_db) => panic!(
+                "should not have opened DB successfully without \
+                 specifying column
+                 families"
+            ),
+            Err(e) => assert!(e.to_string().starts_with(
+                "Invalid argument: You have to open all \
+                 column families."
+            )),
         }
     }
 
@@ -76,11 +75,9 @@ pub fn test_column_family() {
 
     }
     // TODO should be able to use writebatch ops with a cf
-    {
-    }
+    {}
     // TODO should be able to iterate over a cf
-    {
-    }
+    {}
     // should b able to drop a cf
     {
         let mut db = DB::open_cf(&Options::default(), &n, &["cf1"]).unwrap();
@@ -136,12 +133,10 @@ fn test_merge_operator() {
         println!("m is {:?}", m);
         // TODO assert!(m.is_ok());
         match db.get(b"k1") {
-            Ok(Some(value)) => {
-                match value.to_utf8() {
-                    Some(v) => println!("retrieved utf8 value: {}", v),
-                    None => println!("did not read valid utf-8 out of the db"),
-                }
-            }
+            Ok(Some(value)) => match value.to_utf8() {
+                Some(v) => println!("retrieved utf8 value: {}", v),
+                None => println!("did not read valid utf-8 out of the db"),
+            },
             Err(_) => println!("error reading value"),
             _ => panic!("value not present!"),
         }
@@ -151,13 +146,13 @@ fn test_merge_operator() {
         assert!(db.delete(b"k1").is_ok());
         assert!(db.get(b"k1").unwrap().is_none());
     }
-
 }
 
-fn test_provided_merge(_: &[u8],
-                       existing_val: Option<&[u8]>,
-                       operands: &mut MergeOperands)
-                       -> Option<Vec<u8>> {
+fn test_provided_merge(
+    _: &[u8],
+    existing_val: Option<&[u8]>,
+    operands: &mut MergeOperands,
+) -> Option<Vec<u8>> {
     let nops = operands.size_hint().0;
     let mut result: Vec<u8> = Vec::with_capacity(nops);
     match existing_val {
@@ -192,7 +187,10 @@ pub fn test_column_family_with_options() {
         match DB::open_cf_descriptors(&opts, &n, cfs) {
             Ok(_db) => println!("created db with column family descriptors succesfully"),
             Err(e) => {
-                panic!("could not create new database with column family descriptors: {}", e);
+                panic!(
+                    "could not create new database with column family descriptors: {}",
+                    e
+                );
             }
         }
     }
@@ -208,7 +206,10 @@ pub fn test_column_family_with_options() {
         match DB::open_cf_descriptors(&opts, &n, cfs) {
             Ok(_db) => println!("succesfully re-opened database with column family descriptors"),
             Err(e) => {
-                panic!("unable to re-open database with column family descriptors: {}", e);
+                panic!(
+                    "unable to re-open database with column family descriptors: {}",
+                    e
+                );
             }
         }
     }
diff --git a/tests/test_iterator.rs b/tests/test_iterator.rs
index fb735f7..b96fe42 100644
--- a/tests/test_iterator.rs
+++ b/tests/test_iterator.rs
@@ -15,7 +15,7 @@
 extern crate rocksdb;
 mod util;
 
-use rocksdb::{DB, Direction, IteratorMode, MemtableFactory, Options};
+use rocksdb::{Direction, IteratorMode, MemtableFactory, Options, DB};
 use util::DBPath;
 
 fn cba(input: &Box<[u8]>) -> Box<[u8]> {
@@ -41,7 +41,11 @@ pub fn test_iterator() {
     assert!(p.is_ok());
     let p = db.put(&*k3, &*v3);
     assert!(p.is_ok());
-    let expected = vec![(cba(&k1), cba(&v1)), (cba(&k2), cba(&v2)), (cba(&k3), cba(&v3))];
+    let expected = vec![
+        (cba(&k1), cba(&v1)),
+        (cba(&k2), cba(&v2)),
+        (cba(&k3), cba(&v3)),
+    ];
     {
         let iterator1 = db.iterator(IteratorMode::Start);
         assert_eq!(iterator1.collect::<Vec<_>>(), expected);
@@ -103,10 +107,12 @@ pub fn test_iterator() {
     let old_iterator = db.iterator(IteratorMode::Start);
     let p = 
db.put(&*k4, &*v4);
     assert!(p.is_ok());
-    let expected2 = vec![(cba(&k1), cba(&v1)),
-                         (cba(&k2), cba(&v2)),
-                         (cba(&k3), cba(&v3)),
-                         (cba(&k4), cba(&v4))];
+    let expected2 = vec![
+        (cba(&k1), cba(&v1)),
+        (cba(&k2), cba(&v2)),
+        (cba(&k3), cba(&v3)),
+        (cba(&k4), cba(&v4)),
+    ];
     {
         assert_eq!(old_iterator.collect::<Vec<_>>(), expected);
     }
@@ -116,7 +122,11 @@ pub fn test_iterator() {
     }
     {
         let iterator1 = db.iterator(IteratorMode::From(b"k2", Direction::Forward));
-        let expected = vec![(cba(&k2), cba(&v2)), (cba(&k3), cba(&v3)), (cba(&k4), cba(&v4))];
+        let expected = vec![
+            (cba(&k2), cba(&v2)),
+            (cba(&k3), cba(&v3)),
+            (cba(&k4), cba(&v4)),
+        ];
         assert_eq!(iterator1.collect::<Vec<_>>(), expected);
     }
     {
@@ -157,7 +167,9 @@ pub fn test_iterator() {
     }
 }
 
-fn key(k: &[u8]) -> Box<[u8]> { k.to_vec().into_boxed_slice() }
+fn key(k: &[u8]) -> Box<[u8]> {
+    k.to_vec().into_boxed_slice()
+}
 
 #[test]
 pub fn test_prefix_iterator() {
diff --git a/tests/test_multithreaded.rs b/tests/test_multithreaded.rs
index 38e5d1b..141007f 100644
--- a/tests/test_multithreaded.rs
+++ b/tests/test_multithreaded.rs
@@ -16,11 +16,10 @@ extern crate rocksdb;
 mod util;
 
 use rocksdb::DB;
-use std::thread;
 use std::sync::Arc;
+use std::thread;
 use util::DBPath;
 
-
 const N: usize = 100_000;
 
 #[test]
diff --git a/tests/test_raw_iterator.rs b/tests/test_raw_iterator.rs
index 080bb7e..8d4f019 100644
--- a/tests/test_raw_iterator.rs
+++ b/tests/test_raw_iterator.rs
@@ -41,9 +41,9 @@ pub fn test_forwards_iteration() {
         assert_eq!(iter.key(), Some(b"k2".to_vec()));
         assert_eq!(iter.value(), Some(b"v2".to_vec()));
 
-        iter.next();    // k3
-        iter.next();    // k4
-        iter.next();    // invalid!
+        iter.next(); // k3
+        iter.next(); // k4
+        iter.next(); // invalid!
 
         assert_eq!(iter.valid(), false);
         assert_eq!(iter.key(), None);
@@ -51,7 +51,6 @@ pub fn test_forwards_iteration() {
     }
 }
 
-
 #[test]
 pub fn test_seek_last() {
     let n = DBPath::new("backwards_iteration");
@@ -75,9 +74,9 @@ pub fn test_seek_last() {
         assert_eq!(iter.key(), Some(b"k3".to_vec()));
         assert_eq!(iter.value(), Some(b"v3".to_vec()));
 
-        iter.prev();    // k2
-        iter.prev();    // k1
-        iter.prev();    // invalid!
+        iter.prev(); // k2
+        iter.prev(); // k1
+        iter.prev(); // invalid!
assert_eq!(iter.valid(), false); assert_eq!(iter.key(), None); @@ -85,7 +84,6 @@ pub fn test_seek_last() { } } - #[test] pub fn test_seek() { let n = DBPath::new("seek"); @@ -111,7 +109,6 @@ pub fn test_seek() { } } - #[test] pub fn test_seek_to_nonexistant() { let n = DBPath::new("seek_to_nonexistant"); diff --git a/tests/test_rocksdb_options.rs b/tests/test_rocksdb_options.rs index 9429a6a..56a77b8 100644 --- a/tests/test_rocksdb_options.rs +++ b/tests/test_rocksdb_options.rs @@ -15,7 +15,7 @@ extern crate rocksdb; mod util; -use rocksdb::{DB, Options}; +use rocksdb::{Options, DB}; use util::DBPath; #[test] diff --git a/tests/test_slice_transform.rs b/tests/test_slice_transform.rs index 484ad6f..44eebad 100644 --- a/tests/test_slice_transform.rs +++ b/tests/test_slice_transform.rs @@ -1,7 +1,7 @@ extern crate rocksdb; mod util; -use rocksdb::{DB, Options, SliceTransform}; +use rocksdb::{Options, SliceTransform, DB}; use util::DBPath; #[test] @@ -34,7 +34,9 @@ pub fn test_slice_transform() { input.iter().cloned().collect::>().into_boxed_slice() } - fn key(k: &[u8]) -> Box<[u8]> { k.to_vec().into_boxed_slice() } + fn key(k: &[u8]) -> Box<[u8]> { + k.to_vec().into_boxed_slice() + } { let expected = vec![(cba(&a1), cba(&a1)), (cba(&a2), cba(&a2))]; diff --git a/tests/util/mod.rs b/tests/util/mod.rs index e94413e..1e028ba 100644 --- a/tests/util/mod.rs +++ b/tests/util/mod.rs @@ -1,13 +1,13 @@ extern crate rocksdb; +use std::path::{Path, PathBuf}; use std::time::{SystemTime, UNIX_EPOCH}; -use std::path::{PathBuf, Path}; -use rocksdb::{DB, Options}; +use rocksdb::{Options, DB}; /// Ensures that DB::Destroy is called for this database when DBPath is dropped. pub struct DBPath { - path: PathBuf + path: PathBuf, } impl DBPath { @@ -22,7 +22,9 @@ impl DBPath { current_time.subsec_nanos() ); - DBPath { path: PathBuf::from(path) } + DBPath { + path: PathBuf::from(path), + } } } @@ -38,4 +40,3 @@ impl AsRef for DBPath { &self.path } } - From c04f0f5522113a6b5d7463c59cf4b936d5f694bf Mon Sep 17 00:00:00 2001 From: Jordan Terrell Date: Thu, 29 Nov 2018 06:07:24 -0600 Subject: [PATCH 2/3] Deleting rustfmt.toml since it applies default settings... --- rustfmt.toml | 1 - 1 file changed, 1 deletion(-) delete mode 100644 rustfmt.toml diff --git a/rustfmt.toml b/rustfmt.toml deleted file mode 100644 index 4e727a0..0000000 --- a/rustfmt.toml +++ /dev/null @@ -1 +0,0 @@ -reorder_imports = true \ No newline at end of file From a824913a36bc43922552d489a53c4b6edb281de2 Mon Sep 17 00:00:00 2001 From: Jordan Terrell Date: Thu, 29 Nov 2018 06:28:27 -0600 Subject: [PATCH 3/3] Applying rustfmt to all code... 
--- librocksdb-sys/build.rs | 37 +- librocksdb-sys/src/lib.rs | 2 +- librocksdb-sys/tests/ffi.rs | 672 ++++++++++++++++++++---------------- 3 files changed, 399 insertions(+), 312 deletions(-) diff --git a/librocksdb-sys/build.rs b/librocksdb-sys/build.rs index c687e20..7eb4d0e 100644 --- a/librocksdb-sys/build.rs +++ b/librocksdb-sys/build.rs @@ -1,5 +1,5 @@ -extern crate cc; extern crate bindgen; +extern crate cc; extern crate glob; use std::env; @@ -49,7 +49,7 @@ fn build_rocksdb() { config.include("rocksdb/include/"); config.include("rocksdb/"); config.include("rocksdb/third-party/gtest-1.7.0/fused-src/"); - + if cfg!(feature = "snappy") { config.define("SNAPPY", Some("1")); config.include("snappy/"); @@ -70,7 +70,7 @@ fn build_rocksdb() { config.define("ZLIB", Some("1")); config.include("zlib/"); } - + if cfg!(feature = "bzip2") { config.define("BZIP2", Some("1")); config.include("bzip2/"); @@ -94,7 +94,6 @@ fn build_rocksdb() { config.define("OS_MACOSX", Some("1")); config.define("ROCKSDB_PLATFORM_POSIX", Some("1")); config.define("ROCKSDB_LIB_IO_POSIX", Some("1")); - } if cfg!(target_os = "linux") { config.define("OS_LINUX", Some("1")); @@ -118,12 +117,9 @@ fn build_rocksdb() { .iter() .cloned() .filter(|file| match *file { - "port/port_posix.cc" | - "env/env_posix.cc" | - "env/io_posix.cc" => false, + "port/port_posix.cc" | "env/env_posix.cc" | "env/io_posix.cc" => false, _ => true, - }) - .collect::>(); + }).collect::>(); // Add Windows-specific sources lib_sources.push("port/win/port_win.cc"); @@ -176,7 +172,7 @@ fn build_snappy() { fn build_lz4() { let mut compiler = cc::Build::new(); - + compiler .file("lz4/lib/lz4.c") .file("lz4/lib/lz4frame.c") @@ -185,12 +181,11 @@ fn build_lz4() { compiler.opt_level(3); - match env::var("TARGET").unwrap().as_str() - { - "i686-pc-windows-gnu" => { - compiler.flag("-fno-tree-vectorize"); - }, - _ => {} + match env::var("TARGET").unwrap().as_str() { + "i686-pc-windows-gnu" => { + compiler.flag("-fno-tree-vectorize"); + } + _ => {} } compiler.compile("liblz4.a"); @@ -198,7 +193,7 @@ fn build_lz4() { fn build_zstd() { let mut compiler = cc::Build::new(); - + compiler.include("zstd/lib/"); compiler.include("zstd/lib/common"); compiler.include("zstd/lib/legacy"); @@ -226,10 +221,8 @@ fn build_zstd() { fn build_zlib() { let mut compiler = cc::Build::new(); - - let globs = &[ - "zlib/*.c" - ]; + + let globs = &["zlib/*.c"]; for pattern in globs { for path in glob::glob(pattern).unwrap() { @@ -244,7 +237,7 @@ fn build_zlib() { fn build_bzip2() { let mut compiler = cc::Build::new(); - + compiler .file("bzip2/blocksort.c") .file("bzip2/bzlib.c") diff --git a/librocksdb-sys/src/lib.rs b/librocksdb-sys/src/lib.rs index ea6d2d0..b154977 100644 --- a/librocksdb-sys/src/lib.rs +++ b/librocksdb-sys/src/lib.rs @@ -25,4 +25,4 @@ include!(concat!(env!("OUT_DIR"), "/bindings.rs")); #[no_mangle] pub fn bz_internal_error(errcode: c_int) { panic!("bz internal error: {}", errcode); -} \ No newline at end of file +} diff --git a/librocksdb-sys/tests/ffi.rs b/librocksdb-sys/tests/ffi.rs index 13a1a98..87f1519 100644 --- a/librocksdb-sys/tests/ffi.rs +++ b/librocksdb-sys/tests/ffi.rs @@ -15,7 +15,13 @@ // This code is based on , revision a10e8a056d569acf6a52045124e6414ad33bdfcd. 
-#![allow(non_snake_case, non_upper_case_globals, unused_mut, unused_unsafe, unused_variables)] +#![allow( + non_snake_case, + non_upper_case_globals, + unused_mut, + unused_unsafe, + unused_variables +)] #[macro_use] extern crate const_cstr; @@ -23,18 +29,18 @@ extern crate libc; extern crate librocksdb_sys as ffi; extern crate uuid; -use ::ffi::*; -use ::libc::*; -use ::std::borrow::Cow; -use ::std::env; -use ::std::ffi::{CStr, CString}; -use ::std::io::Write; -use ::std::mem; -use ::std::path::PathBuf; -use ::std::ptr; -use ::std::slice; -use ::std::str; -use ::uuid::Uuid; +use ffi::*; +use libc::*; +use std::borrow::Cow; +use std::env; +use std::ffi::{CStr, CString}; +use std::io::Write; +use std::mem; +use std::path::PathBuf; +use std::ptr; +use std::slice; +use std::str; +use uuid::Uuid; macro_rules! err_println { ($($arg:tt)*) => (writeln!(&mut ::std::io::stderr(), $($arg)*).expect("failed printing to stderr")); @@ -77,28 +83,37 @@ unsafe fn StartPhase(name: &'static str) { } macro_rules! CheckNoError { - ($err:ident) => { unsafe { - assert!($err.is_null(), "{}: {}", phase, rstr($err)); - } }; + ($err:ident) => { + unsafe { + assert!($err.is_null(), "{}: {}", phase, rstr($err)); + } + }; } macro_rules! CheckCondition { - ($cond:expr) => { unsafe { - assert!($cond, "{}: {}", phase, stringify!($cond)); - } }; + ($cond:expr) => { + unsafe { + assert!($cond, "{}: {}", phase, stringify!($cond)); + } + }; } unsafe fn CheckEqual(expected: *const c_char, v: *const c_char, n: size_t) { if expected.is_null() && v.is_null() { // ok - } else if !expected.is_null() && !v.is_null() && n == strlen(expected) && - memcmp(expected as *const c_void, v as *const c_void, n) == 0 { + } else if !expected.is_null() + && !v.is_null() + && n == strlen(expected) + && memcmp(expected as *const c_void, v as *const c_void, n) == 0 + { // ok } else { - panic!("{}: expected '{}', got '{}'", - phase, - rstr(strndup(expected, n)), - rstr(strndup(v, 5))); + panic!( + "{}: expected '{}', got '{}'", + phase, + rstr(strndup(expected, n)), + rstr(strndup(v, 5)) + ); } } @@ -109,10 +124,12 @@ unsafe fn Free(ptr: *mut *mut T) { } } -unsafe fn CheckGet(mut db: *mut rocksdb_t, - options: *mut rocksdb_readoptions_t, - key: *const c_char, - expected: *const c_char) { +unsafe fn CheckGet( + mut db: *mut rocksdb_t, + options: *mut rocksdb_readoptions_t, + key: *const c_char, + expected: *const c_char, +) { let mut err: *mut c_char = ptr::null_mut(); let mut val_len: size_t = 0; let mut val: *mut c_char = rocksdb_get(db, options, key, strlen(key), &mut val_len, &mut err); @@ -121,20 +138,24 @@ unsafe fn CheckGet(mut db: *mut rocksdb_t, Free(&mut val); } -unsafe fn CheckGetCF(db: *mut rocksdb_t, - options: *const rocksdb_readoptions_t, - handle: *mut rocksdb_column_family_handle_t, - key: *const c_char, - expected: *const c_char) { +unsafe fn CheckGetCF( + db: *mut rocksdb_t, + options: *const rocksdb_readoptions_t, + handle: *mut rocksdb_column_family_handle_t, + key: *const c_char, + expected: *const c_char, +) { let mut err: *mut c_char = ptr::null_mut(); let mut val_len: size_t = 0; - let mut val: *mut c_char = rocksdb_get_cf(db, - options, - handle, - key, - strlen(key), - &mut val_len, - &mut err); + let mut val: *mut c_char = rocksdb_get_cf( + db, + options, + handle, + key, + strlen(key), + &mut val_len, + &mut err, + ); CheckNoError!(err); CheckEqual(expected, val, val_len); Free(&mut val); @@ -150,11 +171,13 @@ unsafe fn CheckIter(iter: *mut rocksdb_iterator_t, key: *const c_char, val: *con } // Callback from 
rocksdb_writebatch_iterate() -unsafe extern "C" fn CheckPut(ptr: *mut c_void, - k: *const c_char, - klen: size_t, - v: *const c_char, - vlen: size_t) { +unsafe extern "C" fn CheckPut( + ptr: *mut c_void, + k: *const c_char, + klen: size_t, + v: *const c_char, + vlen: size_t, +) { let mut state: *mut c_int = ptr as *mut c_int; CheckCondition!(*state < 2); match *state { @@ -181,17 +204,14 @@ unsafe extern "C" fn CheckDel(ptr: *mut c_void, k: *const c_char, klen: size_t) unsafe extern "C" fn CmpDestroy(arg: *mut c_void) {} -unsafe extern "C" fn CmpCompare(arg: *mut c_void, - a: *const c_char, - alen: size_t, - b: *const c_char, - blen: size_t) - -> c_int { - let n = if alen < blen { - alen - } else { - blen - }; +unsafe extern "C" fn CmpCompare( + arg: *mut c_void, + a: *const c_char, + alen: size_t, + b: *const c_char, + blen: size_t, +) -> c_int { + let n = if alen < blen { alen } else { blen }; let mut r = memcmp(a as *const c_void, b as *const c_void, n); if r == 0 { if alen < blen { @@ -217,28 +237,34 @@ unsafe extern "C" fn FilterName(arg: *mut c_void) -> *const c_char { cstrp!("TestFilter") } -unsafe extern "C" fn FilterCreate(arg: *mut c_void, - key_array: *const *const c_char, - key_length_array: *const size_t, - num_keys: c_int, - filter_length: *mut size_t) - -> *mut c_char { +unsafe extern "C" fn FilterCreate( + arg: *mut c_void, + key_array: *const *const c_char, + key_length_array: *const size_t, + num_keys: c_int, + filter_length: *mut size_t, +) -> *mut c_char { *filter_length = 4; let result = malloc(4); memcpy(result, cstrp!("fake") as *const c_void, 4); result as *mut c_char } -unsafe extern "C" fn FilterKeyMatch(arg: *mut c_void, - key: *const c_char, - length: size_t, - filter: *const c_char, - filter_length: size_t) - -> c_uchar { +unsafe extern "C" fn FilterKeyMatch( + arg: *mut c_void, + key: *const c_char, + length: size_t, + filter: *const c_char, + filter_length: size_t, +) -> c_uchar { CheckCondition!(filter_length == 4); - CheckCondition!(memcmp(filter as *const c_void, - cstrp!("fake") as *const c_void, - filter_length) == 0); + CheckCondition!( + memcmp( + filter as *const c_void, + cstrp!("fake") as *const c_void, + filter_length + ) == 0 + ); fake_filter_result } @@ -250,24 +276,31 @@ unsafe extern "C" fn CFilterName(arg: *mut c_void) -> *const c_char { cstrp!("foo") } -unsafe extern "C" fn CFilterFilter(arg: *mut c_void, - level: c_int, - key: *const c_char, - key_length: size_t, - existing_value: *const c_char, - value_length: size_t, - new_value: *mut *mut c_char, - new_value_length: *mut size_t, - value_changed: *mut u8) - -> c_uchar { +unsafe extern "C" fn CFilterFilter( + arg: *mut c_void, + level: c_int, + key: *const c_char, + key_length: size_t, + existing_value: *const c_char, + value_length: size_t, + new_value: *mut *mut c_char, + new_value_length: *mut size_t, + value_changed: *mut u8, +) -> c_uchar { if key_length == 3 { - if memcmp(mem::transmute(key), - mem::transmute(cstrp!("bar")), - key_length) == 0 { + if memcmp( + mem::transmute(key), + mem::transmute(cstrp!("bar")), + key_length, + ) == 0 + { return 1; - } else if memcmp(mem::transmute(key), - mem::transmute(cstrp!("baz")), - key_length) == 0 { + } else if memcmp( + mem::transmute(key), + mem::transmute(cstrp!("baz")), + key_length, + ) == 0 + { *value_changed = 1; *new_value = cstrp!("newbazvalue") as *mut c_char; *new_value_length = 11; @@ -283,49 +316,59 @@ unsafe extern "C" fn CFilterFactoryName(arg: *mut c_void) -> *const c_char { cstrp!("foo") } -unsafe extern "C" fn 
CFilterCreate(arg: *mut c_void, - context: *mut rocksdb_compactionfiltercontext_t) - -> *mut rocksdb_compactionfilter_t { - rocksdb_compactionfilter_create(ptr::null_mut(), - Some(CFilterDestroy), - Some(CFilterFilter), - Some(CFilterName)) +unsafe extern "C" fn CFilterCreate( + arg: *mut c_void, + context: *mut rocksdb_compactionfiltercontext_t, +) -> *mut rocksdb_compactionfilter_t { + rocksdb_compactionfilter_create( + ptr::null_mut(), + Some(CFilterDestroy), + Some(CFilterFilter), + Some(CFilterName), + ) } -unsafe fn CheckCompaction(dbname: *const c_char, - db: *mut rocksdb_t, - options: *const rocksdb_options_t, - roptions: *mut rocksdb_readoptions_t, - woptions: *mut rocksdb_writeoptions_t) - -> *mut rocksdb_t { +unsafe fn CheckCompaction( + dbname: *const c_char, + db: *mut rocksdb_t, + options: *const rocksdb_options_t, + roptions: *mut rocksdb_readoptions_t, + woptions: *mut rocksdb_writeoptions_t, +) -> *mut rocksdb_t { let mut err: *mut c_char = ptr::null_mut(); let db = rocksdb_open(options, dbname, &mut err); CheckNoError!(err); - rocksdb_put(db, - woptions, - cstrp!("foo"), - 3, - cstrp!("foovalue"), - 8, - &mut err); + rocksdb_put( + db, + woptions, + cstrp!("foo"), + 3, + cstrp!("foovalue"), + 8, + &mut err, + ); CheckNoError!(err); CheckGet(db, roptions, cstrp!("foo"), cstrp!("foovalue")); - rocksdb_put(db, - woptions, - cstrp!("bar"), - 3, - cstrp!("barvalue"), - 8, - &mut err); + rocksdb_put( + db, + woptions, + cstrp!("bar"), + 3, + cstrp!("barvalue"), + 8, + &mut err, + ); CheckNoError!(err); CheckGet(db, roptions, cstrp!("bar"), cstrp!("barvalue")); - rocksdb_put(db, - woptions, - cstrp!("baz"), - 3, - cstrp!("bazvalue"), - 8, - &mut err); + rocksdb_put( + db, + woptions, + cstrp!("baz"), + 3, + cstrp!("bazvalue"), + 8, + &mut err, + ); CheckNoError!(err); CheckGet(db, roptions, cstrp!("baz"), cstrp!("bazvalue")); @@ -346,17 +389,18 @@ unsafe extern "C" fn MergeOperatorName(arg: *mut c_void) -> *const c_char { cstrp!("foo") } -unsafe extern "C" fn MergeOperatorFullMerge(arg: *mut c_void, - key: *const c_char, - key_length: size_t, - existing_value: *const c_char, - existing_value_length: size_t, - operands_list: *const *const c_char, - operands_list_length: *const size_t, - num_operands: c_int, - success: *mut u8, - new_value_length: *mut size_t) - -> *mut c_char { +unsafe extern "C" fn MergeOperatorFullMerge( + arg: *mut c_void, + key: *const c_char, + key_length: size_t, + existing_value: *const c_char, + existing_value_length: size_t, + operands_list: *const *const c_char, + operands_list_length: *const size_t, + num_operands: c_int, + success: *mut u8, + new_value_length: *mut size_t, +) -> *mut c_char { *new_value_length = 4; *success = 1; let result: *mut c_char = malloc(4) as *mut _; @@ -364,15 +408,16 @@ unsafe extern "C" fn MergeOperatorFullMerge(arg: *mut c_void, result } -unsafe extern "C" fn MergeOperatorPartialMerge(arg: *mut c_void, - key: *const c_char, - key_length: size_t, - operands_list: *const *const c_char, - operands_list_length: *const size_t, - num_operands: c_int, - success: *mut u8, - new_value_length: *mut size_t) - -> *mut c_char { +unsafe extern "C" fn MergeOperatorPartialMerge( + arg: *mut c_void, + key: *const c_char, + key_length: size_t, + operands_list: *const *const c_char, + operands_list_length: *const size_t, + num_operands: c_int, + success: *mut u8, + new_value_length: *mut size_t, +) -> *mut c_char { *new_value_length = 4; *success = 1; let result: *mut c_char = malloc(4) as *mut _; @@ -413,10 +458,12 @@ fn ffi() { let 
dbbackupname = dbbackupname.as_ptr(); StartPhase("create_objects"); - cmp = rocksdb_comparator_create(ptr::null_mut(), - Some(CmpDestroy), - Some(CmpCompare), - Some(CmpName)); + cmp = rocksdb_comparator_create( + ptr::null_mut(), + Some(CmpDestroy), + Some(CmpCompare), + Some(CmpName), + ); env = rocksdb_create_default_env(); cache = rocksdb_cache_create_lru(100000); @@ -440,10 +487,12 @@ fn ffi() { no_compression, no_compression, no_compression, - ]; - rocksdb_options_set_compression_per_level(options, - mem::transmute(compression_levels.as_ptr()), - compression_levels.len() as size_t); + ]; + rocksdb_options_set_compression_per_level( + options, + mem::transmute(compression_levels.as_ptr()), + compression_levels.len() as size_t, + ); roptions = rocksdb_readoptions_create(); rocksdb_readoptions_set_verify_checksums(roptions, 1); @@ -513,11 +562,13 @@ fn ffi() { let restore_options = rocksdb_restore_options_create(); rocksdb_restore_options_set_keep_log_files(restore_options, 0); - rocksdb_backup_engine_restore_db_from_latest_backup(be, - dbname, - dbname, - restore_options, - &mut err); + rocksdb_backup_engine_restore_db_from_latest_backup( + be, + dbname, + dbname, + restore_options, + &mut err, + ); CheckNoError!(err); rocksdb_restore_options_destroy(restore_options); @@ -553,10 +604,12 @@ fn ffi() { CheckGet(db, roptions, cstrp!("bar"), ptr::null()); CheckGet(db, roptions, cstrp!("box"), cstrp!("c")); let mut pos: c_int = 0; - rocksdb_writebatch_iterate(wb, - mem::transmute(&mut pos), - Some(CheckPut), - Some(CheckDel)); + rocksdb_writebatch_iterate( + wb, + mem::transmute(&mut pos), + Some(CheckPut), + Some(CheckDel), + ); CheckCondition!(pos == 3); rocksdb_writebatch_destroy(wb); } @@ -568,13 +621,15 @@ fn ffi() { let k_sizes: [size_t; 2] = [1, 2]; let v_list: [*const c_char; 3] = [cstrp!("x"), cstrp!("y"), cstrp!("z")]; let v_sizes: [size_t; 3] = [1, 1, 1]; - rocksdb_writebatch_putv(wb, - k_list.len() as c_int, - k_list.as_ptr(), - k_sizes.as_ptr(), - v_list.len() as c_int, - v_list.as_ptr(), - v_sizes.as_ptr()); + rocksdb_writebatch_putv( + wb, + k_list.len() as c_int, + k_list.as_ptr(), + k_sizes.as_ptr(), + v_list.len() as c_int, + v_list.as_ptr(), + v_sizes.as_ptr(), + ); rocksdb_write(db, woptions, wb, &mut err); CheckNoError!(err); CheckGet(db, roptions, cstrp!("zap"), cstrp!("xyz")); @@ -596,10 +651,13 @@ fn ffi() { let mut wb2 = rocksdb_writebatch_create_from(rep as *const c_char, repsize1); CheckCondition!(rocksdb_writebatch_count(wb1) == rocksdb_writebatch_count(wb2)); let mut repsize2: size_t = 0; - CheckCondition!(memcmp(rep, - rocksdb_writebatch_data(wb2, &mut repsize2) as *const c_void, - repsize1) == - 0); + CheckCondition!( + memcmp( + rep, + rocksdb_writebatch_data(wb2, &mut repsize2) as *const c_void, + repsize1 + ) == 0 + ); rocksdb_writebatch_destroy(wb1); rocksdb_writebatch_destroy(wb2); } @@ -633,14 +691,16 @@ fn ffi() { let mut vals: [*mut c_char; 3] = [ptr::null_mut(), ptr::null_mut(), ptr::null_mut()]; let mut vals_sizes: [size_t; 3] = [0, 0, 0]; let mut errs: [*mut c_char; 3] = [ptr::null_mut(), ptr::null_mut(), ptr::null_mut()]; - rocksdb_multi_get(db, - roptions, - 3, - keys.as_ptr(), - keys_sizes.as_ptr(), - vals.as_mut_ptr(), - vals_sizes.as_mut_ptr(), - errs.as_mut_ptr()); + rocksdb_multi_get( + db, + roptions, + 3, + keys.as_ptr(), + keys_sizes.as_ptr(), + vals.as_mut_ptr(), + vals_sizes.as_mut_ptr(), + errs.as_mut_ptr(), + ); for i in 0..3 { CheckEqual(ptr::null(), errs[i], 0); @@ -667,22 +727,26 @@ fn ffi() { let key = keybuf.to_bytes_with_nul(); 
let valbuf = CString::new(format!("v{:020}", i)).unwrap(); let val = valbuf.to_bytes_with_nul(); - rocksdb_put(db, - woptions, - key.as_ptr() as *const c_char, - key.len() as size_t, - val.as_ptr() as *const c_char, - val.len() as size_t, - &mut err); + rocksdb_put( + db, + woptions, + key.as_ptr() as *const c_char, + key.len() as size_t, + val.as_ptr() as *const c_char, + val.len() as size_t, + &mut err, + ); CheckNoError!(err); } - rocksdb_approximate_sizes(db, - 2, - start.as_ptr(), - start_len.as_ptr(), - limit.as_ptr(), - limit_len.as_ptr(), - sizes.as_mut_ptr()); + rocksdb_approximate_sizes( + db, + 2, + start.as_ptr(), + start_len.as_ptr(), + limit.as_ptr(), + limit_len.as_ptr(), + sizes.as_mut_ptr(), + ); CheckCondition!(sizes[0] > 0); CheckCondition!(sizes[1] > 0); } @@ -733,12 +797,14 @@ fn ffi() { // First run uses custom filter, second run uses bloom filter CheckNoError!(err); let mut policy: *mut rocksdb_filterpolicy_t = if run == 0 { - rocksdb_filterpolicy_create(ptr::null_mut(), - Some(FilterDestroy), - Some(FilterCreate), - Some(FilterKeyMatch), - None, - Some(FilterName)) + rocksdb_filterpolicy_create( + ptr::null_mut(), + Some(FilterDestroy), + Some(FilterCreate), + Some(FilterKeyMatch), + None, + Some(FilterName), + ) } else { rocksdb_filterpolicy_create_bloom(10) }; @@ -751,21 +817,25 @@ fn ffi() { rocksdb_options_set_block_based_table_factory(options, table_options); db = rocksdb_open(options, dbname, &mut err); CheckNoError!(err); - rocksdb_put(db, - woptions, - cstrp!("foo"), - 3, - cstrp!("foovalue"), - 8, - &mut err); + rocksdb_put( + db, + woptions, + cstrp!("foo"), + 3, + cstrp!("foovalue"), + 8, + &mut err, + ); CheckNoError!(err); - rocksdb_put(db, - woptions, - cstrp!("bar"), - 3, - cstrp!("barvalue"), - 8, - &mut err); + rocksdb_put( + db, + woptions, + cstrp!("bar"), + 3, + cstrp!("barvalue"), + 8, + &mut err, + ); CheckNoError!(err); rocksdb_compact_range(db, ptr::null(), 0, ptr::null(), 0); @@ -791,10 +861,12 @@ fn ffi() { { let options_with_filter = rocksdb_options_create(); rocksdb_options_set_create_if_missing(options_with_filter, 1); - let cfilter = rocksdb_compactionfilter_create(ptr::null_mut(), - Some(CFilterDestroy), - Some(CFilterFilter), - Some(CFilterName)); + let cfilter = rocksdb_compactionfilter_create( + ptr::null_mut(), + Some(CFilterDestroy), + Some(CFilterFilter), + Some(CFilterName), + ); // Create new database rocksdb_close(db); rocksdb_destroy_db(options_with_filter, dbname, &mut err); @@ -810,62 +882,74 @@ fn ffi() { { let mut options_with_filter_factory = rocksdb_options_create(); rocksdb_options_set_create_if_missing(options_with_filter_factory, 1); - let mut factory = rocksdb_compactionfilterfactory_create(ptr::null_mut(), - Some(CFilterFactoryDestroy), - Some(CFilterCreate), - Some(CFilterFactoryName)); + let mut factory = rocksdb_compactionfilterfactory_create( + ptr::null_mut(), + Some(CFilterFactoryDestroy), + Some(CFilterCreate), + Some(CFilterFactoryName), + ); // Create new database rocksdb_close(db); rocksdb_destroy_db(options_with_filter_factory, dbname, &mut err); rocksdb_options_set_compaction_filter_factory(options_with_filter_factory, factory); db = CheckCompaction(dbname, db, options_with_filter_factory, roptions, woptions); - rocksdb_options_set_compaction_filter_factory(options_with_filter_factory, - ptr::null_mut()); + rocksdb_options_set_compaction_filter_factory( + options_with_filter_factory, + ptr::null_mut(), + ); rocksdb_options_destroy(options_with_filter_factory); } StartPhase("merge_operator"); { - let mut 
merge_operator = rocksdb_mergeoperator_create(ptr::null_mut(), - Some(MergeOperatorDestroy), - Some(MergeOperatorFullMerge), - Some(MergeOperatorPartialMerge), - None, - Some(MergeOperatorName)); + let mut merge_operator = rocksdb_mergeoperator_create( + ptr::null_mut(), + Some(MergeOperatorDestroy), + Some(MergeOperatorFullMerge), + Some(MergeOperatorPartialMerge), + None, + Some(MergeOperatorName), + ); // Create new database rocksdb_close(db); rocksdb_destroy_db(options, dbname, &mut err); rocksdb_options_set_merge_operator(options, merge_operator); db = rocksdb_open(options, dbname, &mut err); CheckNoError!(err); - rocksdb_put(db, - woptions, - cstrp!("foo"), - 3, - cstrp!("foovalue"), - 8, - &mut err); + rocksdb_put( + db, + woptions, + cstrp!("foo"), + 3, + cstrp!("foovalue"), + 8, + &mut err, + ); CheckNoError!(err); CheckGet(db, roptions, cstrp!("foo"), cstrp!("foovalue")); - rocksdb_merge(db, - woptions, - cstrp!("foo"), - 3, - cstrp!("barvalue"), - 8, - &mut err); + rocksdb_merge( + db, + woptions, + cstrp!("foo"), + 3, + cstrp!("barvalue"), + 8, + &mut err, + ); CheckNoError!(err); CheckGet(db, roptions, cstrp!("foo"), cstrp!("fake")); // Merge of a non-existing value - rocksdb_merge(db, - woptions, - cstrp!("bar"), - 3, - cstrp!("barvalue"), - 8, - &mut err); + rocksdb_merge( + db, + woptions, + cstrp!("bar"), + 3, + cstrp!("barvalue"), + 8, + &mut err, + ); CheckNoError!(err); CheckGet(db, roptions, cstrp!("bar"), cstrp!("fake")); } @@ -898,25 +982,29 @@ fn ffi() { let mut cf_names: [*const c_char; 2] = [cstrp!("default"), cstrp!("cf1")]; let mut cf_opts: [*const rocksdb_options_t; 2] = [cf_options, cf_options]; - let mut handles: [*mut rocksdb_column_family_handle_t; 2] = [ptr::null_mut(), - ptr::null_mut()]; - db = rocksdb_open_column_families(db_options, - dbname, - 2, - cf_names.as_mut_ptr(), - cf_opts.as_mut_ptr(), - handles.as_mut_ptr(), - &mut err); + let mut handles: [*mut rocksdb_column_family_handle_t; 2] = + [ptr::null_mut(), ptr::null_mut()]; + db = rocksdb_open_column_families( + db_options, + dbname, + 2, + cf_names.as_mut_ptr(), + cf_opts.as_mut_ptr(), + handles.as_mut_ptr(), + &mut err, + ); CheckNoError!(err); - rocksdb_put_cf(db, - woptions, - handles[1], - cstrp!("foo"), - 3, - cstrp!("hello"), - 5, - &mut err); + rocksdb_put_cf( + db, + woptions, + handles[1], + cstrp!("foo"), + 3, + cstrp!("hello"), + 5, + &mut err, + ); CheckNoError!(err); CheckGetCF(db, roptions, handles[1], cstrp!("foo"), cstrp!("hello")); @@ -940,21 +1028,23 @@ fn ffi() { rocksdb_writebatch_destroy(wb); let keys: [*const c_char; 3] = [cstrp!("box"), cstrp!("box"), cstrp!("barfooxx")]; - let get_handles: [*const rocksdb_column_family_handle_t; 3] = [handles[0], handles[1], - handles[1]]; + let get_handles: [*const rocksdb_column_family_handle_t; 3] = + [handles[0], handles[1], handles[1]]; let keys_sizes: [size_t; 3] = [3, 3, 8]; let mut vals: [*mut c_char; 3] = [ptr::null_mut(), ptr::null_mut(), ptr::null_mut()]; let mut vals_sizes: [size_t; 3] = [0, 0, 0]; let mut errs: [*mut c_char; 3] = [ptr::null_mut(), ptr::null_mut(), ptr::null_mut()]; - rocksdb_multi_get_cf(db, - roptions, - get_handles.as_ptr(), - 3, - keys.as_ptr(), - keys_sizes.as_ptr(), - vals.as_mut_ptr(), - vals_sizes.as_mut_ptr(), - errs.as_mut_ptr()); + rocksdb_multi_get_cf( + db, + roptions, + get_handles.as_ptr(), + 3, + keys.as_ptr(), + keys_sizes.as_ptr(), + vals.as_mut_ptr(), + vals_sizes.as_mut_ptr(), + errs.as_mut_ptr(), + ); for i in 0..3 { CheckEqual(ptr::null(), errs[i], 0); @@ -982,16 +1072,18 @@ fn ffi() { 
CheckNoError!(err); rocksdb_iter_destroy(iter); - let mut iters_cf_handles: [*mut rocksdb_column_family_handle_t; 2] = [handles[0], - handles[1]]; - let mut iters_handles: [*mut rocksdb_iterator_t; 2] = [ptr::null_mut(), - ptr::null_mut()]; - rocksdb_create_iterators(db, - roptions, - iters_cf_handles.as_mut_ptr(), - iters_handles.as_mut_ptr(), - 2, - &mut err); + let mut iters_cf_handles: [*mut rocksdb_column_family_handle_t; 2] = + [handles[0], handles[1]]; + let mut iters_handles: [*mut rocksdb_iterator_t; 2] = + [ptr::null_mut(), ptr::null_mut()]; + rocksdb_create_iterators( + db, + roptions, + iters_cf_handles.as_mut_ptr(), + iters_handles.as_mut_ptr(), + 2, + &mut err, + ); CheckNoError!(err); iter = iters_handles[0]; @@ -1030,8 +1122,10 @@ fn ffi() { { // Create new database rocksdb_options_set_allow_mmap_reads(options, 1); - rocksdb_options_set_prefix_extractor(options, - rocksdb_slicetransform_create_fixed_prefix(3)); + rocksdb_options_set_prefix_extractor( + options, + rocksdb_slicetransform_create_fixed_prefix(3), + ); rocksdb_options_set_hash_skip_list_rep(options, 5000, 4, 4); rocksdb_options_set_plain_table_factory(options, 4, 10, 0.75, 16); rocksdb_options_set_allow_concurrent_memtable_write(options, 0);
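
Aside for readers tracing the compaction-filter hunks above: CFilterFilter drops the key "bar", rewrites the value of "baz" to "newbazvalue", and keeps every other entry, signalling each outcome through its return value and the value_changed/new_value out-parameters. The safe-Rust sketch below restates only that decision logic so the unsafe callback is easier to follow; the FilterDecision enum and cfilter_decision function are names invented for this note and are not part of the patch or of librocksdb-sys.

// Illustrative sketch only -- not part of this patch. It mirrors, in safe Rust,
// the decisions made by the unsafe CFilterFilter callback reformatted above.
#[derive(Debug, PartialEq)]
enum FilterDecision {
    Keep,            // callback returns 0 and leaves *value_changed at 0
    Remove,          // callback returns 1
    Change(Vec<u8>), // callback sets *value_changed = 1 and writes *new_value
}

fn cfilter_decision(key: &[u8]) -> FilterDecision {
    match key {
        b"bar" => FilterDecision::Remove,
        b"baz" => FilterDecision::Change(b"newbazvalue".to_vec()),
        _ => FilterDecision::Keep,
    }
}

fn main() {
    // The asserts restate the same key -> outcome mapping the callback encodes.
    assert_eq!(cfilter_decision(b"bar"), FilterDecision::Remove);
    assert_eq!(
        cfilter_decision(b"baz"),
        FilterDecision::Change(b"newbazvalue".to_vec())
    );
    assert_eq!(cfilter_decision(b"foo"), FilterDecision::Keep);
}
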