From 05350ac88c1d5b7f12502df5ff86a46e36554a1f Mon Sep 17 00:00:00 2001 From: Oleksandr Anyshchenko Date: Sat, 5 Jan 2019 17:17:15 +0200 Subject: [PATCH 1/5] Added a clear method for WriteBatch --- src/db.rs | 8 ++++++++ tests/test_write_batch.rs | 27 +++++++++++++++++++++++++++ 2 files changed, 35 insertions(+) create mode 100644 tests/test_write_batch.rs diff --git a/src/db.rs b/src/db.rs index 809fa92..414b884 100644 --- a/src/db.rs +++ b/src/db.rs @@ -1223,6 +1223,14 @@ impl WriteBatch { Ok(()) } } + + /// Clear all updates buffered in this batch. + pub fn clear(&mut self) -> Result<(), Error> { + unsafe { + ffi::rocksdb_writebatch_clear(self.inner); + } + Ok(()) + } } impl Default for WriteBatch { diff --git a/tests/test_write_batch.rs b/tests/test_write_batch.rs new file mode 100644 index 0000000..cae5911 --- /dev/null +++ b/tests/test_write_batch.rs @@ -0,0 +1,27 @@ +// Copyright 2019 Tyler Neely +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +extern crate rocksdb; + +use rocksdb::WriteBatch; + +#[test] +fn test_write_batch_clear() { + let mut batch = WriteBatch::default(); + let _ = batch.put(b"1", b"2"); + assert_eq!(batch.len(), 1); + let _ = batch.clear(); + assert_eq!(batch.len(), 0); + assert!(batch.is_empty()); +} From fe26e038ab8dd9cd6458a88ed78344f4be847a6c Mon Sep 17 00:00:00 2001 From: Oleksandr Anyshchenko Date: Wed, 9 Jan 2019 13:35:46 +0200 Subject: [PATCH 2/5] Added a helper function `get_cf_names` and doc-tests refactoring --- src/compaction_filter.rs | 2 + src/db.rs | 328 ++++++++++++++++++++++++--------------- src/lib.rs | 59 ++++--- src/merge_operator.rs | 21 +-- 4 files changed, 256 insertions(+), 154 deletions(-) diff --git a/src/compaction_filter.rs b/src/compaction_filter.rs index 0c63b2a..ed15dde 100644 --- a/src/compaction_filter.rs +++ b/src/compaction_filter.rs @@ -131,4 +131,6 @@ fn compaction_filter_test() { assert!(db.get(b"_k").unwrap().is_none()); assert_eq!(&*db.get(b"%k").unwrap().unwrap(), b"secret"); } + let result = DB::destroy(&opts, path); + assert!(result.is_ok()); } diff --git a/src/db.rs b/src/db.rs index ed18a12..11149fb 100644 --- a/src/db.rs +++ b/src/db.rs @@ -67,16 +67,18 @@ pub enum DBRecoveryMode { /// Making an atomic commit of several writes: /// /// ``` -/// use rocksdb::{DB, WriteBatch}; +/// use rocksdb::{DB, Options, WriteBatch}; /// -/// let db = DB::open_default("path/for/rocksdb/storage1").unwrap(); +/// let path = "_path_for_rocksdb_storage1"; /// { +/// let db = DB::open_default(path).unwrap(); /// let mut batch = WriteBatch::default(); /// batch.put(b"my key", b"my value"); /// batch.put(b"key2", b"value2"); /// batch.put(b"key3", b"value3"); /// db.write(batch); // Atomically commits the batch /// } +/// let _ = DB::destroy(&Options::default(), path); /// ``` pub struct WriteBatch { inner: *mut ffi::rocksdb_writebatch_t, @@ -89,11 +91,15 @@ pub struct ReadOptions { /// A consistent view of the database at the point of creation. 
/// /// ``` -/// use rocksdb::{DB, IteratorMode}; +/// use rocksdb::{DB, IteratorMode, Options}; /// -/// let db = DB::open_default("path/for/rocksdb/storage3").unwrap(); -/// let snapshot = db.snapshot(); // Creates a longer-term snapshot of the DB, but closed when goes out of scope -/// let mut iter = snapshot.iterator(IteratorMode::Start); // Make as many iterators as you'd like from one snapshot +/// let path = "_path_for_rocksdb_storage3"; +/// { +/// let db = DB::open_default(path).unwrap(); +/// let snapshot = db.snapshot(); // Creates a longer-term snapshot of the DB, but closed when goes out of scope +/// let mut iter = snapshot.iterator(IteratorMode::Start); // Make as many iterators as you'd like from one snapshot +/// } +/// let _ = DB::destroy(&Options::default(), path); /// ``` /// pub struct Snapshot<'a> { @@ -110,40 +116,44 @@ pub struct Snapshot<'a> { /// widely recognised Rust idioms. /// /// ``` -/// use rocksdb::DB; +/// use rocksdb::{DB, Options}; /// -/// let mut db = DB::open_default("path/for/rocksdb/storage4").unwrap(); -/// let mut iter = db.raw_iterator(); +/// let path = "_path_for_rocksdb_storage4"; +/// { +/// let db = DB::open_default(path).unwrap(); +/// let mut iter = db.raw_iterator(); /// -/// // Forwards iteration -/// iter.seek_to_first(); -/// while iter.valid() { -/// println!("Saw {:?} {:?}", iter.key(), iter.value()); -/// iter.next(); -/// } +/// // Forwards iteration +/// iter.seek_to_first(); +/// while iter.valid() { +/// println!("Saw {:?} {:?}", iter.key(), iter.value()); +/// iter.next(); +/// } /// -/// // Reverse iteration -/// iter.seek_to_last(); -/// while iter.valid() { -/// println!("Saw {:?} {:?}", iter.key(), iter.value()); -/// iter.prev(); -/// } +/// // Reverse iteration +/// iter.seek_to_last(); +/// while iter.valid() { +/// println!("Saw {:?} {:?}", iter.key(), iter.value()); +/// iter.prev(); +/// } /// -/// // Seeking -/// iter.seek(b"my key"); -/// while iter.valid() { -/// println!("Saw {:?} {:?}", iter.key(), iter.value()); -/// iter.next(); -/// } +/// // Seeking +/// iter.seek(b"my key"); +/// while iter.valid() { +/// println!("Saw {:?} {:?}", iter.key(), iter.value()); +/// iter.next(); +/// } /// -/// // Reverse iteration from key -/// // Note, use seek_for_prev when reversing because if this key doesn't exist, -/// // this will make the iterator start from the previous key rather than the next. -/// iter.seek_for_prev(b"my key"); -/// while iter.valid() { -/// println!("Saw {:?} {:?}", iter.key(), iter.value()); -/// iter.prev(); +/// // Reverse iteration from key +/// // Note, use seek_for_prev when reversing because if this key doesn't exist, +/// // this will make the iterator start from the previous key rather than the next. +/// iter.seek_for_prev(b"my key"); +/// while iter.valid() { +/// println!("Saw {:?} {:?}", iter.key(), iter.value()); +/// iter.prev(); +/// } /// } +/// let _ = DB::destroy(&Options::default(), path); /// ``` pub struct DBRawIterator { inner: *mut ffi::rocksdb_iterator_t, @@ -153,28 +163,32 @@ pub struct DBRawIterator { /// ranges and direction. 
/// /// ``` -/// use rocksdb::{DB, Direction, IteratorMode}; +/// use rocksdb::{DB, Direction, IteratorMode, Options}; /// -/// let mut db = DB::open_default("path/for/rocksdb/storage2").unwrap(); -/// let mut iter = db.iterator(IteratorMode::Start); // Always iterates forward -/// for (key, value) in iter { -/// println!("Saw {:?} {:?}", key, value); -/// } -/// iter = db.iterator(IteratorMode::End); // Always iterates backward -/// for (key, value) in iter { -/// println!("Saw {:?} {:?}", key, value); -/// } -/// iter = db.iterator(IteratorMode::From(b"my key", Direction::Forward)); // From a key in Direction::{forward,reverse} -/// for (key, value) in iter { -/// println!("Saw {:?} {:?}", key, value); -/// } +/// let path = "_path_for_rocksdb_storage2"; +/// { +/// let db = DB::open_default(path).unwrap(); +/// let mut iter = db.iterator(IteratorMode::Start); // Always iterates forward +/// for (key, value) in iter { +/// println!("Saw {:?} {:?}", key, value); +/// } +/// iter = db.iterator(IteratorMode::End); // Always iterates backward +/// for (key, value) in iter { +/// println!("Saw {:?} {:?}", key, value); +/// } +/// iter = db.iterator(IteratorMode::From(b"my key", Direction::Forward)); // From a key in Direction::{forward,reverse} +/// for (key, value) in iter { +/// println!("Saw {:?} {:?}", key, value); +/// } /// -/// // You can seek with an existing Iterator instance, too -/// iter = db.iterator(IteratorMode::Start); -/// iter.set_mode(IteratorMode::From(b"another key", Direction::Reverse)); -/// for (key, value) in iter { -/// println!("Saw {:?} {:?}", key, value); +/// // You can seek with an existing Iterator instance, too +/// iter = db.iterator(IteratorMode::Start); +/// iter.set_mode(IteratorMode::From(b"another key", Direction::Reverse)); +/// for (key, value) in iter { +/// println!("Saw {:?} {:?}", key, value); +/// } /// } +/// let _ = DB::destroy(&Options::default(), path); /// ``` pub struct DBIterator { raw: DBRawIterator, @@ -228,30 +242,31 @@ impl DBRawIterator { /// # Examples /// /// ```rust - /// use rocksdb::DB; - /// - /// let mut db = DB::open_default("path/for/rocksdb/storage5").unwrap(); - /// let mut iter = db.raw_iterator(); - /// - /// // Iterate all keys from the start in lexicographic order + /// use rocksdb::{DB, Options}; /// - /// iter.seek_to_first(); + /// let path = "_path_for_rocksdb_storage5"; + /// { + /// let db = DB::open_default(path).unwrap(); + /// let mut iter = db.raw_iterator(); /// - /// while iter.valid() { - /// println!("{:?} {:?}", iter.key(), iter.value()); + /// // Iterate all keys from the start in lexicographic order + /// iter.seek_to_first(); /// - /// iter.next(); - /// } - /// - /// // Read just the first key + /// while iter.valid() { + /// println!("{:?} {:?}", iter.key(), iter.value()); + /// iter.next(); + /// } /// - /// iter.seek_to_first(); + /// // Read just the first key + /// iter.seek_to_first(); /// - /// if iter.valid() { - /// println!("{:?} {:?}", iter.key(), iter.value()); - /// } else { - /// // There are no keys in the database + /// if iter.valid() { + /// println!("{:?} {:?}", iter.key(), iter.value()); + /// } else { + /// // There are no keys in the database + /// } /// } + /// let _ = DB::destroy(&Options::default(), path); /// ``` pub fn seek_to_first(&mut self) { unsafe { @@ -264,30 +279,31 @@ impl DBRawIterator { /// # Examples /// /// ```rust - /// use rocksdb::DB; - /// - /// let mut db = DB::open_default("path/for/rocksdb/storage6").unwrap(); - /// let mut iter = db.raw_iterator(); + /// 
use rocksdb::{DB, Options}; /// - /// // Iterate all keys from the end in reverse lexicographic order + /// let path = "_path_for_rocksdb_storage6"; + /// { + /// let db = DB::open_default(path).unwrap(); + /// let mut iter = db.raw_iterator(); /// - /// iter.seek_to_last(); - /// - /// while iter.valid() { - /// println!("{:?} {:?}", iter.key(), iter.value()); - /// - /// iter.prev(); - /// } + /// // Iterate all keys from the end in reverse lexicographic order + /// iter.seek_to_last(); /// - /// // Read just the last key + /// while iter.valid() { + /// println!("{:?} {:?}", iter.key(), iter.value()); + /// iter.prev(); + /// } /// - /// iter.seek_to_last(); + /// // Read just the last key + /// iter.seek_to_last(); /// - /// if iter.valid() { - /// println!("{:?} {:?}", iter.key(), iter.value()); - /// } else { - /// // There are no keys in the database + /// if iter.valid() { + /// println!("{:?} {:?}", iter.key(), iter.value()); + /// } else { + /// // There are no keys in the database + /// } /// } + /// let _ = DB::destroy(&Options::default(), path); /// ``` pub fn seek_to_last(&mut self) { unsafe { @@ -303,20 +319,23 @@ impl DBRawIterator { /// # Examples /// /// ```rust - /// use rocksdb::DB; + /// use rocksdb::{DB, Options}; /// - /// let mut db = DB::open_default("path/for/rocksdb/storage7").unwrap(); - /// let mut iter = db.raw_iterator(); + /// let path = "_path_for_rocksdb_storage7"; + /// { + /// let db = DB::open_default(path).unwrap(); + /// let mut iter = db.raw_iterator(); /// - /// // Read the first key that starts with 'a' + /// // Read the first key that starts with 'a' + /// iter.seek(b"a"); /// - /// iter.seek(b"a"); - /// - /// if iter.valid() { - /// println!("{:?} {:?}", iter.key(), iter.value()); - /// } else { - /// // There are no keys in the database + /// if iter.valid() { + /// println!("{:?} {:?}", iter.key(), iter.value()); + /// } else { + /// // There are no keys in the database + /// } /// } + /// let _ = DB::destroy(&Options::default(), path); /// ``` pub fn seek(&mut self, key: &[u8]) { unsafe { @@ -337,20 +356,24 @@ impl DBRawIterator { /// # Examples /// /// ```rust - /// use rocksdb::DB; - /// - /// let mut db = DB::open_default("path/for/rocksdb/storage8").unwrap(); - /// let mut iter = db.raw_iterator(); + /// use rocksdb::{DB, Options}; /// - /// // Read the last key that starts with 'a' + /// let path = "_path_for_rocksdb_storage8"; + /// { + /// let db = DB::open_default(path).unwrap(); + /// let mut iter = db.raw_iterator(); /// - /// iter.seek_for_prev(b"b"); + /// // Read the last key that starts with 'a' + /// iter.seek_for_prev(b"b"); /// - /// if iter.valid() { - /// println!("{:?} {:?}", iter.key(), iter.value()); - /// } else { - /// // There are no keys in the database + /// if iter.valid() { + /// println!("{:?} {:?}", iter.key(), iter.value()); + /// } else { + /// // There are no keys in the database + /// } /// } + /// let _ = DB::destroy(&Options::default(), path); + /// ``` pub fn seek_for_prev(&mut self, key: &[u8]) { unsafe { ffi::rocksdb_iter_seek_for_prev( @@ -640,9 +663,7 @@ impl DB { if let Err(e) = fs::create_dir_all(&path) { return Err(Error::new(format!( - "Failed to create RocksDB\ - directory: `{:?}`.", - e + "Failed to create RocksDB directory: `{:?}`.", e ))); } @@ -1395,6 +1416,39 @@ impl DBVector { } } +/// Retrieves a list of column families names from a given path. 
+pub fn get_cf_names<P: AsRef<Path>>(path: P) -> Result<Vec<String>, Error> {
+    let opts = Options::default();
+    let cpath = to_cpath(path)?;
+    let result: Vec<String>;
+
+    unsafe {
+        let mut cflen: size_t = 0;
+        let column_fams_raw = ffi_try!(ffi::rocksdb_list_column_families(
+            opts.inner,
+            cpath.as_ptr() as *const _,
+            &mut cflen,
+        ));
+        let column_fams = slice::from_raw_parts(column_fams_raw, cflen as usize);
+        result = column_fams
+            .iter()
+            .map(|cf| CStr::from_ptr(*cf).to_string_lossy().into_owned())
+            .collect();
+        ffi::rocksdb_list_column_families_destroy(column_fams_raw, cflen);
+    }
+
+    Ok(result)
+}
+
+fn to_cpath<P: AsRef<Path>>(path: P) -> Result<CString, Error> {
+    match CString::new(path.as_ref().to_string_lossy().as_bytes()) {
+        Ok(c) => Ok(c),
+        Err(_) => Err(Error::new(
+            "Failed to convert path to CString when opening DB.".to_owned(),
+        )),
+    }
+}
+
 #[test]
 fn test_db_vector() {
     use std::mem;
@@ -1425,17 +1479,22 @@ fn external() {
 #[test]
 fn errors_do_stuff() {
     let path = "_rust_rocksdb_error";
-    let _db = DB::open_default(path).unwrap();
-    let opts = Options::default();
-    // The DB will still be open when we try to destroy it and the lock should fail.
-    match DB::destroy(&opts, path) {
-        Err(s) => {
-            let message = s.to_string();
-            assert!(message.find("IO error:").is_some());
-            assert!(message.find("_rust_rocksdb_error/LOCK:").is_some());
+    {
+        let _db = DB::open_default(path).unwrap();
+        let opts = Options::default();
+        // The DB will still be open when we try to destroy it and the lock should fail.
+        match DB::destroy(&opts, path) {
+            Err(s) => {
+                let message = s.to_string();
+                assert!(message.find("IO error:").is_some());
+                assert!(message.find("_rust_rocksdb_error/LOCK:").is_some());
+            }
+            Ok(_) => panic!("should fail"),
         }
-        Ok(_) => panic!("should fail"),
     }
+    let opts = Options::default();
+    let result = DB::destroy(&opts, path);
+    assert!(result.is_ok());
 }
 
 #[test]
@@ -1562,4 +1621,31 @@ fn set_option_test() {
         ];
         db.set_options(&multiple_options).unwrap();
     }
+    assert!(DB::destroy(&Options::default(), path).is_ok());
+}
+
+#[test]
+fn get_cf_names_test() {
+    let path = "_rust_rocksdb_get_cf_names";
+    let opts = Options::default();
+    {
+        let db = DB::open_default(path).unwrap();
+        let cf_one = db.create_cf("one", &opts).unwrap();
+        let result = db.put_cf(cf_one, b"1", b"1");
+        assert!(result.is_ok());
+        let cf_two = db.create_cf("two", &opts).unwrap();
+        let result = db.put_cf(cf_two, b"2", b"2");
+        assert!(result.is_ok());
+    }
+    {
+        let cf_names = get_cf_names(path).unwrap();
+        let cfs = cf_names.iter().map(String::as_str).collect::<Vec<&str>>();
+        assert_eq!(cfs, vec!["default", "one", "two"]);
+        let db = DB::open_cf(&opts, path, cfs.as_slice()).unwrap();
+        let cf_one = db.cf_handle("one").unwrap();
+        assert_eq!(db.get_cf(cf_one, b"1").unwrap().unwrap().as_ref(), b"1");
+        let cf_two = db.cf_handle("two").unwrap();
+        assert_eq!(db.get_cf(cf_two, b"2").unwrap().unwrap().as_ref(), b"2");
+    }
+    assert!(DB::destroy(&opts, path).is_ok());
 }
diff --git a/src/lib.rs b/src/lib.rs
index cb46114..30fee96 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -18,22 +18,28 @@
 //! # Examples
 //!
 //! ```
-//! use rocksdb::DB;
-//! // NB: db is automatically closed at end of lifetime
-//! let db = DB::open_default("path/for/rocksdb/storage").unwrap();
-//! db.put(b"my key", b"my value");
-//! match db.get(b"my key") {
-//!     Ok(Some(value)) => println!("retrieved value {}", value.to_utf8().unwrap()),
-//!     Ok(None) => println!("value not found"),
-//!     Err(e) => println!("operational problem encountered: {}", e),
-//! }
-//! db.delete(b"my key").unwrap();
+//! 
use rocksdb::{DB, Options}; +//! // NB: db is automatically closed at end of lifetime +//! let path = "_path_for_rocksdb_storage"; +//! { +//! let db = DB::open_default(path).unwrap(); +//! db.put(b"my key", b"my value"); +//! match db.get(b"my key") { +//! Ok(Some(value)) => println!("retrieved value {}", value.to_utf8().unwrap()), +//! Ok(None) => println!("value not found"), +//! Err(e) => println!("operational problem encountered: {}", e), +//! } +//! db.delete(b"my key").unwrap(); +//! } +//! let _ = DB::destroy(&Options::default(), path); //! ``` //! //! Opening a database and a single column family with custom options: //! //! ``` //! use rocksdb::{DB, ColumnFamilyDescriptor, Options}; +//! +//! let path = "_path_for_rocksdb_storage_with_cfs"; //! let mut cf_opts = Options::default(); //! cf_opts.set_max_write_buffer_number(16); //! let cf = ColumnFamilyDescriptor::new("cf1", cf_opts); @@ -41,8 +47,10 @@ //! let mut db_opts = Options::default(); //! db_opts.create_missing_column_families(true); //! db_opts.create_if_missing(true); -//! -//! let db = DB::open_cf_descriptors(&db_opts, "path/for/rocksdb/storage_with_cfs", vec![cf]).unwrap(); +//! { +//! let db = DB::open_cf_descriptors(&db_opts, path, vec![cf]).unwrap(); +//! } +//! let _ = DB::destroy(&db_opts, path); //! ``` //! @@ -63,7 +71,7 @@ mod slice_transform; pub use compaction_filter::Decision as CompactionDecision; pub use db::{ - new_bloom_filter, DBCompactionStyle, DBCompressionType, DBIterator, DBRawIterator, + get_cf_names, new_bloom_filter, DBCompactionStyle, DBCompressionType, DBIterator, DBRawIterator, DBRecoveryMode, DBVector, Direction, IteratorMode, ReadOptions, Snapshot, WriteBatch, }; @@ -211,20 +219,23 @@ pub struct Options { /// Making an unsafe write of a batch: /// /// ``` -/// use rocksdb::{DB, WriteBatch, WriteOptions}; +/// use rocksdb::{DB, Options, WriteBatch, WriteOptions}; /// -/// let db = DB::open_default("path/for/rocksdb/storageY").unwrap(); +/// let path = "_path_for_rocksdb_storageY"; +/// { +/// let db = DB::open_default(path).unwrap(); +/// let mut batch = WriteBatch::default(); +/// batch.put(b"my key", b"my value"); +/// batch.put(b"key2", b"value2"); +/// batch.put(b"key3", b"value3"); /// -/// let mut batch = WriteBatch::default(); -/// batch.put(b"my key", b"my value"); -/// batch.put(b"key2", b"value2"); -/// batch.put(b"key3", b"value3"); +/// let mut write_options = WriteOptions::default(); +/// write_options.set_sync(false); +/// write_options.disable_wal(true); /// -/// let mut write_options = WriteOptions::default(); -/// write_options.set_sync(false); -/// write_options.disable_wal(true); -/// -/// db.write_opt(batch, &write_options); +/// db.write_opt(batch, &write_options); +/// } +/// let _ = DB::destroy(&Options::default(), path); /// ``` pub struct WriteOptions { inner: *mut ffi::rocksdb_writeoptions_t, diff --git a/src/merge_operator.rs b/src/merge_operator.rs index 69551d8..3b1abde 100644 --- a/src/merge_operator.rs +++ b/src/merge_operator.rs @@ -38,18 +38,21 @@ //! } //! //! fn main() { -//! let path = "path/to/rocksdb"; +//! let path = "_rust_path_to_rocksdb"; //! let mut opts = Options::default(); //! opts.create_if_missing(true); //! opts.set_merge_operator("test operator", concat_merge, None); -//! let db = DB::open(&opts, path).unwrap(); -//! let p = db.put(b"k1", b"a"); -//! db.merge(b"k1", b"b"); -//! db.merge(b"k1", b"c"); -//! db.merge(b"k1", b"d"); -//! db.merge(b"k1", b"efg"); -//! let r = db.get(b"k1"); -//! 
assert!(r.unwrap().unwrap().to_utf8().unwrap() == "abcdefg");
+//! {
+//!     let db = DB::open(&opts, path).unwrap();
+//!     let p = db.put(b"k1", b"a");
+//!     db.merge(b"k1", b"b");
+//!     db.merge(b"k1", b"c");
+//!     db.merge(b"k1", b"d");
+//!     db.merge(b"k1", b"efg");
+//!     let r = db.get(b"k1");
+//!     assert!(r.unwrap().unwrap().to_utf8().unwrap() == "abcdefg");
+//! }
+//! let _ = DB::destroy(&opts, path);
 //! }
 //! ```

From 9f44066266cb5551a71247ff80251fabf0582b46 Mon Sep 17 00:00:00 2001
From: Oleksandr Anyshchenko
Date: Tue, 15 Jan 2019 21:37:01 +0200
Subject: [PATCH 3/5] Fixed the year in the changelog

---
 CHANGELOG.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4845425..cfb36ae 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,11 +1,11 @@
 # Changelog
 
-## 0.11.0 (2018-01-10)
+## 0.11.0 (2019-01-10)
 
 ### Announcements
 
 * This is the first release under the new [Maintainership](MAINTAINERSHIP.md) model.
-  Three contributors have been selected to help maintain this library -- Oleksandr Anyshchenko ([@aleksus](https://github.com/aleksuss)), Jordan Terrell ([@iSynaptic](https://github.com/iSynaptic)), and Ilya Bogdanov ([@vitvakatu](https://github.com/vitvakatu)). Many thanks to Tyler Neely ([@spacejam](https://github.com/spacejam)) for your support while taking on this new role.
+  Three contributors have been selected to help maintain this library -- Oleksandr Anyshchenko ([@aleksuss](https://github.com/aleksuss)), Jordan Terrell ([@iSynaptic](https://github.com/iSynaptic)), and Ilya Bogdanov ([@vitvakatu](https://github.com/vitvakatu)). Many thanks to Tyler Neely ([@spacejam](https://github.com/spacejam)) for your support while taking on this new role.
 
 * A [gitter.im chat room](https://gitter.im/rust-rocksdb/Lobby) has been created. Although it's not guaranteed to be "staffed", it may help to collaborate on changes to `rust-rocksdb`.

From b9681c861320e7e936d97792a7d2fb09ea883ae9 Mon Sep 17 00:00:00 2001
From: Oleksandr Anyshchenko
Date: Tue, 15 Jan 2019 22:02:36 +0200
Subject: [PATCH 4/5] Disabled warnings when building C++ deps

---
 librocksdb-sys/build.rs | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/librocksdb-sys/build.rs b/librocksdb-sys/build.rs
index 7eb4d0e..acccadd 100644
--- a/librocksdb-sys/build.rs
+++ b/librocksdb-sys/build.rs
@@ -231,6 +231,7 @@ fn build_zlib() {
         }
     }
 
+    compiler.flag_if_supported("-Wno-implicit-function-declaration");
     compiler.opt_level(3);
     compiler.compile("libz.a");
 }
@@ -251,6 +252,7 @@ fn build_bzip2() {
         .define("_FILE_OFFSET_BITS", Some("64"))
         .define("BZ_NO_STDIO", None);
 
+    compiler.extra_warnings(false);
     compiler.opt_level(3);
     compiler.compile("libbz2.a");
 }

From deb6001cae43fd9def25a76ff8d4b55698fefd9a Mon Sep 17 00:00:00 2001
From: Oleksandr Anyshchenko
Date: Wed, 30 Jan 2019 16:23:14 +0200
Subject: [PATCH 5/5] Use the `to_cpath` helper and remove the redundant `get_cf_names` function

---
 src/db.rs  | 66 +++---------------------------------------------------
 src/lib.rs |  2 +-
 2 files changed, 4 insertions(+), 64 deletions(-)

diff --git a/src/db.rs b/src/db.rs
index 11149fb..f0464e3 100644
--- a/src/db.rs
+++ b/src/db.rs
@@ -740,17 +740,7 @@ impl DB {
     }
 
     pub fn list_cf<P: AsRef<Path>>(opts: &Options, path: P) -> Result<Vec<String>, Error> {
-        let cpath = match CString::new(path.as_ref().to_string_lossy().as_bytes()) {
-            Ok(c) => c,
-            Err(_) => {
-                return Err(Error::new(
-                    "Failed to convert path to CString \
-                     when opening DB."
-                        .to_owned(),
-                ))
-            }
-        };
-
+        let cpath = to_cpath(path)?;
         let mut length = 0;
 
         unsafe {
@@ -770,7 +760,7 @@
     }
 
     pub fn destroy<P: AsRef<Path>>(opts: &Options, path: P) -> Result<(), Error> {
-        let cpath = CString::new(path.as_ref().to_string_lossy().as_bytes()).unwrap();
+        let cpath = to_cpath(path)?;
         unsafe {
             ffi_try!(ffi::rocksdb_destroy_db(opts.inner, cpath.as_ptr(),));
         }
@@ -778,7 +768,7 @@
     }
 
     pub fn repair<P: AsRef<Path>>(opts: Options, path: P) -> Result<(), Error> {
-        let cpath = CString::new(path.as_ref().to_string_lossy().as_bytes()).unwrap();
+        let cpath = to_cpath(path)?;
         unsafe {
             ffi_try!(ffi::rocksdb_repair_db(opts.inner, cpath.as_ptr(),));
         }
@@ -1416,30 +1406,6 @@ impl DBVector {
     }
 }
 
-/// Retrieves a list of column families names from a given path.
-pub fn get_cf_names<P: AsRef<Path>>(path: P) -> Result<Vec<String>, Error> {
-    let opts = Options::default();
-    let cpath = to_cpath(path)?;
-    let result: Vec<String>;
-
-    unsafe {
-        let mut cflen: size_t = 0;
-        let column_fams_raw = ffi_try!(ffi::rocksdb_list_column_families(
-            opts.inner,
-            cpath.as_ptr() as *const _,
-            &mut cflen,
-        ));
-        let column_fams = slice::from_raw_parts(column_fams_raw, cflen as usize);
-        result = column_fams
-            .iter()
-            .map(|cf| CStr::from_ptr(*cf).to_string_lossy().into_owned())
-            .collect();
-        ffi::rocksdb_list_column_families_destroy(column_fams_raw, cflen);
-    }
-
-    Ok(result)
-}
-
 fn to_cpath<P: AsRef<Path>>(path: P) -> Result<CString, Error> {
     match CString::new(path.as_ref().to_string_lossy().as_bytes()) {
         Ok(c) => Ok(c),
         Err(_) => Err(Error::new(
             "Failed to convert path to CString when opening DB.".to_owned(),
         )),
     }
 }
 
 #[test]
 fn test_db_vector() {
     use std::mem;
@@ -1623,29 +1589,3 @@ fn set_option_test() {
     }
     assert!(DB::destroy(&Options::default(), path).is_ok());
 }
-
-#[test]
-fn get_cf_names_test() {
-    let path = "_rust_rocksdb_get_cf_names";
-    let opts = Options::default();
-    {
-        let db = DB::open_default(path).unwrap();
-        let cf_one = db.create_cf("one", &opts).unwrap();
-        let result = db.put_cf(cf_one, b"1", b"1");
-        assert!(result.is_ok());
-        let cf_two = db.create_cf("two", &opts).unwrap();
-        let result = db.put_cf(cf_two, b"2", b"2");
-        assert!(result.is_ok());
-    }
-    {
-        let cf_names = get_cf_names(path).unwrap();
-        let cfs = cf_names.iter().map(String::as_str).collect::<Vec<&str>>();
-        assert_eq!(cfs, vec!["default", "one", "two"]);
-        let db = DB::open_cf(&opts, path, cfs.as_slice()).unwrap();
-        let cf_one = db.cf_handle("one").unwrap();
-        assert_eq!(db.get_cf(cf_one, b"1").unwrap().unwrap().as_ref(), b"1");
-        let cf_two = db.cf_handle("two").unwrap();
-        assert_eq!(db.get_cf(cf_two, b"2").unwrap().unwrap().as_ref(), b"2");
-    }
-    assert!(DB::destroy(&opts, path).is_ok());
-}
diff --git a/src/lib.rs b/src/lib.rs
index 30fee96..a39262f 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -71,7 +71,7 @@ mod slice_transform;
 
 pub use compaction_filter::Decision as CompactionDecision;
 
 pub use db::{
-    get_cf_names, new_bloom_filter, DBCompactionStyle, DBCompressionType, DBIterator, DBRawIterator,
+    new_bloom_filter, DBCompactionStyle, DBCompressionType, DBIterator, DBRawIterator,
     DBRecoveryMode, DBVector, Direction, IteratorMode, ReadOptions, Snapshot, WriteBatch,
 };
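
As a quick orientation, here is a minimal usage sketch of the API surface this series touches: `WriteBatch::clear` from patch 1, and column-family listing through the existing `DB::list_cf` once the interim `get_cf_names` helper is dropped again in patch 5. The path and keys below are illustrative only and are not taken from the patches.

```rust
use rocksdb::{DB, Options, WriteBatch};

let path = "_path_for_rocksdb_series_sketch"; // illustrative path
{
    let db = DB::open_default(path).unwrap();

    // Patch 1: clear() empties everything buffered in the batch.
    let mut batch = WriteBatch::default();
    let _ = batch.put(b"my key", b"my value");
    assert_eq!(batch.len(), 1);
    batch.clear().unwrap();
    assert!(batch.is_empty());
    db.write(batch).unwrap(); // the cleared batch commits nothing
}
// Patch 5: column family names are still read through the existing DB::list_cf.
let cf_names = DB::list_cf(&Options::default(), path).unwrap();
assert_eq!(cf_names, vec!["default"]);
let _ = DB::destroy(&Options::default(), path);
```

Note that `clear()` only empties the in-memory batch; nothing reaches the database unless a non-empty batch is passed to `write`.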