Applying changes from rustfmt...

Branch: master
Jordan Terrell, 6 years ago
parent 02d5b93612
commit 896dbc6c61
8 changed files (lines changed per file):

  1. librocksdb-sys/build.rs (3)
  2. src/backup.rs (2)
  3. src/compaction_filter.rs (3)
  4. src/db.rs (210)
  5. src/lib.rs (4)
  6. tests/test_compationfilter.rs (2)
  7. tests/test_db.rs (12)
  8. tests/test_iterator.rs (25)

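Most of the hunks below are mechanical rustfmt changes: long generic signatures are broken one parameter per line, and `where` bounds move onto their own lines with trailing commas. A minimal sketch of the pattern on a hypothetical function (not part of this diff):

    // Before: bounds folded into the signature.
    // fn put_example<K, V>(key: K, value: V) -> Result<(), String>
    //     where K: AsRef<[u8]>,
    //           V: AsRef<[u8]> { /* ... */ }

    // After rustfmt: a bare `where`, one bound per line, trailing commas.
    fn put_example<K, V>(key: K, value: V) -> Result<(), String>
    where
        K: AsRef<[u8]>,
        V: AsRef<[u8]>,
    {
        let _ = (key.as_ref(), value.as_ref());
        Ok(())
    }
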
@@ -119,7 +119,8 @@ fn build_rocksdb() {
.filter(|file| match *file {
"port/port_posix.cc" | "env/env_posix.cc" | "env/io_posix.cc" => false,
_ => true,
}).collect::<Vec<&'static str>>();
})
.collect::<Vec<&'static str>>();
// Add Windows-specific sources
lib_sources.push("port/win/port_win.cc");

@@ -46,7 +46,7 @@ impl BackupEngine {
"Failed to convert path to CString \
when opening backup engine"
.to_owned(),
))
));
}
};

@@ -45,7 +45,8 @@ impl<F> CompactionFilterFn for F
where
F: FnMut(u32, &[u8], &[u8]) -> Decision,
F: Send + 'static,
{}
{
}
pub struct CompactionFilterCallback<F>
where

@@ -154,7 +154,7 @@ pub struct Snapshot<'a> {
/// ```
pub struct DBRawIterator<'a> {
inner: *mut ffi::rocksdb_iterator_t,
db: PhantomData<&'a DB>
db: PhantomData<&'a DB>,
}
/// An iterator over a database or column family, with specifiable
@@ -214,7 +214,7 @@ impl<'a> DBRawIterator<'a> {
unsafe {
DBRawIterator {
inner: ffi::rocksdb_create_iterator(db.inner, readopts.inner),
db: PhantomData
db: PhantomData,
}
}
}
@@ -227,7 +227,7 @@ impl<'a> DBRawIterator<'a> {
unsafe {
Ok(DBRawIterator {
inner: ffi::rocksdb_create_iterator_cf(db.inner, readopts.inner, cf_handle.inner),
db: PhantomData
db: PhantomData,
})
}
}
@@ -603,7 +603,11 @@ impl<'a> Snapshot<'a> {
DBRawIterator::new(self.db, &readopts)
}
pub fn raw_iterator_cf_opt(&self, cf_handle: ColumnFamily, mut readopts: ReadOptions) -> Result<DBRawIterator, Error> {
pub fn raw_iterator_cf_opt(
&self,
cf_handle: ColumnFamily,
mut readopts: ReadOptions,
) -> Result<DBRawIterator, Error> {
readopts.set_snapshot(self);
DBRawIterator::new_cf(self.db, cf_handle, &readopts)
}
@@ -613,17 +617,30 @@ impl<'a> Snapshot<'a> {
self.get_opt(key, readopts)
}
pub fn get_cf<K: AsRef<[u8]>>(&self, cf: ColumnFamily, key: K) -> Result<Option<DBVector>, Error> {
pub fn get_cf<K: AsRef<[u8]>>(
&self,
cf: ColumnFamily,
key: K,
) -> Result<Option<DBVector>, Error> {
let readopts = ReadOptions::default();
self.get_cf_opt(cf, key.as_ref(), readopts)
}
pub fn get_opt<K: AsRef<[u8]>>(&self, key: K, mut readopts: ReadOptions) -> Result<Option<DBVector>, Error> {
pub fn get_opt<K: AsRef<[u8]>>(
&self,
key: K,
mut readopts: ReadOptions,
) -> Result<Option<DBVector>, Error> {
readopts.set_snapshot(self);
self.db.get_opt(key.as_ref(), &readopts)
}
pub fn get_cf_opt<K: AsRef<[u8]>>(&self, cf: ColumnFamily, key: K, mut readopts: ReadOptions) -> Result<Option<DBVector>, Error> {
pub fn get_cf_opt<K: AsRef<[u8]>>(
&self,
cf: ColumnFamily,
key: K,
mut readopts: ReadOptions,
) -> Result<Option<DBVector>, Error> {
readopts.set_snapshot(self);
self.db.get_cf_opt(cf, key.as_ref(), &readopts)
}
@@ -690,13 +707,14 @@ impl DB {
"Failed to convert path to CString \
when opening DB."
.to_owned(),
))
));
}
};
if let Err(e) = fs::create_dir_all(&path) {
return Err(Error::new(format!(
"Failed to create RocksDB directory: `{:?}`.", e
"Failed to create RocksDB directory: `{:?}`.",
e
)));
}
@@ -755,7 +773,8 @@ impl DB {
}
for (n, h) in cfs_v.iter().zip(cfhandles) {
cf_map.write()
cf_map
.write()
.map_err(|e| Error::new(e.to_string()))?
.insert(n.name.clone(), h);
}
@@ -829,7 +848,11 @@ impl DB {
self.write_opt(batch, &wo)
}
pub fn get_opt<K: AsRef<[u8]>>(&self, key: K, readopts: &ReadOptions) -> Result<Option<DBVector>, Error> {
pub fn get_opt<K: AsRef<[u8]>>(
&self,
key: K,
readopts: &ReadOptions,
) -> Result<Option<DBVector>, Error> {
if readopts.inner.is_null() {
return Err(Error::new(
"Unable to create RocksDB read options. \
@@ -902,7 +925,11 @@ impl DB {
}
}
pub fn get_cf<K: AsRef<[u8]>>(&self, cf: ColumnFamily, key: K) -> Result<Option<DBVector>, Error> {
pub fn get_cf<K: AsRef<[u8]>>(
&self,
cf: ColumnFamily,
key: K,
) -> Result<Option<DBVector>, Error> {
self.get_cf_opt(cf, key.as_ref(), &ReadOptions::default())
}
@@ -914,7 +941,7 @@ impl DB {
"Failed to convert path to CString \
when opening rocksdb"
.to_owned(),
))
));
}
};
let cf = unsafe {
@@ -924,10 +951,12 @@ impl DB {
cname.as_ptr(),
));
self.cfs.write().map_err(|e| Error::new(e.to_string()))?
self.cfs
.write()
.map_err(|e| Error::new(e.to_string()))?
.insert(name.to_string(), cf_handle);
ColumnFamily {
ColumnFamily {
inner: cf_handle,
db: PhantomData,
}
@@ -936,29 +965,29 @@ impl DB {
}
pub fn drop_cf(&self, name: &str) -> Result<(), Error> {
if let Some(cf) = self.cfs.write().map_err(|e| Error::new(e.to_string()))?
.remove(name) {
if let Some(cf) = self
.cfs
.write()
.map_err(|e| Error::new(e.to_string()))?
.remove(name)
{
unsafe {
ffi_try!(ffi::rocksdb_drop_column_family(self.inner, cf,));
}
Ok(())
} else {
Err(Error::new(
format!("Invalid column family: {}", name).to_owned()
format!("Invalid column family: {}", name).to_owned(),
))
}
}
/// Return the underlying column family handle.
pub fn cf_handle(&self, name: &str) -> Option<ColumnFamily> {
self.cfs
.read()
.ok()?
.get(name)
.map(|h| ColumnFamily {
inner: *h,
db: PhantomData
})
self.cfs.read().ok()?.get(name).map(|h| ColumnFamily {
inner: *h,
db: PhantomData,
})
}
pub fn iterator(&self, mode: IteratorMode) -> DBIterator {
@@ -982,7 +1011,11 @@ impl DB {
pub fn prefix_iterator<P: AsRef<[u8]>>(&self, prefix: P) -> DBIterator {
let mut opts = ReadOptions::default();
opts.set_prefix_same_as_start(true);
DBIterator::new(self, &opts, IteratorMode::From(prefix.as_ref(), Direction::Forward))
DBIterator::new(
self,
&opts,
IteratorMode::From(prefix.as_ref(), Direction::Forward),
)
}
pub fn iterator_cf(
@@ -1007,7 +1040,7 @@ impl DB {
pub fn prefix_iterator_cf<P: AsRef<[u8]>>(
&self,
cf_handle: ColumnFamily,
prefix: P
prefix: P,
) -> Result<DBIterator, Error> {
let mut opts = ReadOptions::default();
opts.set_prefix_same_as_start(true);
@@ -1034,14 +1067,14 @@ impl DB {
}
pub fn put_opt<K, V>(&self, key: K, value: V, writeopts: &WriteOptions) -> Result<(), Error>
where K: AsRef<[u8]>,
V: AsRef<[u8]> {
where
K: AsRef<[u8]>,
V: AsRef<[u8]>,
{
let key = key.as_ref();
let value = value.as_ref();
unsafe {
ffi_try!(ffi::rocksdb_put(
self.inner,
writeopts.inner,
@@ -1060,15 +1093,15 @@ impl DB {
key: K,
value: V,
writeopts: &WriteOptions,
) -> Result<(), Error>
where K: AsRef<[u8]>,
V: AsRef<[u8]> {
) -> Result<(), Error>
where
K: AsRef<[u8]>,
V: AsRef<[u8]>,
{
let key = key.as_ref();
let value = value.as_ref();
unsafe {
ffi_try!(ffi::rocksdb_put_cf(
self.inner,
writeopts.inner,
@@ -1082,15 +1115,11 @@ impl DB {
}
}
pub fn merge_opt<K, V>(
&self,
key: K,
value: V,
writeopts: &WriteOptions,
) -> Result<(), Error>
where K: AsRef<[u8]>,
V: AsRef<[u8]> {
pub fn merge_opt<K, V>(&self, key: K, value: V, writeopts: &WriteOptions) -> Result<(), Error>
where
K: AsRef<[u8]>,
V: AsRef<[u8]>,
{
let key = key.as_ref();
let value = value.as_ref();
@@ -1114,14 +1143,14 @@ impl DB {
value: V,
writeopts: &WriteOptions,
) -> Result<(), Error>
where K: AsRef<[u8]>,
V: AsRef<[u8]> {
where
K: AsRef<[u8]>,
V: AsRef<[u8]>,
{
let key = key.as_ref();
let value = value.as_ref();
unsafe {
ffi_try!(ffi::rocksdb_merge_cf(
self.inner,
writeopts.inner,
@@ -1135,9 +1164,13 @@ impl DB {
}
}
pub fn delete_opt<K: AsRef<[u8]>>(&self, key: K, writeopts: &WriteOptions) -> Result<(), Error> {
pub fn delete_opt<K: AsRef<[u8]>>(
&self,
key: K,
writeopts: &WriteOptions,
) -> Result<(), Error> {
let key = key.as_ref();
unsafe {
ffi_try!(ffi::rocksdb_delete(
self.inner,
@@ -1155,7 +1188,6 @@ impl DB {
key: K,
writeopts: &WriteOptions,
) -> Result<(), Error> {
let key = key.as_ref();
unsafe {
@@ -1171,30 +1203,34 @@ impl DB {
}
pub fn put<K, V>(&self, key: K, value: V) -> Result<(), Error>
where K: AsRef<[u8]>,
V: AsRef<[u8]> {
where
K: AsRef<[u8]>,
V: AsRef<[u8]>,
{
self.put_opt(key.as_ref(), value.as_ref(), &WriteOptions::default())
}
pub fn put_cf<K, V>(&self, cf: ColumnFamily, key: K, value: V) -> Result<(), Error>
where K: AsRef<[u8]>,
V: AsRef<[u8]> {
pub fn put_cf<K, V>(&self, cf: ColumnFamily, key: K, value: V) -> Result<(), Error>
where
K: AsRef<[u8]>,
V: AsRef<[u8]>,
{
self.put_cf_opt(cf, key.as_ref(), value.as_ref(), &WriteOptions::default())
}
pub fn merge<K, V>(&self, key: K, value: V) -> Result<(), Error>
where K: AsRef<[u8]>,
V: AsRef<[u8]> {
pub fn merge<K, V>(&self, key: K, value: V) -> Result<(), Error>
where
K: AsRef<[u8]>,
V: AsRef<[u8]>,
{
self.merge_opt(key.as_ref(), value.as_ref(), &WriteOptions::default())
}
pub fn merge_cf<K, V>(&self, cf: ColumnFamily, key: K, value: V) -> Result<(), Error>
where K: AsRef<[u8]>,
V: AsRef<[u8]> {
pub fn merge_cf<K, V>(&self, cf: ColumnFamily, key: K, value: V) -> Result<(), Error>
where
K: AsRef<[u8]>,
V: AsRef<[u8]>,
{
self.merge_cf_opt(cf, key.as_ref(), value.as_ref(), &WriteOptions::default())
}
@@ -1284,9 +1320,10 @@ impl WriteBatch {
/// Insert a value into the database under the given key.
pub fn put<K, V>(&mut self, key: K, value: V) -> Result<(), Error>
where K: AsRef<[u8]>,
V: AsRef<[u8]> {
where
K: AsRef<[u8]>,
V: AsRef<[u8]>,
{
let key = key.as_ref();
let value = value.as_ref();
@@ -1302,10 +1339,11 @@ impl WriteBatch {
}
}
pub fn put_cf<K, V>(&mut self, cf: ColumnFamily, key: K, value: V) -> Result<(), Error>
where K: AsRef<[u8]>,
V: AsRef<[u8]> {
pub fn put_cf<K, V>(&mut self, cf: ColumnFamily, key: K, value: V) -> Result<(), Error>
where
K: AsRef<[u8]>,
V: AsRef<[u8]>,
{
let key = key.as_ref();
let value = value.as_ref();
@@ -1322,10 +1360,11 @@ impl WriteBatch {
}
}
pub fn merge<K, V>(&mut self, key: K, value: V) -> Result<(), Error>
where K: AsRef<[u8]>,
V: AsRef<[u8]> {
pub fn merge<K, V>(&mut self, key: K, value: V) -> Result<(), Error>
where
K: AsRef<[u8]>,
V: AsRef<[u8]>,
{
let key = key.as_ref();
let value = value.as_ref();
@@ -1341,10 +1380,11 @@ impl WriteBatch {
}
}
pub fn merge_cf<K, V>(&mut self, cf: ColumnFamily, key: K, value: V) -> Result<(), Error>
where K: AsRef<[u8]>,
V: AsRef<[u8]> {
pub fn merge_cf<K, V>(&mut self, cf: ColumnFamily, key: K, value: V) -> Result<(), Error>
where
K: AsRef<[u8]>,
V: AsRef<[u8]>,
{
let key = key.as_ref();
let value = value.as_ref();
@@ -1458,7 +1498,7 @@ impl ReadOptions {
pub fn set_iterate_upper_bound<K: AsRef<[u8]>>(&mut self, key: K) {
let key = key.as_ref();
unsafe {
ffi::rocksdb_readoptions_set_iterate_upper_bound(
self.inner,

@@ -71,8 +71,8 @@ mod slice_transform;
pub use compaction_filter::Decision as CompactionDecision;
pub use db::{
DBCompactionStyle, DBCompressionType, DBIterator, DBRawIterator, DBRecoveryMode,
DBVector, Direction, IteratorMode, ReadOptions, Snapshot, WriteBatch,
DBCompactionStyle, DBCompressionType, DBIterator, DBRawIterator, DBRecoveryMode, DBVector,
Direction, IteratorMode, ReadOptions, Snapshot, WriteBatch,
};
pub use slice_transform::SliceTransform;

@@ -16,7 +16,7 @@ extern crate rocksdb;
mod util;
use rocksdb::{CompactionDecision, DB, Options};
use rocksdb::{CompactionDecision, Options, DB};
use util::DBPath;
#[cfg(test)]

@@ -12,14 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate rocksdb;
extern crate libc;
extern crate rocksdb;
mod util;
use libc::{size_t};
use libc::size_t;
use rocksdb::{DB, DBVector, Error, IteratorMode, Options, WriteBatch};
use rocksdb::{DBVector, Error, IteratorMode, Options, WriteBatch, DB};
use util::DBPath;
#[test]
@@ -38,7 +38,7 @@ fn external() {
{
let db = DB::open_default(&path).unwrap();
assert!(db.put(b"k1", b"v1111").is_ok());
let r: Result<Option<DBVector>, Error> = db.get(b"k1");
@@ -130,14 +130,14 @@ fn snapshot_test() {
let path = DBPath::new("_rust_rocksdb_snapshottest");
{
let db = DB::open_default(&path).unwrap();
assert!(db.put(b"k1", b"v1111").is_ok());
let snap = db.snapshot();
assert!(snap.get(b"k1").unwrap().unwrap().to_utf8().unwrap() == "v1111");
assert!(db.put(b"k2", b"v2222").is_ok());
assert!(db.get(b"k2").unwrap().is_some());
assert!(snap.get(b"k2").unwrap().is_none());
}

@@ -214,20 +214,20 @@ fn test_prefix_iterator_uses_full_prefix() {
// Explanation: `db.prefix_iterator` sets the underlying
// options to seek to the first key that matches the *entire*
// `prefix`. From there, the iterator will continue to read pairs
// as long as the prefix extracted from `key` matches the
// as long as the prefix extracted from `key` matches the
// prefix extracted from `prefix`.
let path = DBPath::new("_rust_rocksdb_prefixiteratorusesfullprefixtest");
{
let data = [
([0,0,0,0], b"111"),
([0,0,0,1], b"222"),
([0,1,0,1], b"333"),
([0,1,1,1], b"444"),
([0,1,2,1], b"555"),
([0,2,0,0], b"666"),
([2,0,0,0], b"777"),
([2,2,2,2], b"888")
([0, 0, 0, 0], b"111"),
([0, 0, 0, 1], b"222"),
([0, 1, 0, 1], b"333"),
([0, 1, 1, 1], b"444"),
([0, 1, 2, 1], b"555"),
([0, 2, 0, 0], b"666"),
([2, 0, 0, 0], b"777"),
([2, 2, 2, 2], b"888"),
];
let prefix_extractor = rocksdb::SliceTransform::create_fixed_prefix(1);
@@ -242,9 +242,10 @@ fn test_prefix_iterator_uses_full_prefix() {
assert!(db.put(key, *value).is_ok());
}
let prefix = [0,1,1];
let results: Vec<_> = db.prefix_iterator(&prefix)
.map(|(_,v)| std::str::from_utf8(&v).unwrap().to_string())
let prefix = [0, 1, 1];
let results: Vec<_> = db
.prefix_iterator(&prefix)
.map(|(_, v)| std::str::from_utf8(&v).unwrap().to_string())
.collect();
assert_eq!(results, vec!("444", "555", "666"));
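
The test comment above explains the contract: `db.prefix_iterator` seeks to the first key matching the entire prefix, then keeps yielding pairs while the prefix extracted from each key matches the prefix extracted from the argument. A minimal, self-contained sketch of that usage against this crate's API (the database path, keys, and values below are illustrative, not taken from the commit):

    extern crate rocksdb;

    use rocksdb::{Options, DB};

    fn main() {
        // Illustrative path; any writable location works.
        let path = "_prefix_iterator_sketch";

        // Length-1 fixed-prefix extractor, as in the test above.
        let mut opts = Options::default();
        opts.create_if_missing(true);
        opts.set_prefix_extractor(rocksdb::SliceTransform::create_fixed_prefix(1));

        let db = DB::open(&opts, path).unwrap();
        db.put([0, 1, 1, 1], b"444").unwrap();
        db.put([0, 1, 2, 1], b"555").unwrap();
        db.put([2, 0, 0, 0], b"777").unwrap();

        // Seeks to the first key >= [0, 1, 1], then yields pairs while the
        // extracted prefix (the first byte here) still matches.
        let values: Vec<_> = db
            .prefix_iterator([0, 1, 1])
            .map(|(_key, value)| std::str::from_utf8(&value).unwrap().to_string())
            .collect();
        assert_eq!(values, vec!["444", "555"]);

        drop(db);
        let _ = DB::destroy(&Options::default(), path);
    }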
