Merge branch 'urbint-update-cc-crate'

Branch: master
Tyler Neely committed 7 years ago
Commit: 4d48b70bc2
Signed with GPG key ID 23E6C4FBEAE5E4E3 (no known key found for this signature in the database)
14 changed files:

  .travis.yml                  |    4
  librocksdb-sys/Cargo.toml    |    3
  librocksdb-sys/build.rs      |   21
  librocksdb-sys/src/lib.rs    | 1019
  librocksdb-sys/tests/ffi.rs  |   10
  rustfmt.toml                 |    1
  src/backup.rs                |   26
  src/compaction_filter.rs     |   24
  src/comparator.rs            |    7
  src/db.rs                    |  390
  src/db_options.rs            |   57
  src/ffi_util.rs              |    2
  src/lib.rs                   |    7
  src/merge_operator.rs        |   34

.travis.yml
@@ -10,8 +10,12 @@ addons:
   apt:
     sources:
     - ubuntu-toolchain-r-test
+    - llvm-toolchain-trusty
     packages:
     - g++-5
+    - llvm-3.9-dev
+    - libclang-3.9-dev
+    - clang-3.9
 script:
 - cargo test --manifest-path=librocksdb-sys/Cargo.toml

librocksdb-sys/Cargo.toml
@@ -22,5 +22,6 @@ libc = "0.2"
 const-cstr = "0.2"

 [build-dependencies]
-gcc = { version = "0.3", features = ["parallel"] }
+cc = { version = "1.0", features = ["parallel"] }
 make-cmd = "0.1"
+bindgen = "0.29"

librocksdb-sys/build.rs
@@ -1,6 +1,9 @@
-extern crate gcc;
+extern crate cc;
+extern crate bindgen;

+use std::env;
 use std::fs;
+use std::path::PathBuf;

 fn link(name: &str, bundled: bool) {
     use std::env::var;
@@ -30,7 +33,19 @@ fn build_rocksdb() {
     println!("cargo:rerun-if-changed=build.rs");
     println!("cargo:rerun-if-changed=rocksdb/");

-    let mut config = gcc::Config::new();
+    let bindings = bindgen::Builder::default()
+        .header("rocksdb/include/rocksdb/c.h")
+        .hide_type("max_align_t") // https://github.com/rust-lang-nursery/rust-bindgen/issues/550
+        .ctypes_prefix("libc")
+        .generate()
+        .expect("unable to generate rocksdb bindings");
+
+    let out_path = PathBuf::from(env::var("OUT_DIR").unwrap());
+    bindings
+        .write_to_file(out_path.join("bindings.rs"))
+        .expect("unable to write rocksdb bindings");
+
+    let mut config = cc::Build::new();
     config.include("rocksdb/include/");
     config.include("rocksdb/");
     config.include("rocksdb/third-party/gtest-1.7.0/fused-src/");
@@ -115,7 +130,7 @@ fn build_rocksdb() {
 }

 fn build_snappy() {
-    let mut config = gcc::Config::new();
+    let mut config = cc::Build::new();
     config.include("snappy/");
     config.include(".");

librocksdb-sys/src/lib.rs
(file diff suppressed because it is too large)

librocksdb-sys/tests/ffi.rs
@@ -429,7 +429,7 @@ fn ffi() {
     rocksdb_options_set_block_based_table_factory(options, table_options);
     let no_compression = rocksdb_no_compression;
-    rocksdb_options_set_compression(options, no_compression);
+    rocksdb_options_set_compression(options, no_compression as i32);
     rocksdb_options_set_compression_options(options, -14, -1, 0, 0);
     let compression_levels = vec![
         no_compression,
@@ -892,15 +892,15 @@ fn ffi() {
     let mut cf_options = rocksdb_options_create();
-    let cf_names: [*const c_char; 2] = [cstrp!("default"), cstrp!("cf1")];
-    let cf_opts: [*const rocksdb_options_t; 2] = [cf_options, cf_options];
+    let mut cf_names: [*const c_char; 2] = [cstrp!("default"), cstrp!("cf1")];
+    let mut cf_opts: [*const rocksdb_options_t; 2] = [cf_options, cf_options];
     let mut handles: [*mut rocksdb_column_family_handle_t; 2] = [ptr::null_mut(),
                                                                  ptr::null_mut()];
     db = rocksdb_open_column_families(db_options,
                                       dbname,
                                       2,
-                                      cf_names.as_ptr(),
-                                      cf_opts.as_ptr(),
+                                      cf_names.as_mut_ptr(),
+                                      cf_opts.as_mut_ptr(),
                                       handles.as_mut_ptr(),
                                       &mut err);
     CheckNoError!(err);

rustfmt.toml
@@ -1,3 +1,4 @@
 reorder_imports = true
 max_width = 100
 ideal_width = 100
+trailing_comma = always

src/backup.rs
@@ -35,21 +35,24 @@ pub struct RestoreOptions {
 impl BackupEngine {
     /// Open a backup engine with the specified options.
-    pub fn open<P: AsRef<Path>>(opts: &BackupEngineOptions,
-                                path: P)
-                                -> Result<BackupEngine, Error> {
+    pub fn open<P: AsRef<Path>>(
+        opts: &BackupEngineOptions,
+        path: P,
+    ) -> Result<BackupEngine, Error> {
         let path = path.as_ref();
         let cpath = match CString::new(path.to_string_lossy().as_bytes()) {
             Ok(c) => c,
             Err(_) => {
-                return Err(Error::new("Failed to convert path to CString \
+                return Err(Error::new(
+                    "Failed to convert path to CString \
                      when opening backup engine"
-                    .to_owned()))
+                        .to_owned(),
+                ))
             }
         };

         let be: *mut ffi::rocksdb_backup_engine_t;
-        unsafe { be = ffi_try!(ffi::rocksdb_backup_engine_open(opts.inner, cpath.as_ptr())) }
+        unsafe { be = ffi_try!(ffi::rocksdb_backup_engine_open(opts.inner, cpath.as_ptr(),)) }
         if be.is_null() {
             return Err(Error::new("Could not initialize backup engine.".to_owned()));
@@ -60,15 +63,20 @@ impl BackupEngine {
     pub fn create_new_backup(&mut self, db: &DB) -> Result<(), Error> {
         unsafe {
-            ffi_try!(ffi::rocksdb_backup_engine_create_new_backup(self.inner, db.inner));
+            ffi_try!(ffi::rocksdb_backup_engine_create_new_backup(
+                self.inner,
+                db.inner,
+            ));
             Ok(())
         }
     }

     pub fn purge_old_backups(&mut self, num_backups_to_keep: usize) -> Result<(), Error> {
         unsafe {
-            ffi_try!(ffi::rocksdb_backup_engine_purge_old_backups(self.inner,
-                                                                  num_backups_to_keep as uint32_t));
+            ffi_try!(ffi::rocksdb_backup_engine_purge_old_backups(
+                self.inner,
+                num_backups_to_keep as uint32_t,
+            ));
             Ok(())
         }
     }
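A hedged usage sketch of the backup calls reformatted above; it assumes the public `rocksdb::backup` module and a `Default` impl for `BackupEngineOptions`, and the path is illustrative:

```rust
use rocksdb::DB;
use rocksdb::backup::{BackupEngine, BackupEngineOptions};

// Take a fresh backup of `db` and keep only the three most recent ones.
fn back_up(db: &DB) -> Result<(), rocksdb::Error> {
    let opts = BackupEngineOptions::default(); // assumed Default impl
    let mut engine = BackupEngine::open(&opts, "/tmp/rocksdb_backups")?;
    engine.create_new_backup(db)?;
    engine.purge_old_backups(3)?;
    Ok(())
}
```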

src/compaction_filter.rs
@@ -43,32 +43,37 @@ pub enum Decision {
 /// [set_compaction_filter]: ../struct.Options.html#method.set_compaction_filter
 pub trait CompactionFilterFn: FnMut(u32, &[u8], &[u8]) -> Decision {}

 impl<F> CompactionFilterFn for F
-    where F: FnMut(u32, &[u8], &[u8]) -> Decision,
-          F: Send + 'static
+where
+    F: FnMut(u32, &[u8], &[u8]) -> Decision,
+    F: Send + 'static,
 {
 }

 pub struct CompactionFilterCallback<F>
-    where F: CompactionFilterFn
+where
+    F: CompactionFilterFn,
 {
     pub name: CString,
     pub filter_fn: F,
 }

 pub unsafe extern "C" fn destructor_callback<F>(raw_cb: *mut c_void)
-    where F: CompactionFilterFn
+where
+    F: CompactionFilterFn,
 {
     let _: Box<CompactionFilterCallback<F>> = mem::transmute(raw_cb);
 }

 pub unsafe extern "C" fn name_callback<F>(raw_cb: *mut c_void) -> *const c_char
-    where F: CompactionFilterFn
+where
+    F: CompactionFilterFn,
 {
     let cb = &*(raw_cb as *mut CompactionFilterCallback<F>);
     cb.name.as_ptr()
 }

-pub unsafe extern "C" fn filter_callback<F>(raw_cb: *mut c_void,
+pub unsafe extern "C" fn filter_callback<F>(
+    raw_cb: *mut c_void,
     level: c_int,
     raw_key: *const c_char,
     key_length: size_t,
@@ -76,9 +81,10 @@ pub unsafe extern "C" fn filter_callback<F>(raw_cb: *mut c_void,
     value_length: size_t,
     new_value: *mut *mut c_char,
     new_value_length: *mut size_t,
-    value_changed: *mut c_uchar)
-    -> c_uchar
-    where F: CompactionFilterFn
+    value_changed: *mut c_uchar,
+) -> c_uchar
+where
+    F: CompactionFilterFn,
 {
     use self::Decision::*;
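A hedged sketch of installing a filter through `Options::set_compaction_filter` (whose signature appears in the `src/db_options.rs` hunks further down); the `Keep`/`Remove` variants of `Decision` are assumed from the crate's public `CompactionDecision` re-export:

```rust
use rocksdb::{CompactionDecision, Options};

// Drop any key prefixed with "tmp:" the next time compaction touches it.
fn options_with_filter() -> Options {
    let mut opts = Options::default();
    opts.set_compaction_filter("drop-tmp", |_level: u32, key: &[u8], _value: &[u8]| {
        if key.starts_with(b"tmp:") {
            CompactionDecision::Remove
        } else {
            CompactionDecision::Keep
        }
    });
    opts
}
```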

src/comparator.rs
@@ -37,12 +37,13 @@ pub unsafe extern "C" fn name_callback(raw_cb: *mut c_void) -> *const c_char {
     ptr as *const c_char
 }

-pub unsafe extern "C" fn compare_callback(raw_cb: *mut c_void,
+pub unsafe extern "C" fn compare_callback(
+    raw_cb: *mut c_void,
     a_raw: *const c_char,
     a_len: size_t,
     b_raw: *const c_char,
-    b_len: size_t)
-    -> c_int {
+    b_len: size_t,
+) -> c_int {
     let cb: &mut ComparatorCallback = &mut *(raw_cb as *mut ComparatorCallback);
     let a: &[u8] = slice::from_raw_parts(a_raw as *const u8, a_len as usize);
     let b: &[u8] = slice::from_raw_parts(b_raw as *const u8, b_len as usize);

src/db.rs
@@ -55,10 +55,10 @@ pub enum DBCompactionStyle {
 #[derive(Debug, Copy, Clone, PartialEq)]
 pub enum DBRecoveryMode {
-    TolerateCorruptedTailRecords = ffi::rocksdb_recovery_mode_tolerate_corrupted_tail_records as isize,
-    AbsoluteConsistency = ffi::rocksdb_recovery_mode_absolute_consistency as isize,
-    PointInTime = ffi::rocksdb_recovery_mode_point_in_time as isize,
-    SkipAnyCorruptedRecord = ffi::rocksdb_recovery_mode_skip_any_corrupted_record as isize,
+    TolerateCorruptedTailRecords = ffi::rocksdb_tolerate_corrupted_tail_records_recovery as isize,
+    AbsoluteConsistency = ffi::rocksdb_absolute_consistency_recovery as isize,
+    PointInTime = ffi::rocksdb_point_in_time_recovery as isize,
+    SkipAnyCorruptedRecord = ffi::rocksdb_skip_any_corrupted_records_recovery as isize,
 }

 /// An atomic batch of write operations.
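The four variants now reference the constant names that bindgen derives from the C header. A hedged usage sketch (assumes the crate's `Options::set_wal_recovery_mode` setter; the path is illustrative):

```rust
use rocksdb::{DB, DBRecoveryMode, Options};

// Open a database that recovers the WAL up to the last consistent point
// instead of refusing to start after a partial write.
fn open_point_in_time(path: &str) -> Result<DB, rocksdb::Error> {
    let mut opts = Options::default();
    opts.create_if_missing(true);
    opts.set_wal_recovery_mode(DBRecoveryMode::PointInTime); // assumed setter
    DB::open(&opts, path)
}
```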
@@ -197,17 +197,14 @@ pub enum IteratorMode<'a> {
 impl DBRawIterator {
     fn new(db: &DB, readopts: &ReadOptions) -> DBRawIterator {
-        unsafe {
-            DBRawIterator {
-                inner: ffi::rocksdb_create_iterator(db.inner, readopts.inner),
-            }
-        }
+        unsafe { DBRawIterator { inner: ffi::rocksdb_create_iterator(db.inner, readopts.inner) } }
     }

-    fn new_cf(db: &DB,
-              cf_handle: ColumnFamily,
-              readopts: &ReadOptions)
-              -> Result<DBRawIterator, Error> {
+    fn new_cf(
+        db: &DB,
+        cf_handle: ColumnFamily,
+        readopts: &ReadOptions,
+    ) -> Result<DBRawIterator, Error> {
         unsafe {
             Ok(DBRawIterator {
                 inner: ffi::rocksdb_create_iterator_cf(db.inner, readopts.inner, cf_handle.inner),
@@ -251,7 +248,9 @@ impl DBRawIterator {
     /// }
     /// ```
     pub fn seek_to_first(&mut self) {
-        unsafe { ffi::rocksdb_iter_seek_to_first(self.inner); }
+        unsafe {
+            ffi::rocksdb_iter_seek_to_first(self.inner);
+        }
     }

     /// Seeks to the last key in the database.
@@ -285,7 +284,9 @@ impl DBRawIterator {
     /// }
     /// ```
     pub fn seek_to_last(&mut self) {
-        unsafe { ffi::rocksdb_iter_seek_to_last(self.inner); }
+        unsafe {
+            ffi::rocksdb_iter_seek_to_last(self.inner);
+        }
     }

     /// Seeks to the specified key or the first key that lexicographically follows it.
@@ -312,7 +313,13 @@ impl DBRawIterator {
     /// }
     /// ```
     pub fn seek(&mut self, key: &[u8]) {
-        unsafe { ffi::rocksdb_iter_seek(self.inner, key.as_ptr() as *const c_char, key.len() as size_t); }
+        unsafe {
+            ffi::rocksdb_iter_seek(
+                self.inner,
+                key.as_ptr() as *const c_char,
+                key.len() as size_t,
+            );
+        }
     }

     /// Seeks to the specified key, or the first key that lexicographically precedes it.
@@ -339,21 +346,31 @@ impl DBRawIterator {
     /// // There are no keys in the database
     /// }
     pub fn seek_for_prev(&mut self, key: &[u8]) {
-        unsafe { ffi::rocksdb_iter_seek_for_prev(self.inner, key.as_ptr() as *const c_char, key.len() as size_t); }
+        unsafe {
+            ffi::rocksdb_iter_seek_for_prev(
+                self.inner,
+                key.as_ptr() as *const c_char,
+                key.len() as size_t,
+            );
+        }
     }

     /// Seeks to the next key.
     ///
     /// Returns true if the iterator is valid after this operation.
     pub fn next(&mut self) {
-        unsafe { ffi::rocksdb_iter_next(self.inner); }
+        unsafe {
+            ffi::rocksdb_iter_next(self.inner);
+        }
     }

     /// Seeks to the previous key.
     ///
     /// Returns true if the iterator is valid after this operation.
     pub fn prev(&mut self) {
-        unsafe { ffi::rocksdb_iter_prev(self.inner); }
+        unsafe {
+            ffi::rocksdb_iter_prev(self.inner);
+        }
     }

     /// Returns a slice to the internal buffer storing the current key.
@@ -377,9 +394,7 @@ impl DBRawIterator {
     /// Returns a copy of the current key.
     pub fn key(&self) -> Option<Vec<u8>> {
-        unsafe {
-            self.key_inner().map(|key| key.to_vec())
-        }
+        unsafe { self.key_inner().map(|key| key.to_vec()) }
     }

     /// Returns a slice to the internal buffer storing the current value.
@@ -403,9 +418,7 @@ impl DBRawIterator {
     /// Returns a copy of the current value.
     pub fn value(&self) -> Option<Vec<u8>> {
-        unsafe {
-            self.value_inner().map(|value| value.to_vec())
-        }
+        unsafe { self.value_inner().map(|value| value.to_vec()) }
     }
 }
@@ -428,11 +441,12 @@ impl DBIterator {
         rv
     }

-    fn new_cf(db: &DB,
-              cf_handle: ColumnFamily,
-              readopts: &ReadOptions,
-              mode: IteratorMode)
-              -> Result<DBIterator, Error> {
+    fn new_cf(
+        db: &DB,
+        cf_handle: ColumnFamily,
+        readopts: &ReadOptions,
+        mode: IteratorMode,
+    ) -> Result<DBIterator, Error> {
         let mut rv = DBIterator {
             raw: try!(DBRawIterator::new_cf(db, cf_handle, readopts)),
             direction: Direction::Forward, // blown away by set_mode()
@@ -484,7 +498,10 @@ impl Iterator for DBIterator {
         if self.raw.valid() {
             // .key() and .value() only ever return None if valid == false, which we've just cheked
-            Some((self.raw.key().unwrap().into_boxed_slice(), self.raw.value().unwrap().into_boxed_slice()))
+            Some((
+                self.raw.key().unwrap().into_boxed_slice(),
+                self.raw.value().unwrap().into_boxed_slice(),
+            ))
         } else {
             None
         }
@@ -512,10 +529,11 @@ impl<'a> Snapshot<'a> {
         DBIterator::new(self.db, &readopts, mode)
     }

-    pub fn iterator_cf(&self,
-                       cf_handle: ColumnFamily,
-                       mode: IteratorMode)
-                       -> Result<DBIterator, Error> {
+    pub fn iterator_cf(
+        &self,
+        cf_handle: ColumnFamily,
+        mode: IteratorMode,
+    ) -> Result<DBIterator, Error> {
         let mut readopts = ReadOptions::default();
         readopts.set_snapshot(self);
         DBIterator::new_cf(self.db, cf_handle, &readopts, mode)
@@ -527,9 +545,7 @@ impl<'a> Snapshot<'a> {
         DBRawIterator::new(self.db, &readopts)
     }

-    pub fn raw_iterator_cf(&self,
-                           cf_handle: ColumnFamily)
-                           -> Result<DBRawIterator, Error> {
+    pub fn raw_iterator_cf(&self, cf_handle: ColumnFamily) -> Result<DBRawIterator, Error> {
         let mut readopts = ReadOptions::default();
         readopts.set_snapshot(self);
         DBRawIterator::new_cf(self.db, cf_handle, &readopts)
@@ -541,10 +557,7 @@ impl<'a> Snapshot<'a> {
         self.db.get_opt(key, &readopts)
     }

-    pub fn get_cf(&self,
-                  cf: ColumnFamily,
-                  key: &[u8])
-                  -> Result<Option<DBVector>, Error> {
+    pub fn get_cf(&self, cf: ColumnFamily, key: &[u8]) -> Result<Option<DBVector>, Error> {
         let mut readopts = ReadOptions::default();
         readopts.set_snapshot(self);
         self.db.get_cf_opt(cf, key, &readopts)
@@ -584,16 +597,20 @@ impl DB {
         let cpath = match CString::new(path.to_string_lossy().as_bytes()) {
             Ok(c) => c,
             Err(_) => {
-                return Err(Error::new("Failed to convert path to CString \
+                return Err(Error::new(
+                    "Failed to convert path to CString \
                      when opening DB."
-                    .to_owned()))
+                        .to_owned(),
+                ))
             }
         };

         if let Err(e) = fs::create_dir_all(&path) {
-            return Err(Error::new(format!("Failed to create RocksDB\
+            return Err(Error::new(format!(
+                "Failed to create RocksDB\
                  directory: `{:?}`.",
-                e)));
+                e
+            )));
         }

         let db: *mut ffi::rocksdb_t;
@@ -601,7 +618,7 @@ impl DB {
         if cfs.len() == 0 {
             unsafe {
-                db = ffi_try!(ffi::rocksdb_open(opts.inner, cpath.as_ptr() as *const _));
+                db = ffi_try!(ffi::rocksdb_open(opts.inner, cpath.as_ptr() as *const _,));
             }
         } else {
             let mut cfs_v = cfs.to_vec();
@@ -612,34 +629,39 @@ impl DB {
             // We need to store our CStrings in an intermediate vector
             // so that their pointers remain valid.
-            let c_cfs: Vec<CString> = cfs_v.iter()
+            let c_cfs: Vec<CString> = cfs_v
+                .iter()
                 .map(|cf| CString::new(cf.as_bytes()).unwrap())
                 .collect();

-            let cfnames: Vec<_> = c_cfs.iter().map(|cf| cf.as_ptr()).collect();
+            let mut cfnames: Vec<_> = c_cfs.iter().map(|cf| cf.as_ptr()).collect();

             // These handles will be populated by DB.
             let mut cfhandles: Vec<_> = cfs_v.iter().map(|_| ptr::null_mut()).collect();

             // TODO(tyler) allow options to be passed in.
-            let cfopts: Vec<_> = cfs_v.iter()
+            let mut cfopts: Vec<_> = cfs_v
+                .iter()
                 .map(|_| unsafe { ffi::rocksdb_options_create() as *const _ })
                 .collect();

             unsafe {
-                db = ffi_try!(ffi::rocksdb_open_column_families(opts.inner,
-                                                                cpath.as_ptr() as *const _,
-                                                                cfs_v.len() as c_int,
-                                                                cfnames.as_ptr() as *const _,
-                                                                cfopts.as_ptr(),
-                                                                cfhandles.as_mut_ptr()));
+                db = ffi_try!(ffi::rocksdb_open_column_families(
+                    opts.inner,
+                    cpath.as_ptr(),
+                    cfs_v.len() as c_int,
+                    cfnames.as_mut_ptr(),
+                    cfopts.as_mut_ptr(),
+                    cfhandles.as_mut_ptr(),));
             }

             for handle in &cfhandles {
                 if handle.is_null() {
-                    return Err(Error::new("Received null column family \
+                    return Err(Error::new(
+                        "Received null column family \
                          handle from DB."
-                        .to_owned()));
+                            .to_owned(),
+                    ));
                 }
             }
@@ -664,22 +686,27 @@ impl DB {
         let cpath = match CString::new(path.to_string_lossy().as_bytes()) {
             Ok(c) => c,
             Err(_) => {
-                return Err(Error::new("Failed to convert path to CString \
+                return Err(Error::new(
+                    "Failed to convert path to CString \
                      when opening DB."
-                    .to_owned()))
+                        .to_owned(),
+                ))
             }
         };

         let mut length = 0;

         unsafe {
-            let ptr = ffi_try!(ffi::rocksdb_list_column_families(opts.inner,
-                                                                 cpath.as_ptr() as *const _,
-                                                                 &mut length));
+            let ptr = ffi_try!(ffi::rocksdb_list_column_families(
+                opts.inner,
+                cpath.as_ptr() as *const _,
+                &mut length,
+            ));

-            let vec = Vec::from_raw_parts(ptr, length, length).iter().map(|&ptr| {
-                CString::from_raw(ptr).into_string().unwrap()
-            }).collect();
+            let vec = Vec::from_raw_parts(ptr, length, length)
+                .iter()
+                .map(|&ptr| CString::from_raw(ptr).into_string().unwrap())
+                .collect();
             Ok(vec)
         }
     }
@@ -688,7 +715,7 @@ impl DB {
     pub fn destroy<P: AsRef<Path>>(opts: &Options, path: P) -> Result<(), Error> {
         let cpath = CString::new(path.as_ref().to_string_lossy().as_bytes()).unwrap();
         unsafe {
-            ffi_try!(ffi::rocksdb_destroy_db(opts.inner, cpath.as_ptr()));
+            ffi_try!(ffi::rocksdb_destroy_db(opts.inner, cpath.as_ptr(),));
         }
         Ok(())
     }
@@ -696,7 +723,7 @@ impl DB {
     pub fn repair<P: AsRef<Path>>(opts: Options, path: P) -> Result<(), Error> {
         let cpath = CString::new(path.as_ref().to_string_lossy().as_bytes()).unwrap();
         unsafe {
-            ffi_try!(ffi::rocksdb_repair_db(opts.inner, cpath.as_ptr()));
+            ffi_try!(ffi::rocksdb_repair_db(opts.inner, cpath.as_ptr(),));
         }
         Ok(())
     }
@@ -707,7 +734,7 @@ impl DB {
     pub fn write_opt(&self, batch: WriteBatch, writeopts: &WriteOptions) -> Result<(), Error> {
         unsafe {
-            ffi_try!(ffi::rocksdb_write(self.inner, writeopts.inner, batch.inner));
+            ffi_try!(ffi::rocksdb_write(self.inner, writeopts.inner, batch.inner,));
         }
         Ok(())
     }
@@ -724,21 +751,25 @@ impl DB {
     pub fn get_opt(&self, key: &[u8], readopts: &ReadOptions) -> Result<Option<DBVector>, Error> {
         if readopts.inner.is_null() {
-            return Err(Error::new("Unable to create RocksDB read options. \
+            return Err(Error::new(
+                "Unable to create RocksDB read options. \
                  This is a fairly trivial call, and its \
                  failure may be indicative of a \
                  mis-compiled or mis-loaded RocksDB \
                  library."
-                .to_owned()));
+                    .to_owned(),
+            ));
         }

         unsafe {
             let mut val_len: size_t = 0;
-            let val = ffi_try!(ffi::rocksdb_get(self.inner,
-                                                readopts.inner,
-                                                key.as_ptr() as *const c_char,
-                                                key.len() as size_t,
-                                                &mut val_len)) as *mut u8;
+            let val = ffi_try!(ffi::rocksdb_get(
+                self.inner,
+                readopts.inner,
+                key.as_ptr() as *const c_char,
+                key.len() as size_t,
+                &mut val_len,
+            )) as *mut u8;
             if val.is_null() {
                 Ok(None)
             } else {
@@ -752,28 +783,33 @@ impl DB {
         self.get_opt(key, &ReadOptions::default())
     }

-    pub fn get_cf_opt(&self,
-                      cf: ColumnFamily,
-                      key: &[u8],
-                      readopts: &ReadOptions)
-                      -> Result<Option<DBVector>, Error> {
+    pub fn get_cf_opt(
+        &self,
+        cf: ColumnFamily,
+        key: &[u8],
+        readopts: &ReadOptions,
+    ) -> Result<Option<DBVector>, Error> {
         if readopts.inner.is_null() {
-            return Err(Error::new("Unable to create RocksDB read options. \
+            return Err(Error::new(
+                "Unable to create RocksDB read options. \
                  This is a fairly trivial call, and its \
                  failure may be indicative of a \
                  mis-compiled or mis-loaded RocksDB \
                  library."
-                .to_owned()));
+                    .to_owned(),
+            ));
         }

         unsafe {
             let mut val_len: size_t = 0;
-            let val = ffi_try!(ffi::rocksdb_get_cf(self.inner,
-                                                   readopts.inner,
-                                                   cf.inner,
-                                                   key.as_ptr() as *const c_char,
-                                                   key.len() as size_t,
-                                                   &mut val_len)) as *mut u8;
+            let val = ffi_try!(ffi::rocksdb_get_cf(
+                self.inner,
+                readopts.inner,
+                cf.inner,
+                key.as_ptr() as *const c_char,
+                key.len() as size_t,
+                &mut val_len,
+            )) as *mut u8;
             if val.is_null() {
                 Ok(None)
             } else {
@@ -782,28 +818,27 @@ impl DB {
         }
     }

-    pub fn get_cf(&self,
-                  cf: ColumnFamily,
-                  key: &[u8])
-                  -> Result<Option<DBVector>, Error> {
+    pub fn get_cf(&self, cf: ColumnFamily, key: &[u8]) -> Result<Option<DBVector>, Error> {
         self.get_cf_opt(cf, key, &ReadOptions::default())
     }

-    pub fn create_cf(&mut self,
-                     name: &str,
-                     opts: &Options)
-                     -> Result<ColumnFamily, Error> {
+    pub fn create_cf(&mut self, name: &str, opts: &Options) -> Result<ColumnFamily, Error> {
         let cname = match CString::new(name.as_bytes()) {
             Ok(c) => c,
             Err(_) => {
-                return Err(Error::new("Failed to convert path to CString \
+                return Err(Error::new(
+                    "Failed to convert path to CString \
                      when opening rocksdb"
-                    .to_owned()))
+                        .to_owned(),
+                ))
             }
         };
         let cf = unsafe {
-            let cf_handler =
-                ffi_try!(ffi::rocksdb_create_column_family(self.inner, opts.inner, cname.as_ptr()));
+            let cf_handler = ffi_try!(ffi::rocksdb_create_column_family(
+                self.inner,
+                opts.inner,
+                cname.as_ptr(),
+            ));
             let cf = ColumnFamily { inner: cf_handler };
             self.cfs.insert(name.to_string(), cf);
             cf
@@ -814,10 +849,15 @@ impl DB {
     pub fn drop_cf(&mut self, name: &str) -> Result<(), Error> {
         let cf = self.cfs.get(name);
         if cf.is_none() {
-            return Err(Error::new(format!("Invalid column family: {}", name).to_owned()));
+            return Err(Error::new(
+                format!("Invalid column family: {}", name).to_owned(),
+            ));
         }
         unsafe {
-            ffi_try!(ffi::rocksdb_drop_column_family(self.inner, cf.unwrap().inner));
+            ffi_try!(ffi::rocksdb_drop_column_family(
+                self.inner,
+                cf.unwrap().inner,
+            ));
         }
         Ok(())
     }
@@ -832,10 +872,11 @@ impl DB {
         DBIterator::new(self, &opts, mode)
     }

-    pub fn iterator_cf(&self,
-                       cf_handle: ColumnFamily,
-                       mode: IteratorMode)
-                       -> Result<DBIterator, Error> {
+    pub fn iterator_cf(
+        &self,
+        cf_handle: ColumnFamily,
+        mode: IteratorMode,
+    ) -> Result<DBIterator, Error> {
         let opts = ReadOptions::default();
         DBIterator::new_cf(self, cf_handle, &opts, mode)
     }
@@ -845,9 +886,7 @@ impl DB {
         DBRawIterator::new(self, &opts)
     }

-    pub fn raw_iterator_cf(&self,
-                           cf_handle: ColumnFamily)
-                           -> Result<DBRawIterator, Error> {
+    pub fn raw_iterator_cf(&self, cf_handle: ColumnFamily) -> Result<DBRawIterator, Error> {
         let opts = ReadOptions::default();
         DBRawIterator::new_cf(self, cf_handle, &opts)
     }
@@ -858,89 +897,105 @@ impl DB {
     pub fn put_opt(&self, key: &[u8], value: &[u8], writeopts: &WriteOptions) -> Result<(), Error> {
         unsafe {
-            ffi_try!(ffi::rocksdb_put(self.inner,
-                                      writeopts.inner,
-                                      key.as_ptr() as *const c_char,
-                                      key.len() as size_t,
-                                      value.as_ptr() as *const c_char,
-                                      value.len() as size_t));
+            ffi_try!(ffi::rocksdb_put(
+                self.inner,
+                writeopts.inner,
+                key.as_ptr() as *const c_char,
+                key.len() as size_t,
+                value.as_ptr() as *const c_char,
+                value.len() as size_t,
+            ));
             Ok(())
         }
     }

-    pub fn put_cf_opt(&self,
-                      cf: ColumnFamily,
-                      key: &[u8],
-                      value: &[u8],
-                      writeopts: &WriteOptions)
-                      -> Result<(), Error> {
+    pub fn put_cf_opt(
+        &self,
+        cf: ColumnFamily,
+        key: &[u8],
+        value: &[u8],
+        writeopts: &WriteOptions,
+    ) -> Result<(), Error> {
         unsafe {
-            ffi_try!(ffi::rocksdb_put_cf(self.inner,
-                                         writeopts.inner,
-                                         cf.inner,
-                                         key.as_ptr() as *const c_char,
-                                         key.len() as size_t,
-                                         value.as_ptr() as *const c_char,
-                                         value.len() as size_t));
+            ffi_try!(ffi::rocksdb_put_cf(
+                self.inner,
+                writeopts.inner,
+                cf.inner,
+                key.as_ptr() as *const c_char,
+                key.len() as size_t,
+                value.as_ptr() as *const c_char,
+                value.len() as size_t,
+            ));
             Ok(())
         }
     }

-    pub fn merge_opt(&self,
-                     key: &[u8],
-                     value: &[u8],
-                     writeopts: &WriteOptions)
-                     -> Result<(), Error> {
+    pub fn merge_opt(
+        &self,
+        key: &[u8],
+        value: &[u8],
+        writeopts: &WriteOptions,
+    ) -> Result<(), Error> {
         unsafe {
-            ffi_try!(ffi::rocksdb_merge(self.inner,
-                                        writeopts.inner,
-                                        key.as_ptr() as *const c_char,
-                                        key.len() as size_t,
-                                        value.as_ptr() as *const c_char,
-                                        value.len() as size_t));
+            ffi_try!(ffi::rocksdb_merge(
+                self.inner,
+                writeopts.inner,
+                key.as_ptr() as *const c_char,
+                key.len() as size_t,
+                value.as_ptr() as *const c_char,
+                value.len() as size_t,
+            ));
             Ok(())
         }
     }

-    pub fn merge_cf_opt(&self,
-                        cf: ColumnFamily,
-                        key: &[u8],
-                        value: &[u8],
-                        writeopts: &WriteOptions)
-                        -> Result<(), Error> {
+    pub fn merge_cf_opt(
+        &self,
+        cf: ColumnFamily,
+        key: &[u8],
+        value: &[u8],
+        writeopts: &WriteOptions,
+    ) -> Result<(), Error> {
         unsafe {
-            ffi_try!(ffi::rocksdb_merge_cf(self.inner,
-                                           writeopts.inner,
-                                           cf.inner,
-                                           key.as_ptr() as *const c_char,
-                                           key.len() as size_t,
-                                           value.as_ptr() as *const c_char,
-                                           value.len() as size_t));
+            ffi_try!(ffi::rocksdb_merge_cf(
+                self.inner,
+                writeopts.inner,
+                cf.inner,
+                key.as_ptr() as *const c_char,
+                key.len() as size_t,
+                value.as_ptr() as *const c_char,
+                value.len() as size_t,
+            ));
             Ok(())
         }
     }

     pub fn delete_opt(&self, key: &[u8], writeopts: &WriteOptions) -> Result<(), Error> {
         unsafe {
-            ffi_try!(ffi::rocksdb_delete(self.inner,
-                                         writeopts.inner,
-                                         key.as_ptr() as *const c_char,
-                                         key.len() as size_t));
+            ffi_try!(ffi::rocksdb_delete(
+                self.inner,
+                writeopts.inner,
+                key.as_ptr() as *const c_char,
+                key.len() as size_t,
+            ));
             Ok(())
         }
     }

-    pub fn delete_cf_opt(&self,
-                         cf: ColumnFamily,
-                         key: &[u8],
-                         writeopts: &WriteOptions)
-                         -> Result<(), Error> {
+    pub fn delete_cf_opt(
+        &self,
+        cf: ColumnFamily,
+        key: &[u8],
+        writeopts: &WriteOptions,
+    ) -> Result<(), Error> {
         unsafe {
-            ffi_try!(ffi::rocksdb_delete_cf(self.inner,
-                                            writeopts.inner,
-                                            cf.inner,
-                                            key.as_ptr() as *const c_char,
-                                            key.len() as size_t));
+            ffi_try!(ffi::rocksdb_delete_cf(
+                self.inner,
+                writeopts.inner,
+                cf.inner,
+                key.as_ptr() as *const c_char,
+                key.len() as size_t,
+            ));
             Ok(())
         }
     }
@@ -949,11 +1004,7 @@ impl DB {
         self.put_opt(key, value, &WriteOptions::default())
     }

-    pub fn put_cf(&self,
-                  cf: ColumnFamily,
-                  key: &[u8],
-                  value: &[u8])
-                  -> Result<(), Error> {
+    pub fn put_cf(&self, cf: ColumnFamily, key: &[u8], value: &[u8]) -> Result<(), Error> {
         self.put_cf_opt(cf, key, value, &WriteOptions::default())
     }
@@ -961,11 +1012,7 @@ impl DB {
         self.merge_opt(key, value, &WriteOptions::default())
     }

-    pub fn merge_cf(&self,
-                    cf: ColumnFamily,
-                    key: &[u8],
-                    value: &[u8])
-                    -> Result<(), Error> {
+    pub fn merge_cf(&self, cf: ColumnFamily, key: &[u8], value: &[u8]) -> Result<(), Error> {
         self.merge_cf_opt(cf, key, value, &WriteOptions::default())
     }
@@ -973,34 +1020,32 @@ impl DB {
         self.delete_opt(key, &WriteOptions::default())
     }

-    pub fn delete_cf(&self,
-                     cf: ColumnFamily,
-                     key: &[u8])
-                     -> Result<(), Error> {
+    pub fn delete_cf(&self, cf: ColumnFamily, key: &[u8]) -> Result<(), Error> {
         self.delete_cf_opt(cf, key, &WriteOptions::default())
     }

     pub fn compact_range(&self, start: Option<&[u8]>, end: Option<&[u8]>) {
         unsafe {
-            ffi::rocksdb_compact_range(self.inner,
-                                       opt_bytes_to_ptr(start),
-                                       start.map_or(0, |s| s.len()) as size_t,
-                                       opt_bytes_to_ptr(end),
-                                       end.map_or(0, |e| e.len()) as size_t);
+            ffi::rocksdb_compact_range(
+                self.inner,
+                opt_bytes_to_ptr(start),
+                start.map_or(0, |s| s.len()) as size_t,
+                opt_bytes_to_ptr(end),
+                end.map_or(0, |e| e.len()) as size_t,
+            );
         }
     }

-    pub fn compact_range_cf(&self,
-                            cf: ColumnFamily,
-                            start: Option<&[u8]>,
-                            end: Option<&[u8]>) {
+    pub fn compact_range_cf(&self, cf: ColumnFamily, start: Option<&[u8]>, end: Option<&[u8]>) {
         unsafe {
-            ffi::rocksdb_compact_range_cf(self.inner,
-                                          cf.inner,
-                                          opt_bytes_to_ptr(start),
-                                          start.map_or(0, |s| s.len()) as size_t,
-                                          opt_bytes_to_ptr(end),
-                                          end.map_or(0, |e| e.len()) as size_t);
+            ffi::rocksdb_compact_range_cf(
+                self.inner,
+                cf.inner,
+                opt_bytes_to_ptr(start),
+                start.map_or(0, |s| s.len()) as size_t,
+                opt_bytes_to_ptr(end),
+                end.map_or(0, |e| e.len()) as size_t,
+            );
         }
     }
 }
@@ -1017,54 +1062,54 @@ impl WriteBatch {
     /// Insert a value into the database under the given key.
     pub fn put(&mut self, key: &[u8], value: &[u8]) -> Result<(), Error> {
         unsafe {
-            ffi::rocksdb_writebatch_put(self.inner,
-                                        key.as_ptr() as *const c_char,
-                                        key.len() as size_t,
-                                        value.as_ptr() as *const c_char,
-                                        value.len() as size_t);
+            ffi::rocksdb_writebatch_put(
+                self.inner,
+                key.as_ptr() as *const c_char,
+                key.len() as size_t,
+                value.as_ptr() as *const c_char,
+                value.len() as size_t,
+            );
             Ok(())
         }
     }

-    pub fn put_cf(&mut self,
-                  cf: ColumnFamily,
-                  key: &[u8],
-                  value: &[u8])
-                  -> Result<(), Error> {
+    pub fn put_cf(&mut self, cf: ColumnFamily, key: &[u8], value: &[u8]) -> Result<(), Error> {
         unsafe {
-            ffi::rocksdb_writebatch_put_cf(self.inner,
-                                           cf.inner,
-                                           key.as_ptr() as *const c_char,
-                                           key.len() as size_t,
-                                           value.as_ptr() as *const c_char,
-                                           value.len() as size_t);
+            ffi::rocksdb_writebatch_put_cf(
+                self.inner,
+                cf.inner,
+                key.as_ptr() as *const c_char,
+                key.len() as size_t,
+                value.as_ptr() as *const c_char,
+                value.len() as size_t,
+            );
             Ok(())
         }
     }

     pub fn merge(&mut self, key: &[u8], value: &[u8]) -> Result<(), Error> {
         unsafe {
-            ffi::rocksdb_writebatch_merge(self.inner,
-                                          key.as_ptr() as *const c_char,
-                                          key.len() as size_t,
-                                          value.as_ptr() as *const c_char,
-                                          value.len() as size_t);
+            ffi::rocksdb_writebatch_merge(
+                self.inner,
+                key.as_ptr() as *const c_char,
+                key.len() as size_t,
+                value.as_ptr() as *const c_char,
+                value.len() as size_t,
+            );
             Ok(())
         }
     }

-    pub fn merge_cf(&mut self,
-                    cf: ColumnFamily,
-                    key: &[u8],
-                    value: &[u8])
-                    -> Result<(), Error> {
+    pub fn merge_cf(&mut self, cf: ColumnFamily, key: &[u8], value: &[u8]) -> Result<(), Error> {
         unsafe {
-            ffi::rocksdb_writebatch_merge_cf(self.inner,
-                                             cf.inner,
-                                             key.as_ptr() as *const c_char,
-                                             key.len() as size_t,
-                                             value.as_ptr() as *const c_char,
-                                             value.len() as size_t);
+            ffi::rocksdb_writebatch_merge_cf(
+                self.inner,
+                cf.inner,
+                key.as_ptr() as *const c_char,
+                key.len() as size_t,
+                value.as_ptr() as *const c_char,
+                value.len() as size_t,
+            );
             Ok(())
         }
     }
@@ -1074,22 +1119,23 @@ impl WriteBatch {
     /// Returns an error if the key was not found.
     pub fn delete(&mut self, key: &[u8]) -> Result<(), Error> {
         unsafe {
-            ffi::rocksdb_writebatch_delete(self.inner,
-                                           key.as_ptr() as *const c_char,
-                                           key.len() as size_t);
+            ffi::rocksdb_writebatch_delete(
+                self.inner,
+                key.as_ptr() as *const c_char,
+                key.len() as size_t,
+            );
             Ok(())
         }
     }

-    pub fn delete_cf(&mut self,
-                     cf: ColumnFamily,
-                     key: &[u8])
-                     -> Result<(), Error> {
+    pub fn delete_cf(&mut self, cf: ColumnFamily, key: &[u8]) -> Result<(), Error> {
         unsafe {
-            ffi::rocksdb_writebatch_delete_cf(self.inner,
-                                              cf.inner,
-                                              key.as_ptr() as *const c_char,
-                                              key.len() as size_t);
+            ffi::rocksdb_writebatch_delete_cf(
+                self.inner,
+                cf.inner,
+                key.as_ptr() as *const c_char,
+                key.len() as size_t,
+            );
             Ok(())
         }
     }
@@ -1149,9 +1195,11 @@ impl ReadOptions {
     pub fn set_iterate_upper_bound(&mut self, key: &[u8]) {
         unsafe {
-            ffi::rocksdb_readoptions_set_iterate_upper_bound(self.inner,
-                                                             key.as_ptr() as *const c_char,
-                                                             key.len() as size_t);
+            ffi::rocksdb_readoptions_set_iterate_upper_bound(
+                self.inner,
+                key.as_ptr() as *const c_char,
+                key.len() as size_t,
+            );
         }
     }
 }
@@ -1310,9 +1358,11 @@ fn iterator_test() {
         assert!(p.is_ok());
         let iter = db.iterator(IteratorMode::Start);
         for (k, v) in iter {
-            println!("Hello {}: {}",
-                     str::from_utf8(&*k).unwrap(),
-                     str::from_utf8(&*v).unwrap());
+            println!(
+                "Hello {}: {}",
+                str::from_utf8(&*k).unwrap(),
+                str::from_utf8(&*v).unwrap()
+            );
         }
     }
     let opts = Options::default();
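A hedged round-trip through the column-family methods whose signatures were reflowed above (`create_cf`, `put_cf`, `get_cf`, `delete_cf`); the path is illustrative:

```rust
use rocksdb::{DB, Options};

fn cf_round_trip() -> Result<(), rocksdb::Error> {
    let mut opts = Options::default();
    opts.create_if_missing(true);
    let mut db = DB::open(&opts, "/tmp/rocksdb_cf_example")?;

    // ColumnFamily is a small Copy handle, so it can be reused freely.
    let cf = db.create_cf("cf1", &Options::default())?;
    db.put_cf(cf, b"k", b"v")?;
    assert_eq!(db.get_cf(cf, b"k")?.unwrap().to_vec(), b"v".to_vec());
    db.delete_cf(cf, b"k")?;
    Ok(())
}
```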

src/db_options.rs
@@ -16,13 +16,13 @@
 use {BlockBasedOptions, DBCompactionStyle, DBCompressionType, DBRecoveryMode, Options,
      WriteOptions};
+use compaction_filter::{self, CompactionFilterCallback, CompactionFilterFn, filter_callback};
 use comparator::{self, ComparatorCallback, CompareFn};
 use ffi;
 use libc::{self, c_int, c_uchar, c_uint, c_void, size_t, uint64_t};
 use merge_operator::{self, MergeFn, MergeOperatorCallback, full_merge_callback,
                      partial_merge_callback};
-use compaction_filter::{self, CompactionFilterCallback, CompactionFilterFn, filter_callback};
 use std::ffi::{CStr, CString};
 use std::mem;
@@ -124,7 +124,8 @@ impl Options {
         unsafe {
             ffi::rocksdb_options_optimize_level_style_compaction(
                 self.inner,
-                memtable_memory_budget as uint64_t);
+                memtable_memory_budget as uint64_t,
+            );
         }
     }
@@ -190,10 +191,12 @@ impl Options {
     /// ```
     pub fn set_compression_per_level(&mut self, level_types: &[DBCompressionType]) {
         unsafe {
-            let level_types: Vec<_> = level_types.iter().map(|&t| t as c_int).collect();
-            ffi::rocksdb_options_set_compression_per_level(self.inner,
-                                                           level_types.as_ptr(),
-                                                           level_types.len() as size_t)
+            let mut level_types: Vec<_> = level_types.iter().map(|&t| t as c_int).collect();
+            ffi::rocksdb_options_set_compression_per_level(
+                self.inner,
+                level_types.as_mut_ptr(),
+                level_types.len() as size_t,
+            )
         }
     }
@@ -204,17 +207,20 @@ impl Options {
         });

         unsafe {
-            let mo = ffi::rocksdb_mergeoperator_create(mem::transmute(cb),
-                                                       Some(merge_operator::destructor_callback),
-                                                       Some(full_merge_callback),
-                                                       Some(partial_merge_callback),
-                                                       None,
-                                                       Some(merge_operator::name_callback));
+            let mo = ffi::rocksdb_mergeoperator_create(
+                mem::transmute(cb),
+                Some(merge_operator::destructor_callback),
+                Some(full_merge_callback),
+                Some(partial_merge_callback),
+                None,
+                Some(merge_operator::name_callback),
+            );
             ffi::rocksdb_options_set_merge_operator(self.inner, mo);
         }
     }

-    #[deprecated(since="0.5.0", note="add_merge_operator has been renamed to set_merge_operator")]
+    #[deprecated(since = "0.5.0",
+                 note = "add_merge_operator has been renamed to set_merge_operator")]
     pub fn add_merge_operator(&mut self, name: &str, merge_fn: MergeFn) {
         self.set_merge_operator(name, merge_fn);
     }
@@ -230,7 +236,8 @@ impl Options {
     /// If multi-threaded compaction is used, `filter_fn` may be called multiple times
     /// simultaneously.
     pub fn set_compaction_filter<F>(&mut self, name: &str, filter_fn: F)
-        where F: CompactionFilterFn + Send + 'static
+    where
+        F: CompactionFilterFn + Send + 'static,
     {
         let cb = Box::new(CompactionFilterCallback {
             name: CString::new(name.as_bytes()).unwrap(),
@@ -238,10 +245,12 @@ impl Options {
         });

         unsafe {
-            let cf = ffi::rocksdb_compactionfilter_create(mem::transmute(cb),
-                                                          Some(compaction_filter::destructor_callback::<F>),
-                                                          Some(filter_callback::<F>),
-                                                          Some(compaction_filter::name_callback::<F>));
+            let cf = ffi::rocksdb_compactionfilter_create(
+                mem::transmute(cb),
+                Some(compaction_filter::destructor_callback::<F>),
+                Some(filter_callback::<F>),
+                Some(compaction_filter::name_callback::<F>),
+            );
             ffi::rocksdb_options_set_compaction_filter(self.inner, cf);
         }
     }
@@ -259,10 +268,12 @@ impl Options {
         });

         unsafe {
-            let cmp = ffi::rocksdb_comparator_create(mem::transmute(cb),
-                                                     Some(comparator::destructor_callback),
-                                                     Some(comparator::compare_callback),
-                                                     Some(comparator::name_callback));
+            let cmp = ffi::rocksdb_comparator_create(
+                mem::transmute(cb),
+                Some(comparator::destructor_callback),
+                Some(comparator::compare_callback),
+                Some(comparator::name_callback),
+            );
             ffi::rocksdb_options_set_comparator(self.inner, cmp);
         }
     }
@@ -365,8 +376,9 @@ impl Options {
     /// opts.set_allow_concurrent_memtable_write(false);
     /// ```
     pub fn set_allow_concurrent_memtable_write(&mut self, allow: bool) {
-        unsafe { ffi::rocksdb_options_set_allow_concurrent_memtable_write(self.inner,
-                                                                          allow as c_uchar) }
+        unsafe {
+            ffi::rocksdb_options_set_allow_concurrent_memtable_write(self.inner, allow as c_uchar)
+        }
     }

     /// Enable direct I/O mode for reading
@@ -413,8 +425,10 @@ impl Options {
     /// ```
     pub fn set_use_direct_io_for_flush_and_compaction(&mut self, enabled: bool) {
         unsafe {
-            ffi::rocksdb_options_set_use_direct_io_for_flush_and_compaction(self.inner,
-                                                                            enabled as c_uchar);
+            ffi::rocksdb_options_set_use_direct_io_for_flush_and_compaction(
+                self.inner,
+                enabled as c_uchar,
+            );
         }
     }
@@ -446,7 +460,8 @@ impl Options {
     /// let mut opts = Options::default();
     /// opts.set_allow_os_buffer(false);
     /// ```
-    #[deprecated(since="0.7.0", note="replaced with set_use_direct_reads/set_use_direct_io_for_flush_and_compaction methods")]
+    #[deprecated(since = "0.7.0",
+                 note = "replaced with set_use_direct_reads/set_use_direct_io_for_flush_and_compaction methods")]
     pub fn set_allow_os_buffer(&mut self, is_allow: bool) {
         self.set_use_direct_reads(!is_allow);
         self.set_use_direct_io_for_flush_and_compaction(!is_allow);
@@ -599,9 +614,9 @@ impl Options {
     /// use rocksdb::Options;
     ///
     /// let mut opts = Options::default();
-    /// opts.set_max_bytes_for_level_multiplier(4);
+    /// opts.set_max_bytes_for_level_multiplier(4.0);
     /// ```
-    pub fn set_max_bytes_for_level_multiplier(&mut self, mul: i32) {
+    pub fn set_max_bytes_for_level_multiplier(&mut self, mul: f64) {
         unsafe {
             ffi::rocksdb_options_set_max_bytes_for_level_multiplier(self.inner, mul);
         }

src/ffi_util.rs
@@ -34,7 +34,7 @@ pub fn opt_bytes_to_ptr(opt: Option<&[u8]>) -> *const c_char {
 }

 macro_rules! ffi_try {
-    ( $($function:ident)::*( $( $arg:expr ),* ) ) => ({
+    ( $($function:ident)::*( $( $arg:expr,)* ) ) => ({
         let mut err: *mut ::libc::c_char = ::std::ptr::null_mut();
         let result = $($function)::*($($arg),*, &mut err);
         if !err.is_null() {
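Why the matcher changed: with `trailing_comma = always`, rustfmt now emits a trailing comma at the reformatted `ffi_try!` call sites (for example `ffi_try!(ffi::rocksdb_destroy_db(opts.inner, cpath.as_ptr(),))` above), and the old `$( $arg:expr ),*` matcher would reject `f(a, b,)`. A minimal standalone sketch of the same idea, not the crate's actual macro:

```rust
// `$( $arg:expr, )*` requires every argument to be followed by a comma,
// which is exactly what the rustfmt-formatted call sites now produce.
macro_rules! call_with_trailing_commas {
    ( $f:ident( $( $arg:expr, )* ) ) => {
        $f($($arg),*)
    };
}

fn add(a: i32, b: i32) -> i32 {
    a + b
}

fn main() {
    // The call site must end its argument list with a comma.
    assert_eq!(call_with_trailing_commas!(add(1, 2,)), 3);
}
```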

src/lib.rs
@@ -44,11 +44,12 @@ pub mod compaction_filter;
 mod db;
 mod db_options;

-pub use db::{DBCompactionStyle, DBCompressionType, DBIterator, DBRawIterator, DBRecoveryMode, DBVector,
-             ReadOptions, Direction, IteratorMode, Snapshot, WriteBatch, new_bloom_filter};
+pub use compaction_filter::Decision as CompactionDecision;
+pub use db::{DBCompactionStyle, DBCompressionType, DBIterator, DBRawIterator, DBRecoveryMode,
+             DBVector, ReadOptions, Direction, IteratorMode, Snapshot, WriteBatch,
+             new_bloom_filter};
 pub use merge_operator::MergeOperands;
-pub use compaction_filter::Decision as CompactionDecision;

 use std::collections::BTreeMap;
 use std::error;
 use std::fmt;

src/merge_operator.rs
@@ -76,7 +76,8 @@ pub unsafe extern "C" fn name_callback(raw_cb: *mut c_void) -> *const c_char {
     cb.name.as_ptr()
 }

-pub unsafe extern "C" fn full_merge_callback(raw_cb: *mut c_void,
+pub unsafe extern "C" fn full_merge_callback(
+    raw_cb: *mut c_void,
     raw_key: *const c_char,
     key_len: size_t,
     existing_value: *const c_char,
@@ -85,8 +86,8 @@ pub unsafe extern "C" fn full_merge_callback(raw_cb: *mut c_void,
     operands_list_len: *const size_t,
     num_operands: c_int,
     success: *mut u8,
-    new_value_length: *mut size_t)
-    -> *mut c_char {
+    new_value_length: *mut size_t,
+) -> *mut c_char {
     let cb = &mut *(raw_cb as *mut MergeOperatorCallback);
     let operands = &mut MergeOperands::new(operands_list, operands_list_len, num_operands);
     let key = slice::from_raw_parts(raw_key as *const u8, key_len as usize);
@@ -102,15 +103,16 @@ pub unsafe extern "C" fn full_merge_callback(raw_cb: *mut c_void,
     buf as *mut c_char
 }

-pub unsafe extern "C" fn partial_merge_callback(raw_cb: *mut c_void,
+pub unsafe extern "C" fn partial_merge_callback(
+    raw_cb: *mut c_void,
     raw_key: *const c_char,
     key_len: size_t,
     operands_list: *const *const c_char,
     operands_list_len: *const size_t,
     num_operands: c_int,
     success: *mut u8,
-    new_value_length: *mut size_t)
-    -> *mut c_char {
+    new_value_length: *mut size_t,
+) -> *mut c_char {
     let cb = &mut *(raw_cb as *mut MergeOperatorCallback);
     let operands = &mut MergeOperands::new(operands_list, operands_list_len, num_operands);
     let key = slice::from_raw_parts(raw_key as *const u8, key_len as usize);
@@ -134,10 +136,11 @@ pub struct MergeOperands {
 }

 impl MergeOperands {
-    fn new(operands_list: *const *const c_char,
-           operands_list_len: *const size_t,
-           num_operands: c_int)
-           -> MergeOperands {
+    fn new(
+        operands_list: *const *const c_char,
+        operands_list_len: *const size_t,
+        num_operands: c_int,
+    ) -> MergeOperands {
         assert!(num_operands >= 0);
         MergeOperands {
             operands_list: operands_list,
@@ -164,8 +167,10 @@ impl<'a> Iterator for &'a mut MergeOperands {
             let len = *len_ptr as usize;
             let ptr = base + (spacing * self.cursor);
             self.cursor += 1;
-            Some(mem::transmute(slice::from_raw_parts(*(ptr as *const *const u8) as *const u8,
-                                                      len)))
+            Some(mem::transmute(slice::from_raw_parts(
+                *(ptr as *const *const u8) as *const u8,
+                len,
+            )))
         }
     }
 }
@@ -178,10 +183,11 @@ impl<'a> Iterator for &'a mut MergeOperands {
 #[cfg(test)]
 #[allow(unused_variables)]
-fn test_provided_merge(new_key: &[u8],
-                       existing_val: Option<&[u8]>,
-                       operands: &mut MergeOperands)
-                       -> Vec<u8> {
+fn test_provided_merge(
+    new_key: &[u8],
+    existing_val: Option<&[u8]>,
+    operands: &mut MergeOperands,
+) -> Vec<u8> {
     let nops = operands.size_hint().0;
     let mut result: Vec<u8> = Vec::with_capacity(nops);
     if let Some(v) = existing_val {
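A hedged sketch of wiring a merge operator together, using the `MergeFn` shape visible in the `test_provided_merge` hunk above; the operator name and path are illustrative:

```rust
use rocksdb::{DB, MergeOperands, Options};

// Concatenate all merge operands onto the existing value, if any.
fn concat_merge(_key: &[u8], existing: Option<&[u8]>, operands: &mut MergeOperands) -> Vec<u8> {
    let mut result = existing.map(|v| v.to_vec()).unwrap_or_default();
    for op in operands {
        result.extend_from_slice(op);
    }
    result
}

fn main() {
    let mut opts = Options::default();
    opts.create_if_missing(true);
    opts.set_merge_operator("concat", concat_merge);
    let db = DB::open(&opts, "/tmp/rocksdb_merge_example").unwrap();
    db.merge(b"key", b"a").unwrap();
    db.merge(b"key", b"b").unwrap();
    assert_eq!(db.get(b"key").unwrap().unwrap().to_vec(), b"ab".to_vec());
}
```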
