Merge pull request #60 from spacejam/tyler_cleanup

cleanup for 0.4.0
master
Tyler Neely 9 years ago committed by GitHub
commit fea624461b
  1. 4
      Cargo.toml
  2. 10
      README.md
  3. 18
      src/ffi.rs
  4. 23
      src/main.rs
  5. 19
      src/merge_operator.rs
  6. 106
      src/rocksdb.rs
  7. 128
      src/rocksdb_options.rs
  8. 14
      test/test_column_family.rs
  9. 2
      test/test_iterator.rs
  10. 2
      test/test_multithreaded.rs

@ -2,7 +2,7 @@
name = "rocksdb"
description = "A Rust wrapper for Facebook's RocksDB embeddable database."
version = "0.3.5"
version = "0.4.0"
authors = ["Tyler Neely <t@jujit.su>", "David Greenberg <dsg123456789@gmail.com>"]
license = "Apache-2.0"
keywords = ["database", "embedded", "LSM-tree", "persistence"]
@ -24,4 +24,4 @@ name = "test"
path = "test/test.rs"
[dependencies]
libc = "0.2.10"
libc = "0.2.13"

@ -3,7 +3,7 @@ rust-rocksdb
[![Build Status](https://travis-ci.org/spacejam/rust-rocksdb.svg?branch=master)](https://travis-ci.org/spacejam/rust-rocksdb)
[![crates.io](http://meritbadge.herokuapp.com/rocksdb)](https://crates.io/crates/rocksdb)
This library has been tested against RocksDB 3.13.1 on linux and OSX. The 0.3.5 crate should work with the Rust 1.5 stable and nightly releases as of 5/1/16.
This library has been tested against RocksDB 3.13.1 on linux and OSX. The 0.4.0 crate should work with the Rust 1.9 stable and nightly releases as of 7/1/16.
### status
- [x] basic open/put/get/delete/close
@ -36,7 +36,7 @@ sudo make install
###### Cargo.toml
```rust
[dependencies]
rocksdb = "0.3.5"
rocksdb = "0.4.0"
```
###### Code
```rust
@ -65,7 +65,7 @@ fn main() {
// NB: db is automatically freed at end of lifetime
let mut db = DB::open_default("/path/for/rocksdb/storage").unwrap();
{
let mut batch = WriteBatch::new(); // WriteBatch and db both have trait Writable
let mut batch = WriteBatch::default(); // WriteBatch and db both have trait Writable
batch.put(b"my key", b"my value");
batch.put(b"key2", b"value2");
batch.put(b"key3", b"value3");
@ -139,7 +139,7 @@ fn concat_merge(new_key: &[u8], existing_val: Option<&[u8]>,
fn main() {
let path = "/path/to/rocksdb";
let mut opts = Options::new();
let mut opts = Options::default();
opts.create_if_missing(true);
opts.add_merge_operator("test operator", concat_merge);
let mut db = DB::open(&opts, path).unwrap();
@ -161,7 +161,7 @@ use rocksdb::DBCompactionStyle::DBUniversalCompaction;
fn badly_tuned_for_somebody_elses_disk() -> DB {
let path = "_rust_rocksdb_optimizetest";
let mut opts = Options::new();
let mut opts = Options::default();
opts.create_if_missing(true);
opts.set_max_open_files(10000);
opts.set_use_fsync(false);

@ -72,19 +72,19 @@ pub fn new_cache(capacity: size_t) -> DBCache {
#[repr(C)]
pub enum DBCompressionType {
DBNoCompression = 0,
DBSnappyCompression = 1,
DBZlibCompression = 2,
DBBz2Compression = 3,
DBLz4Compression = 4,
DBLz4hcCompression = 5,
None = 0,
Snappy = 1,
Zlib = 2,
Bz2 = 3,
Lz4 = 4,
Lz4hc = 5,
}
#[repr(C)]
pub enum DBCompactionStyle {
DBLevelCompaction = 0,
DBUniversalCompaction = 1,
DBFifoCompaction = 2,
Level = 0,
Universal = 1,
Fifo = 2,
}
#[repr(C)]

@ -41,7 +41,7 @@ use rocksdb::{DB, MergeOperands, Options, Writable};
// std::str::from_utf8(v).unwrap());
// };
// }
// let opts = Options::new();
// let opts = Options::default();
// assert!(DB::destroy(&opts, path).is_ok());
// }
@ -71,14 +71,11 @@ fn concat_merge(_: &[u8],
operands: &mut MergeOperands)
-> Vec<u8> {
let mut result: Vec<u8> = Vec::with_capacity(operands.size_hint().0);
match existing_val {
Some(v) => {
if let Some(v) = existing_val {
for e in v {
result.push(*e)
}
}
None => (),
}
for op in operands {
for e in op {
result.push(*e);
@ -89,7 +86,7 @@ fn concat_merge(_: &[u8],
fn custom_merge() {
let path = "_rust_rocksdb_mergetest";
let mut opts = Options::new();
let mut opts = Options::default();
opts.create_if_missing(true);
opts.add_merge_operator("test operator", concat_merge);
{
@ -117,7 +114,7 @@ fn custom_merge() {
#[cfg(feature = "valgrind")]
fn main() {
let path = "_rust_rocksdb_valgrind";
let mut opts = Options::new();
let mut opts = Options::default();
opts.create_if_missing(true);
opts.add_merge_operator("test operator", concat_merge);
let db = DB::open(&opts, path).unwrap();
@ -144,7 +141,7 @@ fn main() {
#[cfg(test)]
mod tests {
use rocksdb::{BlockBasedOptions, DB, Options};
use rocksdb::DBCompactionStyle::DBUniversalCompaction;
use rocksdb::DBCompactionStyle::Universal;
fn tuned_for_somebody_elses_disk(path: &str,
opts: &mut Options,
@ -163,7 +160,7 @@ mod tests {
opts.set_min_write_buffer_number_to_merge(4);
opts.set_level_zero_stop_writes_trigger(2000);
opts.set_level_zero_slowdown_writes_trigger(0);
opts.set_compaction_style(DBUniversalCompaction);
opts.set_compaction_style(Universal);
opts.set_max_background_compactions(4);
opts.set_max_background_flushes(4);
opts.set_filter_deletes(false);
@ -183,8 +180,8 @@ mod tests {
// dirty hack due to parallel tests causing contention.
// sleep_ms(1000);
// let path = "_rust_rocksdb_optimizetest";
// let mut opts = Options::new();
// let mut blockopts = BlockBasedOptions::new();
// let mut opts = Options::default();
// let mut blockopts = BlockBasedOptions::default();
// let mut db = tuned_for_somebody_elses_disk(path, &mut opts, &mut blockopts);
// let mut i = 0 as u64;
// b.iter(|| {
@ -196,8 +193,8 @@ mod tests {
// #[bench]
// fn b_reads(b: &mut Bencher) {
// let path = "_rust_rocksdb_optimizetest";
// let mut opts = Options::new();
// let mut blockopts = BlockBasedOptions::new();
// let mut opts = Options::default();
// let mut blockopts = BlockBasedOptions::default();
// {
// let db = tuned_for_somebody_elses_disk(path, &mut opts, &mut blockopts);
// let mut i = 0 as u64;

@ -19,10 +19,11 @@ use std::mem;
use std::ptr;
use std::slice;
pub type MergeFn = fn(&[u8], Option<&[u8]>, &mut MergeOperands) -> Vec<u8>;
pub struct MergeOperatorCallback {
pub name: CString,
pub merge_fn: fn(&[u8], Option<&[u8]>, &mut MergeOperands) -> Vec<u8>,
pub merge_fn: MergeFn,
}
pub extern "C" fn destructor_callback(raw_cb: *mut c_void) {
@ -128,9 +129,10 @@ impl MergeOperands {
impl<'a> Iterator for &'a mut MergeOperands {
type Item = &'a [u8];
fn next(&mut self) -> Option<&'a [u8]> {
match self.cursor == self.num_operands {
true => None,
false => unsafe {
if self.cursor == self.num_operands {
None
} else {
unsafe {
let base = self.operands_list as usize;
let base_len = self.operands_list_len as usize;
let spacing = mem::size_of::<*const *const u8>();
@ -142,7 +144,7 @@ impl<'a> Iterator for &'a mut MergeOperands {
self.cursor += 1;
Some(mem::transmute(slice::from_raw_parts(*(ptr as *const *const u8)
as *const u8, len)))
},
}
}
}
@ -160,14 +162,11 @@ fn test_provided_merge(new_key: &[u8],
-> Vec<u8> {
let nops = operands.size_hint().0;
let mut result: Vec<u8> = Vec::with_capacity(nops);
match existing_val {
Some(v) => {
if let Some(v) = existing_val {
for e in v {
result.push(*e);
}
}
None => (),
}
for op in operands {
for e in op {
result.push(*e);
@ -184,7 +183,7 @@ fn mergetest() {
use rocksdb::{DB, DBVector, Writable};
let path = "_rust_rocksdb_mergetest";
let mut opts = Options::new();
let mut opts = Options::default();
opts.create_if_missing(true);
opts.add_merge_operator("test operator", test_provided_merge);
{

@ -60,10 +60,12 @@ pub enum Direction {
Reverse,
}
pub type KVBytes = (Box<[u8]>, Box<[u8]>);
impl Iterator for DBIterator {
type Item = (Box<[u8]>, Box<[u8]>);
type Item = KVBytes;
fn next(&mut self) -> Option<(Box<[u8]>, Box<[u8]>)> {
fn next(&mut self) -> Option<KVBytes> {
let native_iter = self.inner;
if !self.just_seeked {
match self.direction {
@ -111,8 +113,8 @@ pub enum IteratorMode<'a> {
impl DBIterator {
fn new<'b>(db: &DB,
readopts: &'b ReadOptions,
fn new(db: &DB,
readopts: &ReadOptions,
mode: IteratorMode)
-> DBIterator {
unsafe {
@ -201,13 +203,13 @@ impl<'a> Snapshot<'a> {
}
pub fn iterator(&self, mode: IteratorMode) -> DBIterator {
let mut readopts = ReadOptions::new();
let mut readopts = ReadOptions::default();
readopts.set_snapshot(self);
DBIterator::new(self.db, &readopts, mode)
}
pub fn get(&self, key: &[u8]) -> Result<Option<DBVector>, String> {
let mut readopts = ReadOptions::new();
let mut readopts = ReadOptions::default();
readopts.set_snapshot(self);
self.db.get_opt(key, &readopts)
}
@ -216,7 +218,7 @@ impl<'a> Snapshot<'a> {
cf: DBCFHandle,
key: &[u8])
-> Result<Option<DBVector>, String> {
let mut readopts = ReadOptions::new();
let mut readopts = ReadOptions::default();
readopts.set_snapshot(self);
self.db.get_cf_opt(cf, key, &readopts)
}
@ -250,7 +252,7 @@ pub trait Writable {
impl DB {
pub fn open_default(path: &str) -> Result<DB, String> {
let mut opts = Options::new();
let mut opts = Options::default();
opts.create_if_missing(true);
DB::open(&opts, path)
}
@ -274,13 +276,8 @@ impl DB {
let cpath_ptr = cpath.as_ptr();
let ospath = Path::new(path);
match fs::create_dir_all(&ospath) {
Err(e) => {
return Err(format!("Failed to create rocksdb directory: \
{:?}",
e))
}
Ok(_) => (),
if let Err(e) = fs::create_dir_all(&ospath) {
return Err(format!("Failed to create rocksdb directory: {:?}", e))
}
let mut err: *const i8 = 0 as *const i8;
@ -337,7 +334,7 @@ impl DB {
copts, handles, err_ptr);
}
for handle in cfhandles.iter() {
for handle in &cfhandles {
if handle.is_null() {
return Err("Received null column family handle from DB."
.to_string());
@ -411,11 +408,11 @@ impl DB {
if !err.is_null() {
return Err(error_message(err));
}
return Ok(());
Ok(())
}
pub fn write(&self, batch: WriteBatch) -> Result<(), String> {
self.write_opt(batch, &WriteOptions::new())
self.write_opt(batch, &WriteOptions::default())
}
pub fn get_opt(&self,
@ -445,15 +442,16 @@ impl DB {
if !err.is_null() {
return Err(error_message(err));
}
match val.is_null() {
true => Ok(None),
false => Ok(Some(DBVector::from_c(val, val_len))),
if val.is_null() {
Ok(None)
} else {
Ok(Some(DBVector::from_c(val, val_len)))
}
}
}
pub fn get(&self, key: &[u8]) -> Result<Option<DBVector>, String> {
self.get_opt(key, &ReadOptions::new())
self.get_opt(key, &ReadOptions::default())
}
pub fn get_cf_opt(&self,
@ -485,9 +483,10 @@ impl DB {
if !err.is_null() {
return Err(error_message(err));
}
match val.is_null() {
true => Ok(None),
false => Ok(Some(DBVector::from_c(val, val_len))),
if val.is_null() {
Ok(None)
} else {
Ok(Some(DBVector::from_c(val, val_len)))
}
}
}
@ -496,7 +495,7 @@ impl DB {
cf: DBCFHandle,
key: &[u8])
-> Result<Option<DBVector>, String> {
self.get_cf_opt(cf, key, &ReadOptions::new())
self.get_cf_opt(cf, key, &ReadOptions::default())
}
pub fn create_cf(&mut self,
@ -554,16 +553,16 @@ impl DB {
}
pub fn iterator(&self, mode: IteratorMode) -> DBIterator {
let opts = ReadOptions::new();
DBIterator::new(&self, &opts, mode)
let opts = ReadOptions::default();
DBIterator::new(self, &opts, mode)
}
pub fn iterator_cf(&self,
cf_handle: DBCFHandle,
mode: IteratorMode)
-> Result<DBIterator, String> {
let opts = ReadOptions::new();
DBIterator::new_cf(&self, cf_handle, &opts, mode)
let opts = ReadOptions::default();
DBIterator::new_cf(self, cf_handle, &opts, mode)
}
pub fn snapshot(&self) -> Snapshot {
@ -701,7 +700,7 @@ impl DB {
impl Writable for DB {
fn put(&self, key: &[u8], value: &[u8]) -> Result<(), String> {
self.put_opt(key, value, &WriteOptions::new())
self.put_opt(key, value, &WriteOptions::default())
}
fn put_cf(&self,
@ -709,11 +708,11 @@ impl Writable for DB {
key: &[u8],
value: &[u8])
-> Result<(), String> {
self.put_cf_opt(cf, key, value, &WriteOptions::new())
self.put_cf_opt(cf, key, value, &WriteOptions::default())
}
fn merge(&self, key: &[u8], value: &[u8]) -> Result<(), String> {
self.merge_opt(key, value, &WriteOptions::new())
self.merge_opt(key, value, &WriteOptions::default())
}
fn merge_cf(&self,
@ -721,20 +720,20 @@ impl Writable for DB {
key: &[u8],
value: &[u8])
-> Result<(), String> {
self.merge_cf_opt(cf, key, value, &WriteOptions::new())
self.merge_cf_opt(cf, key, value, &WriteOptions::default())
}
fn delete(&self, key: &[u8]) -> Result<(), String> {
self.delete_opt(key, &WriteOptions::new())
self.delete_opt(key, &WriteOptions::default())
}
fn delete_cf(&self, cf: DBCFHandle, key: &[u8]) -> Result<(), String> {
self.delete_cf_opt(cf, key, &WriteOptions::new())
self.delete_cf_opt(cf, key, &WriteOptions::default())
}
}
impl WriteBatch {
pub fn new() -> WriteBatch {
impl Default for WriteBatch {
fn default() -> WriteBatch {
WriteBatch {
inner: unsafe { rocksdb_ffi::rocksdb_writebatch_create() },
}
@ -750,7 +749,7 @@ impl Drop for WriteBatch {
impl Drop for DB {
fn drop(&mut self) {
unsafe {
for (_, cf) in self.cfs.iter() {
for cf in self.cfs.values() {
rocksdb_ffi::rocksdb_column_family_handle_destroy(*cf);
}
rocksdb_ffi::rocksdb_close(self.inner);
@ -840,11 +839,6 @@ impl Drop for ReadOptions {
}
impl ReadOptions {
fn new() -> ReadOptions {
unsafe {
ReadOptions { inner: rocksdb_ffi::rocksdb_readoptions_create() }
}
}
// TODO add snapshot setting here
// TODO add snapshot wrapper structs with proper destructors;
// that struct needs an "iterator" impl too.
@ -863,6 +857,14 @@ impl ReadOptions {
}
}
impl Default for ReadOptions {
fn default() -> ReadOptions {
unsafe {
ReadOptions { inner: rocksdb_ffi::rocksdb_readoptions_create() }
}
}
}
pub struct DBVector {
base: *mut u8,
len: usize,
@ -891,7 +893,7 @@ impl DBVector {
}
}
pub fn to_utf8<'a>(&'a self) -> Option<&'a str> {
pub fn to_utf8(&self) -> Option<&str> {
from_utf8(self.deref()).ok()
}
}
@ -908,7 +910,7 @@ fn external() {
assert!(db.delete(b"k1").is_ok());
assert!(db.get(b"k1").unwrap().is_none());
}
let opts = Options::new();
let opts = Options::default();
let result = DB::destroy(&opts, path);
assert!(result.is_ok());
}
@ -917,7 +919,7 @@ fn external() {
fn errors_do_stuff() {
let path = "_rust_rocksdb_error";
let db = DB::open_default(path).unwrap();
let opts = Options::new();
let opts = Options::default();
// The DB will still be open when we try to destroy and the lock should fail
match DB::destroy(&opts, path) {
Err(ref s) => {
@ -936,7 +938,7 @@ fn writebatch_works() {
let db = DB::open_default(path).unwrap();
{
// test put
let batch = WriteBatch::new();
let batch = WriteBatch::default();
assert!(db.get(b"k1").unwrap().is_none());
let _ = batch.put(b"k1", b"v1111");
assert!(db.get(b"k1").unwrap().is_none());
@ -947,14 +949,14 @@ fn writebatch_works() {
}
{
// test delete
let batch = WriteBatch::new();
let batch = WriteBatch::default();
let _ = batch.delete(b"k1");
let p = db.write(batch);
assert!(p.is_ok());
assert!(db.get(b"k1").unwrap().is_none());
}
}
let opts = Options::new();
let opts = Options::default();
assert!(DB::destroy(&opts, path).is_ok());
}
@ -976,7 +978,7 @@ fn iterator_test() {
from_utf8(&*v).unwrap());
}
}
let opts = Options::new();
let opts = Options::default();
assert!(DB::destroy(&opts, path).is_ok());
}
@ -998,6 +1000,6 @@ fn snapshot_test() {
assert!(db.get(b"k2").unwrap().is_some());
assert!(snap.get(b"k2").unwrap().is_none());
}
let opts = Options::new();
let opts = Options::default();
assert!(DB::destroy(&opts, path).is_ok());
}

@ -13,12 +13,12 @@
// limitations under the License.
//
extern crate libc;
use self::libc::{c_int, size_t};
use self::libc::c_int;
use std::ffi::CString;
use std::mem;
use rocksdb_ffi;
use merge_operator::{self, MergeOperands, MergeOperatorCallback,
use merge_operator::{self, MergeFn, MergeOperatorCallback,
full_merge_callback, partial_merge_callback};
use comparator::{self, ComparatorCallback, compare_callback};
@ -59,16 +59,6 @@ impl Drop for WriteOptions {
}
impl BlockBasedOptions {
pub fn new() -> BlockBasedOptions {
let block_opts = unsafe {
rocksdb_ffi::rocksdb_block_based_options_create()
};
if block_opts.is_null() {
panic!("Could not create rocksdb block based options".to_string());
}
BlockBasedOptions { inner: block_opts }
}
pub fn set_block_size(&mut self, size: usize) {
unsafe {
rocksdb_ffi::rocksdb_block_based_options_set_block_size(self.inner,
@ -77,41 +67,19 @@ impl BlockBasedOptions {
}
}
// TODO figure out how to create these in a Rusty way
// /pub fn set_filter(&mut self, filter: rocksdb_ffi::DBFilterPolicy) {
// / unsafe {
// / rocksdb_ffi::rocksdb_block_based_options_set_filter_policy(
// / self.inner, filter);
// / }
// /}
/// /pub fn set_cache(&mut self, cache: rocksdb_ffi::DBCache) {
/// / unsafe {
/// / rocksdb_ffi::rocksdb_block_based_options_set_block_cache(
/// / self.inner, cache);
/// / }
/// /}
/// /pub fn set_cache_compressed(&mut self, cache: rocksdb_ffi::DBCache) {
/// / unsafe {
/// / rocksdb_ffi::
/// rocksdb_block_based_options_set_block_cache_compressed(
/// / self.inner, cache);
/// / }
/// /}
impl Options {
pub fn new() -> Options {
unsafe {
let opts = rocksdb_ffi::rocksdb_options_create();
if opts.is_null() {
panic!("Could not create rocksdb options".to_string());
}
Options { inner: opts }
impl Default for BlockBasedOptions {
fn default() -> BlockBasedOptions {
let block_opts = unsafe {
rocksdb_ffi::rocksdb_block_based_options_create()
};
if block_opts.is_null() {
panic!("Could not create rocksdb block based options".to_string());
}
BlockBasedOptions { inner: block_opts }
}
}
impl Options {
pub fn increase_parallelism(&mut self, parallelism: i32) {
unsafe {
rocksdb_ffi::rocksdb_options_increase_parallelism(self.inner,
@ -134,12 +102,9 @@ impl Options {
}
}
pub fn add_merge_operator<'a>(&mut self,
pub fn add_merge_operator(&mut self,
name: &str,
merge_fn: fn(&[u8],
Option<&[u8]>,
&mut MergeOperands)
-> Vec<u8>) {
merge_fn: MergeFn) {
let cb = Box::new(MergeOperatorCallback {
name: CString::new(name.as_bytes()).unwrap(),
merge_fn: merge_fn,
@ -157,7 +122,7 @@ impl Options {
}
}
pub fn add_comparator<'a>(&mut self,
pub fn add_comparator(&mut self,
name: &str,
compare_fn: fn(&[u8], &[u8]) -> i32) {
let cb = Box::new(ComparatorCallback {
@ -191,16 +156,13 @@ impl Options {
pub fn set_use_fsync(&mut self, useit: bool) {
unsafe {
match useit {
true => {
if useit {
rocksdb_ffi::rocksdb_options_set_use_fsync(self.inner, 1)
}
false => {
} else {
rocksdb_ffi::rocksdb_options_set_use_fsync(self.inner, 0)
}
}
}
}
pub fn set_bytes_per_sync(&mut self, nbytes: u64) {
unsafe {
@ -210,13 +172,10 @@ impl Options {
pub fn set_disable_data_sync(&mut self, disable: bool) {
unsafe {
match disable {
true =>
rocksdb_ffi::rocksdb_options_set_disable_data_sync(
self.inner, 1),
false =>
rocksdb_ffi::rocksdb_options_set_disable_data_sync(
self.inner, 0),
if disable {
rocksdb_ffi::rocksdb_options_set_disable_data_sync(self.inner, 1)
} else {
rocksdb_ffi::rocksdb_options_set_disable_data_sync(self.inner, 0)
}
}
}
@ -242,7 +201,7 @@ impl Options {
}
}
pub fn set_write_buffer_size(&mut self, size: size_t) {
pub fn set_write_buffer_size(&mut self, size: usize) {
unsafe {
rocksdb_ffi::rocksdb_options_set_write_buffer_size(self.inner,
size);
@ -306,15 +265,13 @@ impl Options {
}
pub fn set_disable_auto_compactions(&mut self, disable: bool) {
let c_bool = if disable {
1
} else {
0
};
unsafe {
match disable {
true =>
rocksdb_ffi::rocksdb_options_set_disable_auto_compactions(
self.inner, 1),
false =>
rocksdb_ffi::rocksdb_options_set_disable_auto_compactions(
self.inner, 0),
}
rocksdb_ffi::rocksdb_options_set_disable_auto_compactions(self.inner, c_bool)
}
}
@ -326,13 +283,22 @@ impl Options {
}
}
impl Default for Options {
fn default() -> Options {
unsafe {
let opts = rocksdb_ffi::rocksdb_options_create();
if opts.is_null() {
panic!("Could not create rocksdb options".to_string());
}
Options { inner: opts }
}
}
}
impl WriteOptions {
pub fn new() -> WriteOptions {
let write_opts = unsafe { rocksdb_ffi::rocksdb_writeoptions_create() };
if write_opts.is_null() {
panic!("Could not create rocksdb write options".to_string());
}
WriteOptions { inner: write_opts }
WriteOptions::default()
}
pub fn set_sync(&mut self, sync: bool) {
unsafe {
@ -340,3 +306,13 @@ impl WriteOptions {
}
}
}
impl Default for WriteOptions {
fn default() -> WriteOptions {
let write_opts = unsafe { rocksdb_ffi::rocksdb_writeoptions_create() };
if write_opts.is_null() {
panic!("Could not create rocksdb write options".to_string());
}
WriteOptions { inner: write_opts }
}
}

@ -20,11 +20,11 @@ pub fn test_column_family() {
// should be able to create column families
{
let mut opts = Options::new();
let mut opts = Options::default();
opts.create_if_missing(true);
opts.add_merge_operator("test operator", test_provided_merge);
let mut db = DB::open(&opts, path).unwrap();
let opts = Options::new();
let opts = Options::default();
match db.create_cf("cf1", &opts) {
Ok(_) => println!("cf1 created successfully"),
Err(e) => {
@ -35,7 +35,7 @@ pub fn test_column_family() {
// should fail to open db without specifying same column families
{
let mut opts = Options::new();
let mut opts = Options::default();
opts.add_merge_operator("test operator", test_provided_merge);
match DB::open(&opts, path) {
Ok(_) => {
@ -52,7 +52,7 @@ pub fn test_column_family() {
// should properly open db when specifying all column families
{
let mut opts = Options::new();
let mut opts = Options::default();
opts.add_merge_operator("test operator", test_provided_merge);
match DB::open_cf(&opts, path, &["cf1"]) {
Ok(_) => println!("successfully opened db with column family"),
@ -61,7 +61,7 @@ pub fn test_column_family() {
}
// TODO should be able to write, read, merge, batch, and iterate over a cf
{
let mut opts = Options::new();
let mut opts = Options::default();
opts.add_merge_operator("test operator", test_provided_merge);
let db = match DB::open_cf(&opts, path, &["cf1"]) {
Ok(db) => {
@ -107,14 +107,14 @@ pub fn test_column_family() {
}
// should be able to drop a cf
{
let mut db = DB::open_cf(&Options::new(), path, &["cf1"]).unwrap();
let mut db = DB::open_cf(&Options::default(), path, &["cf1"]).unwrap();
match db.drop_cf("cf1") {
Ok(_) => println!("cf1 successfully dropped."),
Err(e) => panic!("failed to drop column family: {}", e),
}
}
assert!(DB::destroy(&Options::new(), path).is_ok());
assert!(DB::destroy(&Options::default(), path).is_ok());
}
fn test_provided_merge(_: &[u8],

@ -138,6 +138,6 @@ pub fn test_iterator() {
assert!(!iterator1.valid());
}
}
let opts = Options::new();
let opts = Options::default();
assert!(DB::destroy(&opts, path).is_ok());
}

@ -47,5 +47,5 @@ pub fn test_multithreaded() {
j2.join().unwrap();
j3.join().unwrap();
}
assert!(DB::destroy(&Options::new(), path).is_ok());
assert!(DB::destroy(&Options::default(), path).is_ok());
}

Loading…
Cancel
Save