Merge pull request #314 from aleksuss/mutable_cf_map

Change how column families are stored
master
Oleksandr Anyshchenko authored 5 years ago; committed via GitHub
commit 25652570df
  14 changed files (changed lines in parentheses):
  1. .travis.yml (19)
  2. CHANGELOG.md (1)
  3. librocksdb-sys/build.rs (8)
  4. librocksdb-sys/tests/ffi.rs (2)
  5. src/backup.rs (48)
  6. src/db.rs (129)
  7. src/db_options.rs (4)
  8. src/lib.rs (11)
  9. tests/test_backup.rs (56)
  10. tests/test_column_family.rs (10)
  11. tests/test_db.rs (2)
  12. tests/test_multithreaded.rs (14)
  13. tests/test_property.rs (14)
  14. tests/test_slice_transform.rs (6)

@ -1,6 +1,6 @@
language: rust
dist: trusty
sudo: true
dist: xenial
sudo: false
os:
- linux
@ -17,10 +17,12 @@ addons:
- ubuntu-toolchain-r-test
- llvm-toolchain-trusty
packages:
- g++-5
- llvm-3.9-dev
- libclang-3.9-dev
- clang-3.9
- g++
- llvm-dev
- libclang-dev
- clang
cache: cargo
install:
- rustup component add rustfmt
@ -30,8 +32,3 @@ script:
- cargo fmt --all -- --check
- cargo test --manifest-path=librocksdb-sys/Cargo.toml
- cargo test
cache: cargo
before_cache:
- rm -rfv target/

@ -10,6 +10,7 @@
* Added `Sync` and `Send` implementations to `Snapshot` (pavel-mukhanov)
* Added `raw_iterator_cf_opt` to the DB API (rnarubin)
* Added `DB::latest_sequence_number` method (vitvakatu)
* Changed how column families are stored (aleksuss)
## 0.12.2 (2019-05-03)
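
For downstream code, the change summarized above means column-family handles are now owned by the `DB` and only borrowed by callers: `create_cf` takes `&mut self` and returns `Result<(), Error>`, `cf_handle` returns `Option<&ColumnFamily>`, and the read/write methods accept `&ColumnFamily`. A minimal migration sketch (path and key names are illustrative, not from this PR):

```rust
use rocksdb::{Options, DB};

fn main() -> Result<(), rocksdb::Error> {
    let mut opts = Options::default();
    opts.create_if_missing(true);

    // The DB must be mutable to create or drop column families.
    let mut db = DB::open(&opts, "_example_db_path")?;
    db.create_cf("cf1", &Options::default())?; // now returns Ok(()) instead of a handle

    // Handles are borrowed from the DB instead of being Copy values.
    let cf = db.cf_handle("cf1").expect("cf1 was just created");
    db.put_cf(cf, b"key", b"value")?;
    assert_eq!(db.get_cf(cf, b"key")?.unwrap().as_ref(), b"value");
    Ok(())
}
```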

@ -98,10 +98,10 @@ fn build_rocksdb() {
// (about 2011).
config.define("HAVE_PCLMUL", Some("1"));
config.define("HAVE_SSE42", Some("1"));
config.flag("-msse2");
config.flag("-msse4.1");
config.flag("-msse4.2");
config.flag("-mpclmul");
config.flag_if_supported("-msse2");
config.flag_if_supported("-msse4.1");
config.flag_if_supported("-msse4.2");
config.flag_if_supported("-mpclmul");
}
if target.contains("darwin") {
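
The switch from `flag` to `flag_if_supported` means the build no longer hard-fails on toolchains that reject a given SSE flag: the `cc` crate probes each flag and silently drops unsupported ones. A small sketch of the same pattern (the file name is illustrative, not from this repository):

```rust
// build.rs sketch: probe optional CPU-feature flags instead of requiring them.
fn main() {
    let mut config = cc::Build::new();
    config.file("native/shim.c"); // illustrative C source
    // `flag` would abort the build if the compiler rejects the flag;
    // `flag_if_supported` runs a probe first and skips flags that fail it.
    config.flag_if_supported("-msse4.2");
    config.flag_if_supported("-mpclmul");
    config.compile("shim");
}
```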

@ -716,7 +716,7 @@ fn ffi() {
StartPhase("approximate_sizes");
{
let mut sizes: [uint64_t; 2] = [0, 0];
let mut sizes: [u64; 2] = [0, 0];
let start: [*const c_char; 2] = [cstrp!("a"), cstrp!("k00000000000000010000")];
let start_len: [size_t; 2] = [1, 21];
let limit: [*const c_char; 2] = [cstrp!("k00000000000000010000"), cstrp!("z")];

@ -16,7 +16,7 @@
use ffi;
use {Error, DB};
use libc::{c_int, uint32_t};
use libc::c_int;
use std::ffi::CString;
use std::path::Path;
@ -73,7 +73,7 @@ impl BackupEngine {
unsafe {
ffi_try!(ffi::rocksdb_backup_engine_purge_old_backups(
self.inner,
num_backups_to_keep as uint32_t,
num_backups_to_keep as u32,
));
Ok(())
}
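
Beyond the `uint32_t` → `u32` cleanup, `purge_old_backups` keeps its semantics: it deletes every backup except the newest `num_backups_to_keep`. A hedged usage sketch (the backup directory is illustrative, and the count is assumed to be a plain integer as the cast above suggests):

```rust
use rocksdb::backup::{BackupEngine, BackupEngineOptions};
use rocksdb::DB;

fn backup_and_prune(db: &DB) -> Result<(), rocksdb::Error> {
    let backup_opts = BackupEngineOptions::default();
    let mut engine = BackupEngine::open(&backup_opts, "_example_backup_dir")?;
    engine.create_new_backup(db)?;
    // Keep only the two most recent backups; anything older is purged.
    engine.purge_old_backups(2)?;
    Ok(())
}
```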
@ -202,47 +202,3 @@ impl Drop for RestoreOptions {
}
}
}
#[test]
fn backup_restore() {
use db::DBVector;
use Options;
// create backup
let path = "_rust_rocksdb_backup_restore_test";
{
let db = DB::open_default(path).unwrap();
let p = db.put(b"k1", b"v1111");
assert!(p.is_ok());
let r: Result<Option<DBVector>, Error> = db.get(b"k1");
assert!(r.unwrap().unwrap().to_utf8().unwrap() == "v1111");
let backup_path = "_rust_rocksdb_backup_path";
{
let backup_opts = BackupEngineOptions::default();
let mut backup_engine = BackupEngine::open(&backup_opts, &backup_path).unwrap();
let r = backup_engine.create_new_backup(&db);
assert!(r.is_ok());
let restore_path = "_rust_rocksdb_restore_from_backup_path";
{
let mut restore_option = RestoreOptions::default();
restore_option.set_keep_log_files(true); // true to keep log files
let restore_status = backup_engine.restore_from_latest_backup(
&restore_path,
&restore_path,
&restore_option,
);
assert!(restore_status.is_ok());
let db_restore = DB::open_default(restore_path).unwrap();
let r: Result<Option<DBVector>, Error> = db_restore.get(b"k1");
assert!(r.unwrap().unwrap().to_utf8().unwrap() == "v1111");
}
assert!(DB::destroy(&Options::default(), restore_path).is_ok());
}
assert!(DB::destroy(&Options::default(), backup_path).is_ok());
}
assert!(DB::destroy(&Options::default(), path).is_ok());
}

@ -28,7 +28,6 @@ use std::path::Path;
use std::ptr;
use std::slice;
use std::str;
use std::sync::{Arc, RwLock};
unsafe impl Send for DB {}
unsafe impl Sync for DB {}
@ -224,7 +223,7 @@ impl<'a> DBRawIterator<'a> {
fn new_cf(
db: &DB,
cf_handle: ColumnFamily,
cf_handle: &ColumnFamily,
readopts: &ReadOptions,
) -> Result<DBRawIterator<'a>, Error> {
unsafe {
@ -496,7 +495,7 @@ impl<'a> DBIterator<'a> {
fn new_cf(
db: &DB,
cf_handle: ColumnFamily,
cf_handle: &ColumnFamily,
readopts: &ReadOptions,
mode: IteratorMode,
) -> Result<DBIterator<'a>, Error> {
@ -592,7 +591,7 @@ impl<'a> Snapshot<'a> {
pub fn iterator_cf(
&self,
cf_handle: ColumnFamily,
cf_handle: &ColumnFamily,
mode: IteratorMode,
) -> Result<DBIterator, Error> {
let readopts = ReadOptions::default();
@ -606,7 +605,7 @@ impl<'a> Snapshot<'a> {
pub fn iterator_cf_opt(
&self,
cf_handle: ColumnFamily,
cf_handle: &ColumnFamily,
mut readopts: ReadOptions,
mode: IteratorMode,
) -> Result<DBIterator, Error> {
@ -621,7 +620,7 @@ impl<'a> Snapshot<'a> {
}
/// Opens a raw iterator over the data in this snapshot under the given column family, using the default read options.
pub fn raw_iterator_cf(&self, cf_handle: ColumnFamily) -> Result<DBRawIterator, Error> {
pub fn raw_iterator_cf(&self, cf_handle: &ColumnFamily) -> Result<DBRawIterator, Error> {
let readopts = ReadOptions::default();
self.raw_iterator_cf_opt(cf_handle, readopts)
}
@ -635,7 +634,7 @@ impl<'a> Snapshot<'a> {
/// Opens a raw iterator over the data in this snapshot under the given column family, using the given read options.
pub fn raw_iterator_cf_opt(
&self,
cf_handle: ColumnFamily,
cf_handle: &ColumnFamily,
mut readopts: ReadOptions,
) -> Result<DBRawIterator, Error> {
readopts.set_snapshot(self);
@ -649,7 +648,7 @@ impl<'a> Snapshot<'a> {
pub fn get_cf<K: AsRef<[u8]>>(
&self,
cf: ColumnFamily,
cf: &ColumnFamily,
key: K,
) -> Result<Option<DBVector>, Error> {
let readopts = ReadOptions::default();
@ -667,7 +666,7 @@ impl<'a> Snapshot<'a> {
pub fn get_cf_opt<K: AsRef<[u8]>>(
&self,
cf: ColumnFamily,
cf: &ColumnFamily,
key: K,
mut readopts: ReadOptions,
) -> Result<Option<DBVector>, Error> {
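
The `Snapshot` accessors mirror the change on `DB`: every `*_cf` method now borrows the column-family handle. A hedged read-at-snapshot sketch (column-family name and key are illustrative):

```rust
fn read_at_snapshot(db: &rocksdb::DB) -> Result<(), rocksdb::Error> {
    let cf = db.cf_handle("cf1").expect("cf1 must exist");
    let snapshot = db.snapshot();
    // Reads see the state of the DB at the moment the snapshot was taken.
    if let Some(value) = snapshot.get_cf(cf, b"key")? {
        println!("{:?}", value.to_utf8());
    }
    Ok(())
}
```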
@ -754,7 +753,7 @@ impl DB {
}
let db: *mut ffi::rocksdb_t;
let cf_map = Arc::new(RwLock::new(BTreeMap::new()));
let mut cf_map = BTreeMap::new();
if cfs.is_empty() {
unsafe {
@ -807,11 +806,8 @@ impl DB {
}
}
for (n, h) in cfs_v.iter().zip(cfhandles) {
cf_map
.write()
.map_err(|e| Error::new(e.to_string()))?
.insert(n.name.clone(), h);
for (cf_desc, inner) in cfs_v.iter().zip(cfhandles) {
cf_map.insert(cf_desc.name.clone(), ColumnFamily { inner });
}
}
@ -938,7 +934,7 @@ impl DB {
pub fn get_cf_opt<K: AsRef<[u8]>>(
&self,
cf: ColumnFamily,
cf: &ColumnFamily,
key: K,
readopts: &ReadOptions,
) -> Result<Option<DBVector>, Error> {
@ -975,7 +971,7 @@ impl DB {
pub fn get_cf<K: AsRef<[u8]>>(
&self,
cf: ColumnFamily,
cf: &ColumnFamily,
key: K,
) -> Result<Option<DBVector>, Error> {
self.get_cf_opt(cf, key.as_ref(), &ReadOptions::default())
@ -1027,7 +1023,7 @@ impl DB {
/// allows specifying ColumnFamily
pub fn get_pinned_cf_opt<K: AsRef<[u8]>>(
&self,
cf: ColumnFamily,
cf: &ColumnFamily,
key: K,
readopts: &ReadOptions,
) -> Result<Option<DBPinnableSlice>, Error> {
@ -1064,13 +1060,13 @@ impl DB {
/// leverages default options.
pub fn get_pinned_cf<K: AsRef<[u8]>>(
&self,
cf: ColumnFamily,
cf: &ColumnFamily,
key: K,
) -> Result<Option<DBPinnableSlice>, Error> {
self.get_pinned_cf_opt(cf, key, &ReadOptions::default())
}
pub fn create_cf<N: AsRef<str>>(&self, name: N, opts: &Options) -> Result<ColumnFamily, Error> {
pub fn create_cf<N: AsRef<str>>(&mut self, name: N, opts: &Options) -> Result<(), Error> {
let cname = match CString::new(name.as_ref().as_bytes()) {
Ok(c) => c,
Err(_) => {
@ -1081,35 +1077,23 @@ impl DB {
));
}
};
let cf = unsafe {
let cf_handle = ffi_try!(ffi::rocksdb_create_column_family(
unsafe {
let inner = ffi_try!(ffi::rocksdb_create_column_family(
self.inner,
opts.inner,
cname.as_ptr(),
));
self.cfs
.write()
.map_err(|e| Error::new(e.to_string()))?
.insert(name.as_ref().to_string(), cf_handle);
ColumnFamily {
inner: cf_handle,
db: PhantomData,
}
.insert(name.as_ref().to_string(), ColumnFamily { inner });
};
Ok(cf)
Ok(())
}
pub fn drop_cf(&self, name: &str) -> Result<(), Error> {
if let Some(cf) = self
.cfs
.write()
.map_err(|e| Error::new(e.to_string()))?
.remove(name)
{
pub fn drop_cf(&mut self, name: &str) -> Result<(), Error> {
if let Some(cf) = self.cfs.remove(name) {
unsafe {
ffi_try!(ffi::rocksdb_drop_column_family(self.inner, cf,));
ffi_try!(ffi::rocksdb_drop_column_family(self.inner, cf.inner,));
}
Ok(())
} else {
@ -1120,11 +1104,8 @@ impl DB {
}
/// Return the underlying column family handle.
pub fn cf_handle(&self, name: &str) -> Option<ColumnFamily> {
self.cfs.read().ok()?.get(name).map(|h| ColumnFamily {
inner: *h,
db: PhantomData,
})
pub fn cf_handle(&self, name: &str) -> Option<&ColumnFamily> {
self.cfs.get(name)
}
pub fn iterator(&self, mode: IteratorMode) -> DBIterator {
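
Because `cf_handle` now returns a borrow of the handle stored inside the `DB`, the borrow checker ties the handle's lifetime to the database: `create_cf` and `drop_cf` take `&mut self`, so they cannot run while a `&ColumnFamily` obtained earlier is still in use. A hedged sketch of that interaction:

```rust
fn cf_lifecycle(db: &mut rocksdb::DB) -> Result<(), rocksdb::Error> {
    db.create_cf("cf1", &rocksdb::Options::default())?;

    let cf = db.cf_handle("cf1").expect("created above"); // `&ColumnFamily` borrows `db`
    db.put_cf(cf, b"k", b"v")?;                           // shared borrows are fine

    // Uncommenting both lines below would be rejected: `drop_cf` needs `&mut self`
    // while `cf` is still used afterwards and therefore still borrows `db`.
    // db.drop_cf("cf1")?;
    // db.get_cf(cf, b"k")?;

    db.drop_cf("cf1")?; // fine here, after the last use of `cf`
    Ok(())
}
```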
@ -1140,7 +1121,7 @@ impl DB {
/// This is used when you want to iterate over a specific ColumnFamily with a modified ReadOptions
pub fn iterator_cf_opt(
&self,
cf_handle: ColumnFamily,
cf_handle: &ColumnFamily,
readopts: &ReadOptions,
mode: IteratorMode,
) -> Result<DBIterator, Error> {
@ -1168,7 +1149,7 @@ impl DB {
pub fn iterator_cf(
&self,
cf_handle: ColumnFamily,
cf_handle: &ColumnFamily,
mode: IteratorMode,
) -> Result<DBIterator, Error> {
let opts = ReadOptions::default();
@ -1177,7 +1158,7 @@ impl DB {
pub fn full_iterator_cf(
&self,
cf_handle: ColumnFamily,
cf_handle: &ColumnFamily,
mode: IteratorMode,
) -> Result<DBIterator, Error> {
let mut opts = ReadOptions::default();
@ -1187,7 +1168,7 @@ impl DB {
pub fn prefix_iterator_cf<P: AsRef<[u8]>>(
&self,
cf_handle: ColumnFamily,
cf_handle: &ColumnFamily,
prefix: P,
) -> Result<DBIterator, Error> {
let mut opts = ReadOptions::default();
@ -1207,7 +1188,7 @@ impl DB {
}
/// Opens a raw iterator over the given column family, using the default read options
pub fn raw_iterator_cf(&self, cf_handle: ColumnFamily) -> Result<DBRawIterator, Error> {
pub fn raw_iterator_cf(&self, cf_handle: &ColumnFamily) -> Result<DBRawIterator, Error> {
let opts = ReadOptions::default();
DBRawIterator::new_cf(self, cf_handle, &opts)
}
@ -1220,7 +1201,7 @@ impl DB {
/// Opens a raw iterator over the given column family, using the given read options
pub fn raw_iterator_cf_opt(
&self,
cf_handle: ColumnFamily,
cf_handle: &ColumnFamily,
readopts: &ReadOptions,
) -> Result<DBRawIterator, Error> {
DBRawIterator::new_cf(self, cf_handle, readopts)
@ -1253,7 +1234,7 @@ impl DB {
pub fn put_cf_opt<K, V>(
&self,
cf: ColumnFamily,
cf: &ColumnFamily,
key: K,
value: V,
writeopts: &WriteOptions,
@ -1302,7 +1283,7 @@ impl DB {
pub fn merge_cf_opt<K, V>(
&self,
cf: ColumnFamily,
cf: &ColumnFamily,
key: K,
value: V,
writeopts: &WriteOptions,
@ -1348,7 +1329,7 @@ impl DB {
pub fn delete_cf_opt<K: AsRef<[u8]>>(
&self,
cf: ColumnFamily,
cf: &ColumnFamily,
key: K,
writeopts: &WriteOptions,
) -> Result<(), Error> {
@ -1374,7 +1355,7 @@ impl DB {
self.put_opt(key.as_ref(), value.as_ref(), &WriteOptions::default())
}
pub fn put_cf<K, V>(&self, cf: ColumnFamily, key: K, value: V) -> Result<(), Error>
pub fn put_cf<K, V>(&self, cf: &ColumnFamily, key: K, value: V) -> Result<(), Error>
where
K: AsRef<[u8]>,
V: AsRef<[u8]>,
@ -1390,7 +1371,7 @@ impl DB {
self.merge_opt(key.as_ref(), value.as_ref(), &WriteOptions::default())
}
pub fn merge_cf<K, V>(&self, cf: ColumnFamily, key: K, value: V) -> Result<(), Error>
pub fn merge_cf<K, V>(&self, cf: &ColumnFamily, key: K, value: V) -> Result<(), Error>
where
K: AsRef<[u8]>,
V: AsRef<[u8]>,
@ -1402,7 +1383,7 @@ impl DB {
self.delete_opt(key.as_ref(), &WriteOptions::default())
}
pub fn delete_cf<K: AsRef<[u8]>>(&self, cf: ColumnFamily, key: K) -> Result<(), Error> {
pub fn delete_cf<K: AsRef<[u8]>>(&self, cf: &ColumnFamily, key: K) -> Result<(), Error> {
self.delete_cf_opt(cf, key.as_ref(), &WriteOptions::default())
}
@ -1423,7 +1404,7 @@ impl DB {
pub fn compact_range_cf<S: AsRef<[u8]>, E: AsRef<[u8]>>(
&self,
cf: ColumnFamily,
cf: &ColumnFamily,
start: Option<S>,
end: Option<E>,
) {
@ -1512,7 +1493,11 @@ impl DB {
///
/// For a full list of properties, see
/// https://github.com/facebook/rocksdb/blob/08809f5e6cd9cc4bc3958dd4d59457ae78c76660/include/rocksdb/db.h#L428-L634
pub fn property_value_cf(&self, cf: ColumnFamily, name: &str) -> Result<Option<String>, Error> {
pub fn property_value_cf(
&self,
cf: &ColumnFamily,
name: &str,
) -> Result<Option<String>, Error> {
let prop_name = match CString::new(name) {
Ok(c) => c,
Err(e) => {
@ -1568,7 +1553,7 @@ impl DB {
/// https://github.com/facebook/rocksdb/blob/08809f5e6cd9cc4bc3958dd4d59457ae78c76660/include/rocksdb/db.h#L654-L689
pub fn property_int_value_cf(
&self,
cf: ColumnFamily,
cf: &ColumnFamily,
name: &str,
) -> Result<Option<u64>, Error> {
match self.property_value_cf(cf, name) {
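
The property getters follow the same borrowing convention. A hedged sketch using property names that already appear in this PR's tests:

```rust
fn cf_properties(db: &rocksdb::DB) -> Result<(), rocksdb::Error> {
    let cf = db.cf_handle("cf1").expect("cf1 must exist"); // illustrative CF name
    // String-valued property: the human-readable stats dump.
    if let Some(stats) = db.property_value_cf(cf, "rocksdb.stats")? {
        println!("{}", stats);
    }
    // Integer-valued property, parsed into a u64.
    let estimated_keys = db.property_int_value_cf(cf, "rocksdb.estimate-num-keys")?;
    println!("estimated keys: {:?}", estimated_keys);
    Ok(())
}
```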
@ -1629,7 +1614,7 @@ impl WriteBatch {
}
}
pub fn put_cf<K, V>(&mut self, cf: ColumnFamily, key: K, value: V) -> Result<(), Error>
pub fn put_cf<K, V>(&mut self, cf: &ColumnFamily, key: K, value: V) -> Result<(), Error>
where
K: AsRef<[u8]>,
V: AsRef<[u8]>,
@ -1670,7 +1655,7 @@ impl WriteBatch {
}
}
pub fn merge_cf<K, V>(&mut self, cf: ColumnFamily, key: K, value: V) -> Result<(), Error>
pub fn merge_cf<K, V>(&mut self, cf: &ColumnFamily, key: K, value: V) -> Result<(), Error>
where
K: AsRef<[u8]>,
V: AsRef<[u8]>,
@ -1707,7 +1692,7 @@ impl WriteBatch {
}
}
pub fn delete_cf<K: AsRef<[u8]>>(&mut self, cf: ColumnFamily, key: K) -> Result<(), Error> {
pub fn delete_cf<K: AsRef<[u8]>>(&mut self, cf: &ColumnFamily, key: K) -> Result<(), Error> {
let key = key.as_ref();
unsafe {
@ -1748,7 +1733,7 @@ impl WriteBatch {
/// keys exist in the range ["begin_key", "end_key").
pub fn delete_range_cf<K: AsRef<[u8]>>(
&mut self,
cf: ColumnFamily,
cf: &ColumnFamily,
from: K,
to: K,
) -> Result<(), Error> {
@ -1793,10 +1778,8 @@ impl Drop for WriteBatch {
impl Drop for DB {
fn drop(&mut self) {
unsafe {
if let Ok(cfs) = self.cfs.read() {
for cf in cfs.values() {
ffi::rocksdb_column_family_handle_destroy(*cf);
}
for cf in self.cfs.values() {
ffi::rocksdb_column_family_handle_destroy(cf.inner);
}
ffi::rocksdb_close(self.inner);
}
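
`WriteBatch` takes `&ColumnFamily` as well, and `Drop` for `DB` now simply walks the owned map to destroy every stored handle before closing the database. A hedged batch-write sketch (column-family name and keys are illustrative):

```rust
use rocksdb::WriteBatch;

fn batched_writes(db: &rocksdb::DB) -> Result<(), rocksdb::Error> {
    let cf = db.cf_handle("cf1").expect("cf1 must exist");
    let mut batch = WriteBatch::default();
    batch.put_cf(cf, b"k1", b"v1")?;
    batch.delete_cf(cf, b"k0")?;
    batch.merge_cf(cf, b"k1", b"v2")?; // only meaningful if a merge operator is configured
    db.write(batch) // applies all operations atomically
}
```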
@ -1842,13 +1825,11 @@ impl ReadOptions {
/// that this [`ReadOptions`] value does not leave the scope too early (e.g. `DB::iterator_cf_opt`).
pub unsafe fn set_iterate_upper_bound<K: AsRef<[u8]>>(&mut self, key: K) {
let key = key.as_ref();
unsafe {
ffi::rocksdb_readoptions_set_iterate_upper_bound(
self.inner,
key.as_ptr() as *const c_char,
key.len() as size_t,
);
}
ffi::rocksdb_readoptions_set_iterate_upper_bound(
self.inner,
key.as_ptr() as *const c_char,
key.len() as size_t,
);
}
pub fn set_prefix_same_as_start(&mut self, v: bool) {
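
The doc comment above carries the real contract: `set_iterate_upper_bound` is `unsafe` because the FFI call keeps a pointer to the bound, so the `ReadOptions` value (and the key bytes) must outlive any iterator built from it. A hedged bounded-scan sketch (column-family name and bound are illustrative):

```rust
use rocksdb::{IteratorMode, ReadOptions};

fn bounded_scan(db: &rocksdb::DB) -> Result<(), rocksdb::Error> {
    let cf = db.cf_handle("cf1").expect("cf1 must exist");
    let mut readopts = ReadOptions::default();
    // Safe only because `readopts` lives until after the iteration below
    // and the bound is a 'static byte string.
    unsafe { readopts.set_iterate_upper_bound(b"key_z") };
    let iter = db.iterator_cf_opt(cf, &readopts, IteratorMode::Start)?;
    for (key, value) in iter {
        println!("{:?} => {:?}", key, value);
    }
    Ok(())
}
```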

@ -16,7 +16,7 @@ use std::ffi::{CStr, CString};
use std::mem;
use std::path::Path;
use libc::{self, c_int, c_uchar, c_uint, c_void, size_t, uint64_t};
use libc::{self, c_int, c_uchar, c_uint, c_void, size_t};
use compaction_filter::{self, filter_callback, CompactionFilterCallback, CompactionFilterFn};
use comparator::{self, ComparatorCallback, CompareFn};
@ -172,7 +172,7 @@ impl Options {
unsafe {
ffi::rocksdb_options_optimize_level_style_compaction(
self.inner,
memtable_memory_budget as uint64_t,
memtable_memory_budget as u64,
);
}
}
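
For context, `optimize_level_style_compaction` just forwards a memtable memory budget (in bytes) to RocksDB's tuning helper. A hedged sketch, assuming the budget is an integer byte count as the cast above suggests:

```rust
fn tuned_options() -> rocksdb::Options {
    let mut opts = rocksdb::Options::default();
    opts.create_if_missing(true);
    // Give level-style compaction roughly a 512 MiB memtable budget.
    opts.optimize_level_style_compaction(512 * 1024 * 1024);
    opts
}
```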

@ -81,16 +81,14 @@ pub use merge_operator::MergeOperands;
use std::collections::BTreeMap;
use std::error;
use std::fmt;
use std::marker::PhantomData;
use std::path::PathBuf;
use std::sync::{Arc, RwLock};
/// A RocksDB database.
///
/// See crate level documentation for a simple usage example.
pub struct DB {
inner: *mut ffi::rocksdb_t,
cfs: Arc<RwLock<BTreeMap<String, *mut ffi::rocksdb_column_family_handle_t>>>,
cfs: BTreeMap<String, ColumnFamily>,
path: PathBuf,
}
@ -283,13 +281,11 @@ pub struct WriteOptions {
/// An opaque type used to represent a column family. Returned from some functions, and used
/// in others
#[derive(Copy, Clone)]
pub struct ColumnFamily<'a> {
pub struct ColumnFamily {
inner: *mut ffi::rocksdb_column_family_handle_t,
db: PhantomData<&'a DB>,
}
unsafe impl<'a> Send for ColumnFamily<'a> {}
unsafe impl Send for ColumnFamily {}
#[cfg(test)]
mod test {
@ -334,5 +330,4 @@ mod test {
is_sync::<PlainTableFactoryOptions>();
is_sync::<ColumnFamilyDescriptor>();
}
}

@ -0,0 +1,56 @@
// Copyright 2019 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
extern crate rocksdb;
use rocksdb::{
backup::{BackupEngine, BackupEngineOptions, RestoreOptions},
Options, DB,
};
#[test]
fn backup_restore() {
// create backup
let path = "_rust_rocksdb_backup_test";
let restore_path = "_rust_rocksdb_restore_from_backup_path";
let mut opts = Options::default();
opts.create_if_missing(true);
{
let db = DB::open(&opts, path).unwrap();
assert!(db.put(b"k1", b"v1111").is_ok());
let value = db.get(b"k1");
assert_eq!(value.unwrap().unwrap().as_ref(), b"v1111");
{
let backup_path = "_rust_rocksdb_backup_path";
let backup_opts = BackupEngineOptions::default();
let mut backup_engine = BackupEngine::open(&backup_opts, &backup_path).unwrap();
assert!(backup_engine.create_new_backup(&db).is_ok());
let mut restore_option = RestoreOptions::default();
restore_option.set_keep_log_files(false); // true to keep log files
let restore_status = backup_engine.restore_from_latest_backup(
&restore_path,
&restore_path,
&restore_option,
);
assert!(restore_status.is_ok());
let db_restore = DB::open_default(restore_path).unwrap();
let value = db_restore.get(b"k1");
assert_eq!(value.unwrap().unwrap().as_ref(), b"v1111");
}
}
assert!(DB::destroy(&opts, restore_path).is_ok());
assert!(DB::destroy(&opts, path).is_ok());
}

@ -27,10 +27,10 @@ fn test_column_family() {
let mut opts = Options::default();
opts.create_if_missing(true);
opts.set_merge_operator("test operator", test_provided_merge, None);
let db = DB::open(&opts, &n).unwrap();
let mut db = DB::open(&opts, &n).unwrap();
let opts = Options::default();
match db.create_cf("cf1", &opts) {
Ok(_db) => println!("cf1 created successfully"),
Ok(()) => println!("cf1 created successfully"),
Err(e) => {
panic!("could not create column family: {}", e);
}
@ -80,7 +80,7 @@ fn test_column_family() {
{}
// should be able to drop a cf
{
let db = DB::open_cf(&Options::default(), &n, &["cf1"]).unwrap();
let mut db = DB::open_cf(&Options::default(), &n, &["cf1"]).unwrap();
match db.drop_cf("cf1") {
Ok(_) => println!("cf1 successfully dropped."),
Err(e) => panic!("failed to drop column family: {}", e),
@ -97,7 +97,7 @@ fn test_can_open_db_with_results_of_list_cf() {
{
let mut opts = Options::default();
opts.create_if_missing(true);
let db = DB::open(&opts, &n).unwrap();
let mut db = DB::open(&opts, &n).unwrap();
let opts = Options::default();
assert!(db.create_cf("cf1", &opts).is_ok());
@ -244,7 +244,7 @@ fn test_create_duplicate_column_family() {
opts.create_if_missing(true);
opts.create_missing_column_families(true);
let db = match DB::open_cf(&opts, &n, &["cf1"]) {
let mut db = match DB::open_cf(&opts, &n, &["cf1"]) {
Ok(d) => d,
Err(e) => panic!("failed to create new column family: {}", e),
};

@ -247,7 +247,7 @@ fn test_sequence_number() {
{
let db = DB::open_default(&path).unwrap();
assert_eq!(db.latest_sequence_number(), 0);
db.put(b"key", b"value");
let _ = db.put(b"key", b"value");
assert_eq!(db.latest_sequence_number(), 1);
}
}

@ -48,19 +48,19 @@ pub fn test_multithreaded() {
let db3 = db.clone();
let j3 = thread::spawn(move || {
for _ in 1..N {
match db3.get(b"key") {
let result = match db3.get(b"key") {
Ok(Some(v)) => {
if &v[..] != b"value1" && &v[..] != b"value2" {
assert!(false);
false
} else {
true
}
}
_ => {
assert!(false);
}
}
_ => false,
};
assert!(result);
}
});
j1.join().unwrap();
j2.join().unwrap();
j3.join().unwrap();
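
Since `DB` is declared `Send` and `Sync` (see the `unsafe impl`s earlier in this diff), the usual multithreaded pattern is to share one database behind an `Arc` and let each thread resolve the column family it needs, keeping the `&ColumnFamily` borrow local to that thread. A hedged sketch (column-family name and keys are illustrative):

```rust
use std::sync::Arc;
use std::thread;

fn parallel_reads(db: Arc<rocksdb::DB>) {
    let handles: Vec<_> = (0..4)
        .map(|i| {
            let db = Arc::clone(&db);
            thread::spawn(move || {
                // Each thread takes its own short-lived borrow of the handle.
                let cf = db.cf_handle("cf1").expect("cf1 must exist");
                let _ = db.get_cf(cf, format!("key{}", i).as_bytes());
            })
        })
        .collect();
    for handle in handles {
        handle.join().unwrap();
    }
}
```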

@ -34,8 +34,9 @@ fn property_cf_test() {
let n = DBPath::new("_rust_rocksdb_property_cf_test");
{
let opts = Options::default();
let db = DB::open_default(&n).unwrap();
let cf = db.create_cf("cf1", &opts).unwrap();
let mut db = DB::open_default(&n).unwrap();
db.create_cf("cf1", &opts).unwrap();
let cf = db.cf_handle("cf1").unwrap();
let value = db.property_value_cf(cf, "rocksdb.stats").unwrap().unwrap();
assert!(value.contains("Stats"));
@ -51,7 +52,7 @@ fn property_int_test() {
.property_int_value("rocksdb.estimate-live-data-size")
.unwrap();
assert!(value == Some(0));
assert_eq!(value, Some(0));
}
}
@ -60,12 +61,13 @@ fn property_int_cf_test() {
let n = DBPath::new("_rust_rocksdb_property_int_cf_test");
{
let opts = Options::default();
let db = DB::open_default(&n).unwrap();
let cf = db.create_cf("cf1", &opts).unwrap();
let mut db = DB::open_default(&n).unwrap();
db.create_cf("cf1", &opts).unwrap();
let cf = db.cf_handle("cf1").unwrap();
let total_keys = db
.property_int_value_cf(cf, "rocksdb.estimate-num-keys")
.unwrap();
assert!(total_keys == Some(0));
assert_eq!(total_keys, Some(0));
}
}

@ -6,14 +6,14 @@ use util::DBPath;
#[test]
pub fn test_slice_transform() {
let n = DBPath::new("_rust_rocksdb_slicetransform_test");
let db_path = DBPath::new("_rust_rocksdb_slicetransform_test");
{
let a1: Box<[u8]> = key(b"aaa1");
let a2: Box<[u8]> = key(b"aaa2");
let b1: Box<[u8]> = key(b"bbb1");
let b2: Box<[u8]> = key(b"bbb2");
fn first_three<'a>(k: &'a [u8]) -> &'a [u8] {
fn first_three(k: &[u8]) -> &[u8] {
&k[..3]
}
@ -23,7 +23,7 @@ pub fn test_slice_transform() {
opts.create_if_missing(true);
opts.set_prefix_extractor(prefix_extractor);
let db = DB::open(&opts, &n).unwrap();
let db = DB::open(&opts, &db_path).unwrap();
assert!(db.put(&*a1, &*a1).is_ok());
assert!(db.put(&*a2, &*a2).is_ok());
