Merge pull request #99 from alexreg/cleanup

Added explicit `CompareFn` type for comparator function.
Branch: master
Tyler Neely committed 8 years ago via GitHub
commit 6d37ead6c0
8 changed files:

1. librocksdb-sys/src/lib.rs (5 changed lines)
2. librocksdb-sys/src/test.rs (5 changed lines)
3. src/backup.rs (135 changed lines)
4. src/comparator.rs (13 changed lines)
5. src/db.rs (117 changed lines)
6. src/db_options.rs (103 changed lines)
7. src/lib.rs (53 changed lines)
8. src/merge_operator.rs (7 changed lines)

@@ -958,6 +958,11 @@ pub const rocksdb_fifo_compaction: c_int = 2;
pub const rocksdb_similar_size_compaction_stop_style: c_int = 0;
pub const rocksdb_total_size_compaction_stop_style: c_int = 1;
pub const rocksdb_recovery_mode_tolerate_corrupted_tail_records: c_int = 0;
pub const rocksdb_recovery_mode_absolute_consistency: c_int = 1;
pub const rocksdb_recovery_mode_point_in_time: c_int = 2;
pub const rocksdb_recovery_mode_skip_any_corrupted_record: c_int = 3;
pub enum rocksdb_t { }
pub enum rocksdb_backup_engine_t { }

@@ -41,11 +41,10 @@ fn internal() {
let rustpath = "_rust_rocksdb_internaltest";
let cpath = CString::new(rustpath).unwrap();
let cpath_ptr = cpath.as_ptr();
let mut err: *mut c_char = ptr::null_mut();
let err_ptr: *mut *mut c_char = &mut err;
let db = rocksdb_open(opts, cpath_ptr as *const _, err_ptr);
let db = rocksdb_open(opts, cpath.as_ptr() as *const _, err_ptr);
if !err.is_null() {
println!("failed to open rocksdb: {}", error_message(err));
}
@@ -80,7 +79,7 @@ fn internal() {
rocksdb_readoptions_destroy(readopts);
assert!(err.is_null());
rocksdb_close(db);
rocksdb_destroy_db(opts, cpath_ptr as *const _, err_ptr);
rocksdb_destroy_db(opts, cpath.as_ptr() as *const _, err_ptr);
assert!(err.is_null());
}
}

@@ -0,0 +1,135 @@
// Copyright 2016 Alex Regueiro
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
use {DB, Error};
use ffi;
use libc::{c_int, uint32_t};
use std::ffi::CString;
use std::path::Path;
pub struct BackupEngine {
inner: *mut ffi::rocksdb_backup_engine_t,
}
pub struct BackupEngineOptions {
inner: *mut ffi::rocksdb_options_t,
}
pub struct RestoreOptions {
inner: *mut ffi::rocksdb_restore_options_t,
}
impl BackupEngine {
/// Open a backup engine with the specified options.
pub fn open<P: AsRef<Path>>(opts: &BackupEngineOptions,
path: P)
-> Result<BackupEngine, Error> {
let path = path.as_ref();
let cpath = match CString::new(path.to_string_lossy().as_bytes()) {
Ok(c) => c,
Err(_) => {
return Err(Error::new("Failed to convert path to CString \
when opening backup engine"
.to_owned()))
}
};
let be: *mut ffi::rocksdb_backup_engine_t;
unsafe { be = ffi_try!(ffi::rocksdb_backup_engine_open(opts.inner, cpath.as_ptr())) }
if be.is_null() {
return Err(Error::new("Could not initialize backup engine.".to_owned()));
}
Ok(BackupEngine { inner: be })
}
fn create_new_backup(&mut self, db: &DB) -> Result<(), Error> {
unsafe {
ffi_try!(ffi::rocksdb_backup_engine_create_new_backup(self.inner, db.inner));
Ok(())
}
}
fn purge_old_backups(&mut self, num_backups_to_keep: usize) -> Result<(), Error> {
unsafe {
ffi_try!(ffi::rocksdb_backup_engine_purge_old_backups(self.inner,
num_backups_to_keep as uint32_t));
Ok(())
}
}
}
impl BackupEngineOptions {
//
}
impl RestoreOptions {
pub fn set_keep_log_files(&mut self, keep_log_files: bool) {
unsafe {
ffi::rocksdb_restore_options_set_keep_log_files(self.inner, keep_log_files as c_int);
}
}
}
impl Default for BackupEngineOptions {
fn default() -> BackupEngineOptions {
unsafe {
let opts = ffi::rocksdb_options_create();
if opts.is_null() {
panic!("Could not create RocksDB backup options".to_owned());
}
BackupEngineOptions { inner: opts }
}
}
}
impl Default for RestoreOptions {
fn default() -> RestoreOptions {
unsafe {
let opts = ffi::rocksdb_restore_options_create();
if opts.is_null() {
panic!("Could not create RocksDB restore options".to_owned());
}
RestoreOptions { inner: opts }
}
}
}
impl Drop for BackupEngine {
fn drop(&mut self) {
unsafe {
ffi::rocksdb_backup_engine_close(self.inner);
}
}
}
impl Drop for BackupEngineOptions {
fn drop(&mut self) {
unsafe {
ffi::rocksdb_options_destroy(self.inner);
}
}
}
impl Drop for RestoreOptions {
fn drop(&mut self) {
unsafe {
ffi::rocksdb_restore_options_destroy(self.inner);
}
}
}
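
For context, a minimal usage sketch of the new backup API. It assumes `create_new_backup` is callable from outside the module (the hunk above declares it without `pub`), and the backup directory name is illustrative:

```rust
use rocksdb::{DB, Error};
use rocksdb::backup::{BackupEngine, BackupEngineOptions};

fn backup(db: &DB) -> Result<(), Error> {
    let backup_opts = BackupEngineOptions::default();
    // Open (or create) a backup engine rooted at the given directory.
    let mut engine = BackupEngine::open(&backup_opts, "_rust_rocksdb_backup_sketch")?;
    // Snapshot the current contents of the database.
    engine.create_new_backup(db)
}
```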

@@ -13,15 +13,18 @@
// limitations under the License.
//
use libc::{c_char, c_int, c_void, size_t};
use std::cmp::Ordering;
use std::ffi::CString;
use std::mem;
use std::slice;
use libc::{c_char, c_int, c_void, size_t};
pub type CompareFn = fn(&[u8], &[u8]) -> Ordering;
pub struct ComparatorCallback {
pub name: CString,
pub f: fn(&[u8], &[u8]) -> i32,
pub f: CompareFn,
}
pub unsafe extern "C" fn destructor_callback(raw_cb: *mut c_void) {
@@ -43,5 +46,9 @@ pub unsafe extern "C" fn compare_callback(raw_cb: *mut c_void,
let cb: &mut ComparatorCallback = &mut *(raw_cb as *mut ComparatorCallback);
let a: &[u8] = slice::from_raw_parts(a_raw as *const u8, a_len as usize);
let b: &[u8] = slice::from_raw_parts(b_raw as *const u8, b_len as usize);
(cb.f)(a, b)
match (cb.f)(a, b) {
Ordering::Less => -1,
Ordering::Equal => 0,
Ordering::Greater => 1,
}
}
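
Since `CompareFn` is a plain `fn` pointer, a comparator can be written as a free function returning `Ordering`; `compare_callback` above translates that into the -1/0/1 the C API expects. A small sketch:

```rust
use std::cmp::Ordering;

// Orders keys in reverse byte-wise order.
fn reverse_order(a: &[u8], b: &[u8]) -> Ordering {
    b.cmp(a)
}
```

Such a function is registered through `Options::set_comparator` (see the src/db_options.rs hunk below).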

@@ -13,66 +13,54 @@
// limitations under the License.
//
use {DB, Error, Options, WriteOptions};
use ffi;
use libc::{self, c_char, c_int, c_uchar, c_void, size_t};
use std::collections::BTreeMap;
use std::error;
use std::ffi::CString;
use std::fmt;
use std::fs;
use std::ops::Deref;
use std::path::{Path, PathBuf};
use std::path::Path;
use std::ptr;
use std::slice;
use std::str;
use libc::{self, c_char, c_int, c_uchar, c_void, size_t};
use {Options, WriteOptions};
use ffi;
pub fn new_bloom_filter(bits: c_int) -> *mut ffi::rocksdb_filterpolicy_t {
unsafe { ffi::rocksdb_filterpolicy_create_bloom(bits) }
}
pub fn new_cache(capacity: size_t) -> *mut ffi::rocksdb_cache_t {
unsafe { ffi::rocksdb_cache_create_lru(capacity) }
}
/// RocksDB wrapper object.
pub struct DB {
inner: *mut ffi::rocksdb_t,
cfs: BTreeMap<String, *mut ffi::rocksdb_column_family_handle_t>,
path: PathBuf,
}
unsafe impl Send for DB {}
unsafe impl Sync for DB {}
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum DBCompressionType {
None = 0,
Snappy = 1,
Zlib = 2,
Bz2 = 3,
Lz4 = 4,
Lz4hc = 5,
None = ffi::rocksdb_no_compression as isize,
Snappy = ffi::rocksdb_snappy_compression as isize,
Zlib = ffi::rocksdb_zlib_compression as isize,
Bz2 = ffi::rocksdb_bz2_compression as isize,
Lz4 = ffi::rocksdb_lz4_compression as isize,
Lz4hc = ffi::rocksdb_lz4hc_compression as isize,
}
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum DBCompactionStyle {
Level = 0,
Universal = 1,
Fifo = 2,
Level = ffi::rocksdb_level_compaction as isize,
Universal = ffi::rocksdb_universal_compaction as isize,
Fifo = ffi::rocksdb_fifo_compaction as isize,
}
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum DBRecoveryMode {
TolerateCorruptedTailRecords = 0,
AbsoluteConsistency = 1,
PointInTime = 2,
SkipAnyCorruptedRecords = 3,
TolerateCorruptedTailRecords = ffi::rocksdb_recovery_mode_tolerate_corrupted_tail_records as isize,
AbsoluteConsistency = ffi::rocksdb_recovery_mode_absolute_consistency as isize,
PointInTime = ffi::rocksdb_recovery_mode_point_in_time as isize,
SkipAnyCorruptedRecord = ffi::rocksdb_recovery_mode_skip_any_corrupted_record as isize,
}
/// An atomic batch of mutations.
/// An atomic batch of write operations.
///
/// Making an atomic commit of several writes:
///
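
The doc example itself is elided by the hunk; a minimal sketch of an atomic batch, assuming the `put`/`delete`/`write` methods as in this crate (the path is illustrative):

```rust
use rocksdb::{DB, WriteBatch};

let db = DB::open_default("_rust_rocksdb_batch_sketch").unwrap();
let mut batch = WriteBatch::default();
batch.put(b"k1", b"v1111").unwrap();
batch.put(b"k2", b"v2222").unwrap();
batch.delete(b"k1").unwrap();
// Either every operation in the batch is applied, or none are.
db.write(batch).unwrap();
```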
@@ -151,39 +139,6 @@ pub enum Direction {
pub type KVBytes = (Box<[u8]>, Box<[u8]>);
#[derive(Debug, PartialEq)]
pub struct Error {
message: String,
}
impl Error {
fn new(message: String) -> Error {
Error { message: message }
}
pub fn to_string(self) -> String {
self.into()
}
}
impl From<Error> for String {
fn from(e: Error) -> String {
e.message
}
}
impl error::Error for Error {
fn description(&self) -> &str {
&self.message
}
}
impl fmt::Display for Error {
fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
self.message.fmt(formatter)
}
}
impl Iterator for DBIterator {
type Item = KVBytes;
@@ -346,34 +301,32 @@ impl DB {
DB::open(&opts, path)
}
/// Open the database with specified options
/// Open the database with the specified options.
pub fn open<P: AsRef<Path>>(opts: &Options, path: P) -> Result<DB, Error> {
DB::open_cf(opts, path, &[])
}
/// Open a database with specified options and column family
/// Open a database with specified options and column family.
///
/// A column family must be created first by calling `DB::create_cf`
/// A column family must be created first by calling `DB::create_cf`.
///
/// # Panics
///
/// * Panics if the column family doesn't exist
/// * Panics if the column family doesn't exist.
pub fn open_cf<P: AsRef<Path>>(opts: &Options, path: P, cfs: &[&str]) -> Result<DB, Error> {
let path = path.as_ref();
let cpath = match CString::new(path.to_string_lossy().as_bytes()) {
Ok(c) => c,
Err(_) => {
return Err(Error::new("Failed to convert path to CString \
when opening rocksdb"
when opening DB."
.to_owned()))
}
};
let cpath_ptr = cpath.as_ptr();
if let Err(e) = fs::create_dir_all(&path) {
return Err(Error::new(format!("Failed to create rocksdb \
directory: {:?}",
return Err(Error::new(format!("Failed to create RocksDB\
directory: `{:?}`.",
e)));
}
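
A hedged example of opening with a column family; the path and family name are illustrative:

```rust
use rocksdb::{DB, Options};

let mut opts = Options::default();
opts.create_if_missing(true);
// "cf1" must have been created earlier via `DB::create_cf`;
// per the doc comment above, opening an unknown family panics.
let db = DB::open_cf(&opts, "_rust_rocksdb_cf_sketch", &["cf1"]).unwrap();
```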
@@ -382,7 +335,7 @@ impl DB {
if cfs.len() == 0 {
unsafe {
db = ffi_try!(ffi::rocksdb_open(opts.inner, cpath_ptr as *const _));
db = ffi_try!(ffi::rocksdb_open(opts.inner, cpath.as_ptr() as *const _));
}
} else {
let mut cfs_v = cfs.to_vec();
@@ -409,7 +362,7 @@ impl DB {
unsafe {
db = ffi_try!(ffi::rocksdb_open_column_families(opts.inner,
cpath_ptr as *const _,
cpath.as_ptr() as *const _,
cfs_v.len() as c_int,
cfnames.as_ptr() as *const _,
cfopts.as_ptr(),
@@ -479,10 +432,10 @@ impl DB {
pub fn get_opt(&self, key: &[u8], readopts: &ReadOptions) -> Result<Option<DBVector>, Error> {
if readopts.inner.is_null() {
return Err(Error::new("Unable to create rocksdb read options. \
return Err(Error::new("Unable to create RocksDB read options. \
This is a fairly trivial call, and its \
failure may be indicative of a \
mis-compiled or mis-loaded rocksdb \
mis-compiled or mis-loaded RocksDB \
library."
.to_owned()));
}
@@ -513,10 +466,10 @@ impl DB {
readopts: &ReadOptions)
-> Result<Option<DBVector>, Error> {
if readopts.inner.is_null() {
return Err(Error::new("Unable to create rocksdb read options. \
return Err(Error::new("Unable to create RocksDB read options. \
This is a fairly trivial call, and its \
failure may be indicative of a \
mis-compiled or mis-loaded rocksdb \
mis-compiled or mis-loaded RocksDB \
library."
.to_owned()));
}
@@ -789,7 +742,7 @@ impl WriteBatch {
/// Remove the database entry for key.
///
/// Returns Err if the key was not found
/// Returns an error if the key was not found.
pub fn delete(&mut self, key: &[u8]) -> Result<(), Error> {
unsafe {
ffi::rocksdb_writebatch_delete(self.inner,
@@ -880,7 +833,7 @@ impl Default for ReadOptions {
}
}
/// Wrapper around bytes stored in the database
/// Vector of bytes stored in the database.
pub struct DBVector {
base: *mut u8,
len: usize,

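Since `DBVector` derefs to `&[u8]`, values read back from the database compare directly as byte slices. A small sketch, assuming `put`/`get` as in this crate's API:

```rust
use rocksdb::DB;

let db = DB::open_default("_rust_rocksdb_get_sketch").unwrap();
db.put(b"answer", b"42").unwrap();
if let Some(value) = db.get(b"answer").unwrap() {
    // `value` is a DBVector; deref yields the raw bytes.
    assert_eq!(&*value, b"42");
}
```
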
@@ -13,17 +13,21 @@
// limitations under the License.
//
use std::ffi::{CStr, CString};
use std::mem;
use libc::{self, c_int, c_uchar, c_uint, c_void, size_t, uint64_t};
use {BlockBasedOptions, Options, WriteOptions};
use comparator::{self, ComparatorCallback};
use {BlockBasedOptions, DBCompactionStyle, DBCompressionType, DBRecoveryMode, Options,
WriteOptions};
use comparator::{self, ComparatorCallback, CompareFn};
use ffi;
use libc::{self, c_int, c_uchar, c_uint, c_void, size_t, uint64_t};
use merge_operator::{self, MergeFn, MergeOperatorCallback, full_merge_callback,
partial_merge_callback};
use rocksdb::{DBCompactionStyle, DBCompressionType, DBRecoveryMode, new_cache};
use std::ffi::{CStr, CString};
use std::mem;
pub fn new_cache(capacity: size_t) -> *mut ffi::rocksdb_cache_t {
unsafe { ffi::rocksdb_cache_create_lru(capacity) }
}
impl Drop for Options {
fn drop(&mut self) {
@@ -88,7 +92,7 @@ impl Default for BlockBasedOptions {
fn default() -> BlockBasedOptions {
let block_opts = unsafe { ffi::rocksdb_block_based_options_create() };
if block_opts.is_null() {
panic!("Could not create rocksdb block based options".to_owned());
panic!("Could not create RocksDB block based options");
}
BlockBasedOptions { inner: block_opts }
}
@@ -125,7 +129,7 @@ impl Options {
/// If true, the database will be created if it is missing.
///
/// Default: false
/// Default: `false`
///
/// # Example
///
@@ -175,7 +179,7 @@ impl Options {
/// use rocksdb::{Options, DBCompressionType};
///
/// let mut opts = Options::default();
/// opts.compression_per_level(&[
/// opts.set_compression_per_level(&[
/// DBCompressionType::None,
/// DBCompressionType::None,
/// DBCompressionType::Snappy,
@@ -183,7 +187,7 @@ impl Options {
/// DBCompressionType::Snappy
/// ]);
/// ```
pub fn compression_per_level(&mut self, level_types: &[DBCompressionType]) {
pub fn set_compression_per_level(&mut self, level_types: &[DBCompressionType]) {
unsafe {
let level_types: Vec<_> = level_types.iter().map(|&t| t as c_int).collect();
ffi::rocksdb_options_set_compression_per_level(self.inner,
@@ -220,7 +224,7 @@ impl Options {
/// The client must ensure that the comparator supplied here has the same
/// name and orders keys *exactly* the same as the comparator provided to
/// previous open calls on the same DB.
pub fn set_comparator(&mut self, name: &str, compare_fn: fn(&[u8], &[u8]) -> i32) {
pub fn set_comparator(&mut self, name: &str, compare_fn: CompareFn) {
let cb = Box::new(ComparatorCallback {
name: CString::new(name.as_bytes()).unwrap(),
f: compare_fn,
@@ -236,7 +240,7 @@ impl Options {
}
#[deprecated(since = "0.5.0", note = "add_comparator has been renamed to set_comparator")]
pub fn add_comparator(&mut self, name: &str, compare_fn: fn(&[u8], &[u8]) -> i32) {
pub fn add_comparator(&mut self, name: &str, compare_fn: CompareFn) {
self.set_comparator(name, compare_fn);
}
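
Putting the new `CompareFn` type to use; the comparator name and DB path are illustrative, and per the doc comment above the name must stay consistent across opens:

```rust
use std::cmp::Ordering;
use rocksdb::{DB, Options};

fn reverse_order(a: &[u8], b: &[u8]) -> Ordering {
    b.cmp(a)
}

let mut opts = Options::default();
opts.create_if_missing(true);
// The same name and ordering must be supplied on every open of this DB.
opts.set_comparator("reverse", reverse_order);
let db = DB::open(&opts, "_rust_rocksdb_comparator_sketch").unwrap();
```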
@@ -247,12 +251,12 @@ impl Options {
}
/// Sets the number of open files that can be used by the DB. You may need to
/// increase this if your database has a large working set. Value -1 means
/// increase this if your database has a large working set. Value `-1` means
/// files opened are always kept open. You can estimate number of files based
/// on target_file_size_base and target_file_size_multiplier for level-based
/// compaction. For universal-style compaction, you can usually set it to -1.
/// compaction. For universal-style compaction, you can usually set it to `-1`.
///
/// Default: -1
/// Default: `-1`
///
/// # Example
///
@@ -273,7 +277,7 @@ impl Options {
/// This parameter should be set to true while storing data to
/// filesystem like ext3 that can lose files after a reboot.
///
/// Default: false
/// Default: `false`
///
/// # Example
///
@@ -291,9 +295,9 @@ impl Options {
/// written, asynchronously, in the background. This operation can be used
/// to smooth out write I/Os over time. Users shouldn't rely on it for
/// persistency guarantee.
/// Issue one request for every bytes_per_sync written. 0 turns it off.
/// Issue one request for every bytes_per_sync written. `0` turns it off.
///
/// Default: 0
/// Default: `0`
///
/// You may consider using rate_limiter to regulate write rate to device.
/// When rate limiter is enabled, it automatically enables bytes_per_sync
@@ -331,7 +335,7 @@ impl Options {
/// cache. If the disk block is requested again this can result in
/// additional disk I/O.
///
/// On WINDOWS system, files will be opened in "unbuffered I/O" mode
/// On WINDOWS systems, files will be opened in "unbuffered I/O" mode
/// which means that data read from the disk will not be cached or
buffered. The hardware buffer of the devices may however still
/// be used. Memory mapped files are not impacted by this parameter.
@@ -344,9 +348,9 @@ impl Options {
/// use rocksdb::Options;
///
/// let mut opts = Options::default();
/// opts.allow_os_buffer(false);
/// opts.set_allow_os_buffer(false);
/// ```
pub fn allow_os_buffer(&mut self, is_allow: bool) {
pub fn set_allow_os_buffer(&mut self, is_allow: bool) {
unsafe {
ffi::rocksdb_options_set_allow_os_buffer(self.inner, is_allow as c_uchar);
}
@@ -354,7 +358,7 @@ impl Options {
/// Sets the number of shards used for table cache.
///
/// Default: 6
/// Default: `6`
///
/// # Example
///
@@ -371,14 +375,14 @@ impl Options {
}
/// Sets the minimum number of write buffers that will be merged together
/// before writing to storage. If set to 1, then
/// before writing to storage. If set to `1`, then
/// all write buffers are flushed to L0 as individual files and this increases
/// read amplification because a get request has to check in all of these
files. Also, an in-memory merge may result in writing less
/// data to storage if there are duplicate records in each of these
/// individual write buffers.
///
/// Default: 1
/// Default: `1`
///
/// # Example
///
@@ -410,9 +414,9 @@ impl Options {
/// Increasing this value can reduce the number of reads to SST files
/// done for conflict detection.
///
/// Setting this value to 0 will cause write buffers to be freed immediately
/// Setting this value to `0` will cause write buffers to be freed immediately
/// after they are flushed.
/// If this value is set to -1, 'max_write_buffer_number' will be used.
/// If this value is set to `-1`, 'max_write_buffer_number' will be used.
///
/// Default:
/// If using a TransactionDB/OptimisticTransactionDB, the default value will
@@ -446,7 +450,7 @@ impl Options {
/// Note that write_buffer_size is enforced per column family.
/// See db_write_buffer_size for sharing memory across column families.
///
/// Default: 67108864 (64MiB)
/// Default: `0x4000000` (64MiB)
///
/// Dynamically changeable through SetOptions() API
///
@@ -473,7 +477,7 @@ impl Options {
/// will be 200MB, total file size for level-2 will be 2GB,
/// and total file size for level-3 will be 20GB.
///
/// Default: 268435456 (256MiB).
/// Default: `0x10000000` (256MiB).
///
/// Dynamically changeable through SetOptions() API
///
@@ -491,7 +495,7 @@ impl Options {
}
}
/// Default: 10
/// Default: `10`
///
/// # Example
///
@@ -534,7 +538,7 @@ impl Options {
/// be 2MB, and each file on level 2 will be 20MB,
/// and each file on level-3 will be 200MB.
///
/// Default: 67108864 (64MiB)
/// Default: `0x4000000` (64MiB)
///
/// Dynamically changeable through SetOptions() API
///
@@ -553,14 +557,14 @@ impl Options {
}
/// Sets the minimum number of write buffers that will be merged together
/// before writing to storage. If set to 1, then
/// before writing to storage. If set to `1`, then
/// all write buffers are flushed to L0 as individual files and this increases
/// read amplification because a get request has to check in all of these
files. Also, an in-memory merge may result in writing less
/// data to storage if there are duplicate records in each of these
/// individual write buffers.
///
/// Default: 1
/// Default: `1`
///
/// # Example
///
@@ -576,10 +580,10 @@ impl Options {
}
}
/// Sets the number of files to trigger level-0 compaction. A value <0 means that
/// Sets the number of files to trigger level-0 compaction. A value < `0` means that
/// level-0 compaction will not be triggered by number of files at all.
///
/// Default: 4
/// Default: `4`
///
/// Dynamically changeable through SetOptions() API
///
@@ -598,10 +602,10 @@ impl Options {
}
/// Sets the soft limit on number of level-0 files. We start slowing down writes at this
/// point. A value <0 means that no writing slow down will be triggered by
/// point. A value < `0` means that no writing slow down will be triggered by
/// number of files in level-0.
///
/// Default: 20
/// Default: `20`
///
/// Dynamically changeable through SetOptions() API
///
@@ -621,7 +625,7 @@ impl Options {
/// Sets the maximum number of level-0 files. We stop writes at this point.
///
/// Default: 24
/// Default: `24`
///
/// Dynamically changeable through SetOptions() API
///
@@ -669,7 +673,7 @@ impl Options {
/// LOW priority thread pool. For more information, see
/// Env::SetBackgroundThreads
///
/// Default: 1
/// Default: `1`
///
/// # Example
///
@@ -700,7 +704,7 @@ impl Options {
/// HIGH priority thread pool. For more information, see
/// Env::SetBackgroundThreads
///
/// Default: 1
/// Default: `1`
///
/// # Example
///
@@ -719,7 +723,7 @@ impl Options {
/// Disables automatic compactions. Manual compactions can still
/// be issued on this column family
///
/// Default: false
/// Default: `false`
///
/// Dynamically changeable through SetOptions() API
///
@@ -741,9 +745,9 @@ impl Options {
}
}
/// Measure IO stats in compactions and flushes, if true.
/// Measure IO stats in compactions and flushes, if `true`.
///
/// Default: false
/// Default: `false`
///
/// # Example
///
@@ -759,7 +763,7 @@ impl Options {
}
}
/// Recovery mode to control the consistency while replaying WAL
/// Recovery mode to control the consistency while replaying WAL.
///
/// Default: DBRecoveryMode::PointInTime
///
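
For instance, assuming the setter is named `set_wal_recovery_mode` (the method name is not shown in this hunk):

```rust
use rocksdb::{DBRecoveryMode, Options};

let mut opts = Options::default();
// Refuse to open the DB if any WAL record is corrupted.
opts.set_wal_recovery_mode(DBRecoveryMode::AbsoluteConsistency);
```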
@@ -797,9 +801,9 @@ impl Options {
}
}
/// If not zero, dump rocksdb.stats to LOG every stats_dump_period_sec
/// If not zero, dump `rocksdb.stats` to LOG every `stats_dump_period_sec`.
///
/// Default: 600 (10 min)
/// Default: `600` (10 mins)
///
/// # Example
///
@@ -815,7 +819,7 @@ impl Options {
}
}
/// Sets the number of levels for this database
/// Sets the number of levels for this database.
pub fn set_num_levels(&mut self, n: c_int) {
unsafe {
ffi::rocksdb_options_set_num_levels(self.inner, n);
@@ -828,14 +832,13 @@ impl Default for Options {
unsafe {
let opts = ffi::rocksdb_options_create();
if opts.is_null() {
panic!("Could not create rocksdb options".to_owned());
panic!("Could not create RocksDB options");
}
Options { inner: opts }
}
}
}
impl WriteOptions {
pub fn new() -> WriteOptions {
WriteOptions::default()
@@ -858,7 +861,7 @@ impl Default for WriteOptions {
fn default() -> WriteOptions {
let write_opts = unsafe { ffi::rocksdb_writeoptions_create() };
if write_opts.is_null() {
panic!("Could not create rocksdb write options".to_owned());
panic!("Could not create RocksDB write options");
}
WriteOptions { inner: write_opts }
}

@@ -37,15 +37,60 @@ extern crate librocksdb_sys as ffi;
#[macro_use]
mod ffi_util;
pub mod backup;
mod comparator;
pub mod merge_operator;
mod rocksdb;
mod rocksdb_options;
mod db;
mod db_options;
pub use rocksdb::{DB, DBCompactionStyle, DBCompressionType, DBIterator, DBRecoveryMode, DBVector,
Direction, Error, IteratorMode, Snapshot, WriteBatch, new_bloom_filter};
pub use db::{DBCompactionStyle, DBCompressionType, DBIterator, DBRecoveryMode, DBVector,
Direction, IteratorMode, Snapshot, WriteBatch, new_bloom_filter};
pub use merge_operator::MergeOperands;
use std::collections::BTreeMap;
use std::error;
use std::fmt;
use std::path::PathBuf;
/// A RocksDB database.
pub struct DB {
inner: *mut ffi::rocksdb_t,
cfs: BTreeMap<String, *mut ffi::rocksdb_column_family_handle_t>,
path: PathBuf,
}
#[derive(Debug, PartialEq)]
pub struct Error {
message: String,
}
impl Error {
fn new(message: String) -> Error {
Error { message: message }
}
pub fn to_string(self) -> String {
self.into()
}
}
impl From<Error> for String {
fn from(e: Error) -> String {
e.message
}
}
impl error::Error for Error {
fn description(&self) -> &str {
&self.message
}
}
impl fmt::Display for Error {
fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
self.message.fmt(formatter)
}
}
/// For configuring block-based file storage.
pub struct BlockBasedOptions {

@@ -53,13 +53,13 @@
//! }
//! ```
use libc::{self, c_char, c_int, c_void, size_t};
use std::ffi::CString;
use std::mem;
use std::ptr;
use std::slice;
use libc::{self, c_char, c_int, c_void, size_t};
pub type MergeFn = fn(&[u8], Option<&[u8]>, &mut MergeOperands) -> Vec<u8>;
pub struct MergeOperatorCallback {
@@ -199,8 +199,7 @@ fn test_provided_merge(new_key: &[u8],
#[test]
fn mergetest() {
use Options;
use rocksdb::DB;
use {DB, Options};
let path = "_rust_rocksdb_mergetest";
let mut opts = Options::default();

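To round out the test the hunk truncates, a hedged sketch of a `MergeFn` that concatenates operands; `set_merge_operator` is assumed to mirror the `set_comparator` setter, and the name and path are illustrative:

```rust
use rocksdb::{DB, MergeOperands, Options};

// Concatenates the existing value (if any) with every merge operand.
fn concat_merge(_key: &[u8],
                existing: Option<&[u8]>,
                operands: &mut MergeOperands)
                -> Vec<u8> {
    let mut result = existing.map(|v| v.to_vec()).unwrap_or_else(Vec::new);
    for op in operands {
        result.extend_from_slice(op);
    }
    result
}

let mut opts = Options::default();
opts.create_if_missing(true);
opts.set_merge_operator("concat", concat_merge);
let db = DB::open(&opts, "_rust_rocksdb_merge_sketch").unwrap();
db.merge(b"k", b"a").unwrap();
db.merge(b"k", b"b").unwrap();
// A subsequent get of "k" yields the concatenation "ab".
```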