From 57d4669d974d65cf894dc698537f7ae3f83d7f2f Mon Sep 17 00:00:00 2001 From: Alexander Regueiro Date: Sun, 20 Nov 2016 21:17:18 +0000 Subject: [PATCH 1/8] Added explicit `CompareFn` type for comparator function. Changed return type of comparator function from `i32` to `std::cmp::Ordering`. --- src/comparator.rs | 11 +++++++++-- src/rocksdb_options.rs | 6 +++--- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/src/comparator.rs b/src/comparator.rs index 58fc65a..ae998d9 100644 --- a/src/comparator.rs +++ b/src/comparator.rs @@ -13,15 +13,18 @@ // limitations under the License. // +use std::cmp::Ordering; use std::ffi::CString; use std::mem; use std::slice; use libc::{c_char, c_int, c_void, size_t}; +pub type CompareFn = fn(&[u8], &[u8]) -> Ordering; + pub struct ComparatorCallback { pub name: CString, - pub f: fn(&[u8], &[u8]) -> i32, + pub f: CompareFn, } pub unsafe extern "C" fn destructor_callback(raw_cb: *mut c_void) { @@ -43,5 +46,9 @@ pub unsafe extern "C" fn compare_callback(raw_cb: *mut c_void, let cb: &mut ComparatorCallback = &mut *(raw_cb as *mut ComparatorCallback); let a: &[u8] = slice::from_raw_parts(a_raw as *const u8, a_len as usize); let b: &[u8] = slice::from_raw_parts(b_raw as *const u8, b_len as usize); - (cb.f)(a, b) + match (cb.f)(a, b) { + Ordering::Less => -1, + Ordering::Equal => 0, + Ordering::Greater => 1, + } } diff --git a/src/rocksdb_options.rs b/src/rocksdb_options.rs index 49b03ff..d0067a7 100644 --- a/src/rocksdb_options.rs +++ b/src/rocksdb_options.rs @@ -19,7 +19,7 @@ use std::mem; use libc::{self, c_int, c_uchar, c_uint, c_void, size_t, uint64_t}; use {BlockBasedOptions, Options, WriteOptions}; -use comparator::{self, ComparatorCallback}; +use comparator::{self, ComparatorCallback, CompareFn}; use ffi; use merge_operator::{self, MergeFn, MergeOperatorCallback, full_merge_callback, partial_merge_callback}; @@ -220,7 +220,7 @@ impl Options { /// The client must ensure that the comparator supplied here has the same /// name and orders keys *exactly* the same as the comparator provided to /// previous open calls on the same DB. - pub fn set_comparator(&mut self, name: &str, compare_fn: fn(&[u8], &[u8]) -> i32) { + pub fn set_comparator(&mut self, name: &str, compare_fn: CompareFn) { let cb = Box::new(ComparatorCallback { name: CString::new(name.as_bytes()).unwrap(), f: compare_fn, @@ -236,7 +236,7 @@ impl Options { } #[deprecated(since = "0.5.0", note = "add_comparator has been renamed to set_comparator")] - pub fn add_comparator(&mut self, name: &str, compare_fn: fn(&[u8], &[u8]) -> i32) { + pub fn add_comparator(&mut self, name: &str, compare_fn: CompareFn) { self.set_comparator(name, compare_fn); } From 882aaffd524681c71e05321dafd5d6cc41459db6 Mon Sep 17 00:00:00 2001 From: Alexander Regueiro Date: Sun, 20 Nov 2016 21:36:13 +0000 Subject: [PATCH 2/8] Improved doc comments. --- src/rocksdb.rs | 14 ++++----- src/rocksdb_options.rs | 66 +++++++++++++++++++++--------------------- 2 files changed, 40 insertions(+), 40 deletions(-) diff --git a/src/rocksdb.rs b/src/rocksdb.rs index b1eef6c..2a970a0 100644 --- a/src/rocksdb.rs +++ b/src/rocksdb.rs @@ -37,7 +37,7 @@ pub fn new_cache(capacity: size_t) -> *mut ffi::rocksdb_cache_t { unsafe { ffi::rocksdb_cache_create_lru(capacity) } } -/// RocksDB wrapper object. +/// A RocksDB database. pub struct DB { inner: *mut ffi::rocksdb_t, cfs: BTreeMap, @@ -72,7 +72,7 @@ pub enum DBRecoveryMode { SkipAnyCorruptedRecords = 3, } -/// An atomic batch of mutations. +/// An atomic batch of write operations. 
/// /// Making an atomic commit of several writes: /// @@ -346,14 +346,14 @@ impl DB { DB::open(&opts, path) } - /// Open the database with specified options + /// Open the database with the specified options. pub fn open>(opts: &Options, path: P) -> Result { DB::open_cf(opts, path, &[]) } - /// Open a database with specified options and column family + /// Open a database with specified options and column family. /// - /// A column family must be created first by calling `DB::create_cf` + /// A column family must be created first by calling `DB::create_cf`. /// /// # Panics /// @@ -789,7 +789,7 @@ impl WriteBatch { /// Remove the database entry for key. /// - /// Returns Err if the key was not found + /// Returns an error if the key was not found. pub fn delete(&mut self, key: &[u8]) -> Result<(), Error> { unsafe { ffi::rocksdb_writebatch_delete(self.inner, @@ -880,7 +880,7 @@ impl Default for ReadOptions { } } -/// Wrapper around bytes stored in the database +/// Vector of bytes stored in the database. pub struct DBVector { base: *mut u8, len: usize, diff --git a/src/rocksdb_options.rs b/src/rocksdb_options.rs index d0067a7..27561a6 100644 --- a/src/rocksdb_options.rs +++ b/src/rocksdb_options.rs @@ -125,7 +125,7 @@ impl Options { /// If true, the database will be created if it is missing. /// - /// Default: false + /// Default: `false` /// /// # Example /// @@ -247,12 +247,12 @@ impl Options { } /// Sets the number of open files that can be used by the DB. You may need to - /// increase this if your database has a large working set. Value -1 means + /// increase this if your database has a large working set. Value `-1` means /// files opened are always kept open. You can estimate number of files based /// on target_file_size_base and target_file_size_multiplier for level-based - /// compaction. For universal-style compaction, you can usually set it to -1. + /// compaction. For universal-style compaction, you can usually set it to `-1`. /// - /// Default: -1 + /// Default: `-1` /// /// # Example /// @@ -273,7 +273,7 @@ impl Options { /// This parameter should be set to true while storing data to /// filesystem like ext3 that can lose files after a reboot. /// - /// Default: false + /// Default: `false` /// /// # Example /// @@ -291,9 +291,9 @@ impl Options { /// written, asynchronously, in the background. This operation can be used /// to smooth out write I/Os over time. Users shouldn't rely on it for /// persistency guarantee. - /// Issue one request for every bytes_per_sync written. 0 turns it off. + /// Issue one request for every bytes_per_sync written. `0` turns it off. /// - /// Default: 0 + /// Default: `0` /// /// You may consider using rate_limiter to regulate write rate to device. /// When rate limiter is enabled, it automatically enables bytes_per_sync @@ -331,7 +331,7 @@ impl Options { /// cache. If the disk block is requested again this can result in /// additional disk I/O. /// - /// On WINDOWS system, files will be opened in "unbuffered I/O" mode + /// On WINDOWS systems, files will be opened in "unbuffered I/O" mode /// which means that data read from the disk will not be cached or /// bufferized. The hardware buffer of the devices may however still /// be used. Memory mapped files are not impacted by this parameter. @@ -354,7 +354,7 @@ impl Options { /// Sets the number of shards used for table cache. 
/// - /// Default: 6 + /// Default: `6` /// /// # Example /// @@ -371,14 +371,14 @@ impl Options { } /// Sets the minimum number of write buffers that will be merged together - /// before writing to storage. If set to 1, then + /// before writing to storage. If set to `1`, then /// all write buffers are flushed to L0 as individual files and this increases /// read amplification because a get request has to check in all of these /// files. Also, an in-memory merge may result in writing lesser /// data to storage if there are duplicate records in each of these /// individual write buffers. /// - /// Default: 1 + /// Default: `1` /// /// # Example /// @@ -410,9 +410,9 @@ impl Options { /// Increasing this value can reduce the number of reads to SST files /// done for conflict detection. /// - /// Setting this value to 0 will cause write buffers to be freed immediately + /// Setting this value to `0` will cause write buffers to be freed immediately /// after they are flushed. - /// If this value is set to -1, 'max_write_buffer_number' will be used. + /// If this value is set to `-1`, 'max_write_buffer_number' will be used. /// /// Default: /// If using a TransactionDB/OptimisticTransactionDB, the default value will @@ -446,7 +446,7 @@ impl Options { /// Note that write_buffer_size is enforced per column family. /// See db_write_buffer_size for sharing memory across column families. /// - /// Default: 67108864 (64MiB) + /// Default: `0x4000000` (64MiB) /// /// Dynamically changeable through SetOptions() API /// @@ -473,7 +473,7 @@ impl Options { /// will be 200MB, total file size for level-2 will be 2GB, /// and total file size for level-3 will be 20GB. /// - /// Default: 268435456 (256MiB). + /// Default: `0x10000000` (256MiB). /// /// Dynamically changeable through SetOptions() API /// @@ -491,7 +491,7 @@ impl Options { } } - /// Default: 10 + /// Default: `10` /// /// # Example /// @@ -534,7 +534,7 @@ impl Options { /// be 2MB, and each file on level 2 will be 20MB, /// and each file on level-3 will be 200MB. /// - /// Default: 67108864 (64MiB) + /// Default: `0x4000000` (64MiB) /// /// Dynamically changeable through SetOptions() API /// @@ -553,14 +553,14 @@ impl Options { } /// Sets the minimum number of write buffers that will be merged together - /// before writing to storage. If set to 1, then + /// before writing to storage. If set to `1`, then /// all write buffers are flushed to L0 as individual files and this increases /// read amplification because a get request has to check in all of these /// files. Also, an in-memory merge may result in writing lesser /// data to storage if there are duplicate records in each of these /// individual write buffers. /// - /// Default: 1 + /// Default: `1` /// /// # Example /// @@ -576,10 +576,10 @@ impl Options { } } - /// Sets the number of files to trigger level-0 compaction. A value <0 means that + /// Sets the number of files to trigger level-0 compaction. A value < `0` means that /// level-0 compaction will not be triggered by number of files at all. /// - /// Default: 4 + /// Default: `4` /// /// Dynamically changeable through SetOptions() API /// @@ -598,10 +598,10 @@ impl Options { } /// Sets the soft limit on number of level-0 files. We start slowing down writes at this - /// point. A value <0 means that no writing slow down will be triggered by + /// point. A value < `0` means that no writing slow down will be triggered by /// number of files in level-0. 
/// - /// Default: 20 + /// Default: `20` /// /// Dynamically changeable through SetOptions() API /// @@ -621,7 +621,7 @@ impl Options { /// Sets the maximum number of level-0 files. We stop writes at this point. /// - /// Default: 24 + /// Default: `24` /// /// Dynamically changeable through SetOptions() API /// @@ -669,7 +669,7 @@ impl Options { /// LOW priority thread pool. For more information, see /// Env::SetBackgroundThreads /// - /// Default: 1 + /// Default: `1` /// /// # Example /// @@ -700,7 +700,7 @@ impl Options { /// HIGH priority thread pool. For more information, see /// Env::SetBackgroundThreads /// - /// Default: 1 + /// Default: `1` /// /// # Example /// @@ -719,7 +719,7 @@ impl Options { /// Disables automatic compactions. Manual compactions can still /// be issued on this column family /// - /// Default: false + /// Default: `false` /// /// Dynamically changeable through SetOptions() API /// @@ -741,9 +741,9 @@ impl Options { } } - /// Measure IO stats in compactions and flushes, if true. + /// Measure IO stats in compactions and flushes, if `true`. /// - /// Default: false + /// Default: `false` /// /// # Example /// @@ -759,7 +759,7 @@ impl Options { } } - /// Recovery mode to control the consistency while replaying WAL + /// Recovery mode to control the consistency while replaying WAL. /// /// Default: DBRecoveryMode::PointInTime /// @@ -797,9 +797,9 @@ impl Options { } } - /// If not zero, dump rocksdb.stats to LOG every stats_dump_period_sec + /// If not zero, dump `rocksdb.stats` to LOG every `stats_dump_period_sec`. /// - /// Default: 600 (10 min) + /// Default: `600` (10 mins) /// /// # Example /// @@ -815,7 +815,7 @@ impl Options { } } - /// Sets the number of levels for this database + /// Sets the number of levels for this database. pub fn set_num_levels(&mut self, n: c_int) { unsafe { ffi::rocksdb_options_set_num_levels(self.inner, n); From 8cb22e5f1bd5924d0e9464e90b6909661ce551eb Mon Sep 17 00:00:00 2001 From: Alexander Regueiro Date: Mon, 21 Nov 2016 16:55:54 +0000 Subject: [PATCH 3/8] Related enum type definitions to FFI. 
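Previously the enum discriminants in src/rocksdb.rs were hard-coded (0, 1, 2, ...), so nothing tied them to the values the C API actually uses. Deriving them from the constants exported by librocksdb-sys, as this patch does, keeps the two in lock-step. A minimal, self-contained sketch of the pattern — the constant names below are illustrative stand-ins, not the real librocksdb-sys identifiers:

```
// Stand-ins for the `pub const ...: c_int` items that librocksdb-sys exports.
const RECOVERY_MODE_ABSOLUTE_CONSISTENCY: i32 = 1;
const RECOVERY_MODE_POINT_IN_TIME: i32 = 2;

#[derive(Debug, Copy, Clone, PartialEq)]
enum RecoveryMode {
    // `const` items are valid in discriminant position, so the enum tracks
    // the FFI layer instead of repeating magic numbers.
    AbsoluteConsistency = RECOVERY_MODE_ABSOLUTE_CONSISTENCY as isize,
    PointInTime = RECOVERY_MODE_POINT_IN_TIME as isize,
}

fn pass_to_ffi(mode: RecoveryMode) -> i32 {
    // Cast back to the raw integer at the FFI boundary.
    mode as i32
}

fn main() {
    assert_eq!(pass_to_ffi(RecoveryMode::PointInTime), 2);
}
```

Note that, beyond the discriminants, the diff also renames the variant SkipAnyCorruptedRecords to SkipAnyCorruptedRecord to match the FFI constant — a breaking change for callers matching on that variant.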
--- librocksdb-sys/src/lib.rs | 5 +++++ src/rocksdb.rs | 26 +++++++++++++------------- 2 files changed, 18 insertions(+), 13 deletions(-) diff --git a/librocksdb-sys/src/lib.rs b/librocksdb-sys/src/lib.rs index c5d7e77..fb1ce6c 100644 --- a/librocksdb-sys/src/lib.rs +++ b/librocksdb-sys/src/lib.rs @@ -958,6 +958,11 @@ pub const rocksdb_fifo_compaction: c_int = 2; pub const rocksdb_similar_size_compaction_stop_style: c_int = 0; pub const rocksdb_total_size_compaction_stop_style: c_int = 1; +pub const rocksdb_recovery_mode_tolerate_corrupted_tail_records: c_int = 0; +pub const rocksdb_recovery_mode_absolute_consistency: c_int = 1; +pub const rocksdb_recovery_mode_point_in_time: c_int = 2; +pub const rocksdb_recovery_mode_skip_any_corrupted_record: c_int = 3; + pub enum rocksdb_t { } pub enum rocksdb_backup_engine_t { } diff --git a/src/rocksdb.rs b/src/rocksdb.rs index 2a970a0..320716d 100644 --- a/src/rocksdb.rs +++ b/src/rocksdb.rs @@ -49,27 +49,27 @@ unsafe impl Sync for DB {} #[derive(Debug, Copy, Clone, PartialEq)] pub enum DBCompressionType { - None = 0, - Snappy = 1, - Zlib = 2, - Bz2 = 3, - Lz4 = 4, - Lz4hc = 5, + None = ffi::rocksdb_no_compression as isize, + Snappy = ffi::rocksdb_snappy_compression as isize, + Zlib = ffi::rocksdb_zlib_compression as isize, + Bz2 = ffi::rocksdb_bz2_compression as isize, + Lz4 = ffi::rocksdb_lz4_compression as isize, + Lz4hc = ffi::rocksdb_lz4hc_compression as isize, } #[derive(Debug, Copy, Clone, PartialEq)] pub enum DBCompactionStyle { - Level = 0, - Universal = 1, - Fifo = 2, + Level = ffi::rocksdb_level_compaction as isize, + Universal = ffi::rocksdb_universal_compaction as isize, + Fifo = ffi::rocksdb_fifo_compaction as isize, } #[derive(Debug, Copy, Clone, PartialEq)] pub enum DBRecoveryMode { - TolerateCorruptedTailRecords = 0, - AbsoluteConsistency = 1, - PointInTime = 2, - SkipAnyCorruptedRecords = 3, + TolerateCorruptedTailRecords = ffi::rocksdb_recovery_mode_tolerate_corrupted_tail_records as isize, + AbsoluteConsistency = ffi::rocksdb_recovery_mode_absolute_consistency as isize, + PointInTime = ffi::rocksdb_recovery_mode_point_in_time as isize, + SkipAnyCorruptedRecord = ffi::rocksdb_recovery_mode_skip_any_corrupted_record as isize, } /// An atomic batch of write operations. From e67dc3ec7f3d15f2b3aa2fcfd20556b46ed87982 Mon Sep 17 00:00:00 2001 From: Alexander Regueiro Date: Mon, 21 Nov 2016 17:03:22 +0000 Subject: [PATCH 4/8] Fixed method names. 
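Both renames below (compression_per_level to set_compression_per_level, allow_os_buffer to set_allow_os_buffer) follow the crate's set_* naming convention, but unlike add_comparator in patch 1 they drop the old names without a forwarding alias, so existing callers break at compile time. If a softer migration were wanted, the same #[deprecated] shim would apply — a sketch using a stand-in type, with an assumed version number:

```
// Stand-in for illustration only; not the crate's `Options`.
pub struct Options;

impl Options {
    /// The new, conventionally named setter.
    pub fn set_allow_os_buffer(&mut self, is_allow: bool) {
        // ... would forward to ffi::rocksdb_options_set_allow_os_buffer ...
        let _ = is_allow;
    }

    /// Kept only so existing callers keep compiling; warns at the call site.
    /// The `since` version here is assumed, not taken from the crate.
    #[deprecated(since = "0.6.0",
                 note = "allow_os_buffer has been renamed to set_allow_os_buffer")]
    pub fn allow_os_buffer(&mut self, is_allow: bool) {
        self.set_allow_os_buffer(is_allow);
    }
}

fn main() {
    let mut opts = Options;
    opts.set_allow_os_buffer(false);
}
```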
--- src/rocksdb_options.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rocksdb_options.rs b/src/rocksdb_options.rs index 27561a6..8b6164e 100644 --- a/src/rocksdb_options.rs +++ b/src/rocksdb_options.rs @@ -183,7 +183,7 @@ impl Options { /// DBCompressionType::Snappy /// ]); /// ``` - pub fn compression_per_level(&mut self, level_types: &[DBCompressionType]) { + pub fn set_compression_per_level(&mut self, level_types: &[DBCompressionType]) { unsafe { let level_types: Vec<_> = level_types.iter().map(|&t| t as c_int).collect(); ffi::rocksdb_options_set_compression_per_level(self.inner, @@ -346,7 +346,7 @@ impl Options { /// let mut opts = Options::default(); /// opts.allow_os_buffer(false); /// ``` - pub fn allow_os_buffer(&mut self, is_allow: bool) { + pub fn set_allow_os_buffer(&mut self, is_allow: bool) { unsafe { ffi::rocksdb_options_set_allow_os_buffer(self.inner, is_allow as c_uchar); } From 75622d36605aea079305dd42937b3dec06aac651 Mon Sep 17 00:00:00 2001 From: Alexander Regueiro Date: Sat, 26 Nov 2016 16:30:25 +0000 Subject: [PATCH 5/8] Fixed method names in doc tests. --- src/rocksdb_options.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rocksdb_options.rs b/src/rocksdb_options.rs index 8b6164e..c36a866 100644 --- a/src/rocksdb_options.rs +++ b/src/rocksdb_options.rs @@ -175,7 +175,7 @@ impl Options { /// use rocksdb::{Options, DBCompressionType}; /// /// let mut opts = Options::default(); - /// opts.compression_per_level(&[ + /// opts.set_compression_per_level(&[ /// DBCompressionType::None, /// DBCompressionType::None, /// DBCompressionType::Snappy, @@ -344,7 +344,7 @@ impl Options { /// use rocksdb::Options; /// /// let mut opts = Options::default(); - /// opts.allow_os_buffer(false); + /// opts.set_allow_os_buffer(false); /// ``` pub fn set_allow_os_buffer(&mut self, is_allow: bool) { unsafe { From d28eba2ff5ef41a656136e04fb16291308f6722a Mon Sep 17 00:00:00 2001 From: Alexander Regueiro Date: Sat, 26 Nov 2016 23:40:27 +0000 Subject: [PATCH 6/8] Added basic support for backups. 
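The new src/backup.rs wraps rocksdb_backup_engine_t behind BackupEngine and BackupEngineOptions. A hedged usage sketch follows; note that create_new_backup and purge_old_backups are declared as private fns in this patch, so the calls below assume they are eventually made pub (presumably the intent for an externally usable API), and the paths are placeholders:

```
extern crate rocksdb;

use rocksdb::{DB, Options};
use rocksdb::backup::{BackupEngine, BackupEngineOptions};

fn main() {
    let mut db_opts = Options::default();
    db_opts.create_if_missing(true);
    let db = DB::open(&db_opts, "_example_db").unwrap();

    // Backups live in a directory of their own, separate from the DB path.
    let backup_opts = BackupEngineOptions::default();
    let mut engine = BackupEngine::open(&backup_opts, "_example_backups").unwrap();

    // Snapshot the current state of the database ...
    engine.create_new_backup(&db).unwrap();
    // ... and keep only the five most recent backups around.
    engine.purge_old_backups(5).unwrap();
}
```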
--- librocksdb-sys/src/test.rs | 5 +- src/backup.rs | 100 +++++++++++++++++++++++++++++++++++++ src/lib.rs | 49 +++++++++++++++++- src/rocksdb.rs | 67 +++++-------------------- src/rocksdb_options.rs | 7 ++- 5 files changed, 164 insertions(+), 64 deletions(-) create mode 100644 src/backup.rs diff --git a/librocksdb-sys/src/test.rs b/librocksdb-sys/src/test.rs index 6d3aa79..d7718a4 100644 --- a/librocksdb-sys/src/test.rs +++ b/librocksdb-sys/src/test.rs @@ -41,11 +41,10 @@ fn internal() { let rustpath = "_rust_rocksdb_internaltest"; let cpath = CString::new(rustpath).unwrap(); - let cpath_ptr = cpath.as_ptr(); let mut err: *mut c_char = ptr::null_mut(); let err_ptr: *mut *mut c_char = &mut err; - let db = rocksdb_open(opts, cpath_ptr as *const _, err_ptr); + let db = rocksdb_open(opts, cpath.as_ptr() as *const _, err_ptr); if !err.is_null() { println!("failed to open rocksdb: {}", error_message(err)); } @@ -80,7 +79,7 @@ fn internal() { rocksdb_readoptions_destroy(readopts); assert!(err.is_null()); rocksdb_close(db); - rocksdb_destroy_db(opts, cpath_ptr as *const _, err_ptr); + rocksdb_destroy_db(opts, cpath.as_ptr() as *const _, err_ptr); assert!(err.is_null()); } } diff --git a/src/backup.rs b/src/backup.rs new file mode 100644 index 0000000..13572a4 --- /dev/null +++ b/src/backup.rs @@ -0,0 +1,100 @@ +// Copyright 2016 Alex Regueiro +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +use std::ffi::CString; +use std::path::{Path}; + +use libc::{uint32_t}; + +use {DB, Error}; +use ffi; + +pub struct BackupEngine { + inner: *mut ffi::rocksdb_backup_engine_t, +} + +pub struct BackupEngineOptions { + inner: *mut ffi::rocksdb_options_t, +} + +impl BackupEngine { + /// Open a backup engine with the specified options. 
+ pub fn open>(opts: &BackupEngineOptions, path: P) -> Result { + let path = path.as_ref(); + let cpath = match CString::new(path.to_string_lossy().as_bytes()) { + Ok(c) => c, + Err(_) => { + return Err(Error::new("Failed to convert path to CString \ + when opening backup engine" + .to_owned())) + } + }; + + let be: *mut ffi::rocksdb_backup_engine_t; + unsafe { + be = ffi_try!(ffi::rocksdb_backup_engine_open(opts.inner, cpath.as_ptr())) + } + + if be.is_null() { + return Err(Error::new("Could not initialize backup engine.".to_owned())); + } + + Ok(BackupEngine { + inner: be, + }) + } + + fn create_new_backup(&mut self, db: &DB) -> Result<(), Error> { + unsafe { + ffi_try!(ffi::rocksdb_backup_engine_create_new_backup(self.inner, db.inner)); + Ok(()) + } + } + + fn purge_old_backups(&mut self, num_backups_to_keep: usize) -> Result<(), Error> { + unsafe { + ffi_try!(ffi::rocksdb_backup_engine_purge_old_backups(self.inner, num_backups_to_keep as uint32_t)); + Ok(()) + } + } +} + +impl Default for BackupEngineOptions { + fn default() -> BackupEngineOptions { + unsafe { + let opts = ffi::rocksdb_options_create(); + if opts.is_null() { + panic!("Could not create backup options".to_owned()); + } + BackupEngineOptions { inner: opts } + } + } +} + +impl Drop for BackupEngine { + fn drop(&mut self) { + unsafe { + ffi::rocksdb_backup_engine_close(self.inner); + } + } +} + +impl Drop for BackupEngineOptions { + fn drop(&mut self) { + unsafe { + ffi::rocksdb_options_destroy(self.inner); + } + } +} diff --git a/src/lib.rs b/src/lib.rs index 0854799..9596ff2 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -37,15 +37,60 @@ extern crate librocksdb_sys as ffi; #[macro_use] mod ffi_util; +pub mod backup; mod comparator; pub mod merge_operator; mod rocksdb; mod rocksdb_options; -pub use rocksdb::{DB, DBCompactionStyle, DBCompressionType, DBIterator, DBRecoveryMode, DBVector, - Direction, Error, IteratorMode, Snapshot, WriteBatch, new_bloom_filter}; +use std::collections::BTreeMap; +use std::error; +use std::fmt; +use std::path::{PathBuf}; pub use merge_operator::MergeOperands; +pub use rocksdb::{DBCompactionStyle, DBCompressionType, DBIterator, DBRecoveryMode, DBVector, + Direction, IteratorMode, Snapshot, WriteBatch, new_bloom_filter}; + +/// A RocksDB database. +pub struct DB { + inner: *mut ffi::rocksdb_t, + cfs: BTreeMap, + path: PathBuf, +} + +#[derive(Debug, PartialEq)] +pub struct Error { + message: String, +} + +impl Error { + fn new(message: String) -> Error { + Error { message: message } + } + + pub fn to_string(self) -> String { + self.into() + } +} + +impl From for String { + fn from(e: Error) -> String { + e.message + } +} + +impl error::Error for Error { + fn description(&self) -> &str { + &self.message + } +} + +impl fmt::Display for Error { + fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> { + self.message.fmt(formatter) + } +} /// For configuring block-based file storage. 
pub struct BlockBasedOptions { diff --git a/src/rocksdb.rs b/src/rocksdb.rs index 320716d..d529868 100644 --- a/src/rocksdb.rs +++ b/src/rocksdb.rs @@ -14,19 +14,18 @@ // use std::collections::BTreeMap; -use std::error; use std::ffi::CString; use std::fmt; use std::fs; use std::ops::Deref; -use std::path::{Path, PathBuf}; +use std::path::{Path}; use std::ptr; use std::slice; use std::str; use libc::{self, c_char, c_int, c_uchar, c_void, size_t}; -use {Options, WriteOptions}; +use {DB, Error, Options, WriteOptions}; use ffi; pub fn new_bloom_filter(bits: c_int) -> *mut ffi::rocksdb_filterpolicy_t { @@ -37,13 +36,6 @@ pub fn new_cache(capacity: size_t) -> *mut ffi::rocksdb_cache_t { unsafe { ffi::rocksdb_cache_create_lru(capacity) } } -/// A RocksDB database. -pub struct DB { - inner: *mut ffi::rocksdb_t, - cfs: BTreeMap, - path: PathBuf, -} - unsafe impl Send for DB {} unsafe impl Sync for DB {} @@ -151,39 +143,6 @@ pub enum Direction { pub type KVBytes = (Box<[u8]>, Box<[u8]>); -#[derive(Debug, PartialEq)] -pub struct Error { - message: String, -} - -impl Error { - fn new(message: String) -> Error { - Error { message: message } - } - - pub fn to_string(self) -> String { - self.into() - } -} - -impl From for String { - fn from(e: Error) -> String { - e.message - } -} - -impl error::Error for Error { - fn description(&self) -> &str { - &self.message - } -} - -impl fmt::Display for Error { - fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> { - self.message.fmt(formatter) - } -} - impl Iterator for DBIterator { type Item = KVBytes; @@ -357,23 +316,21 @@ impl DB { /// /// # Panics /// - /// * Panics if the column family doesn't exist + /// * Panics if the column family doesn't exist. pub fn open_cf>(opts: &Options, path: P, cfs: &[&str]) -> Result { let path = path.as_ref(); - let cpath = match CString::new(path.to_string_lossy().as_bytes()) { Ok(c) => c, Err(_) => { return Err(Error::new("Failed to convert path to CString \ - when opening rocksdb" + when opening DB." .to_owned())) } }; - let cpath_ptr = cpath.as_ptr(); if let Err(e) = fs::create_dir_all(&path) { - return Err(Error::new(format!("Failed to create rocksdb \ - directory: {:?}", + return Err(Error::new(format!("Failed to create RocksDB\ + directory: `{:?}`.", e))); } @@ -382,7 +339,7 @@ impl DB { if cfs.len() == 0 { unsafe { - db = ffi_try!(ffi::rocksdb_open(opts.inner, cpath_ptr as *const _)); + db = ffi_try!(ffi::rocksdb_open(opts.inner, cpath.as_ptr() as *const _)); } } else { let mut cfs_v = cfs.to_vec(); @@ -409,7 +366,7 @@ impl DB { unsafe { db = ffi_try!(ffi::rocksdb_open_column_families(opts.inner, - cpath_ptr as *const _, + cpath.as_ptr() as *const _, cfs_v.len() as c_int, cfnames.as_ptr() as *const _, cfopts.as_ptr(), @@ -479,10 +436,10 @@ impl DB { pub fn get_opt(&self, key: &[u8], readopts: &ReadOptions) -> Result, Error> { if readopts.inner.is_null() { - return Err(Error::new("Unable to create rocksdb read options. \ + return Err(Error::new("Unable to create RocksDB read options. \ This is a fairly trivial call, and its \ failure may be indicative of a \ - mis-compiled or mis-loaded rocksdb \ + mis-compiled or mis-loaded RocksDB \ library." .to_owned())); } @@ -513,10 +470,10 @@ impl DB { readopts: &ReadOptions) -> Result, Error> { if readopts.inner.is_null() { - return Err(Error::new("Unable to create rocksdb read options. \ + return Err(Error::new("Unable to create RocksDB read options. 
\ This is a fairly trivial call, and its \ failure may be indicative of a \ - mis-compiled or mis-loaded rocksdb \ + mis-compiled or mis-loaded RocksDB \ library." .to_owned())); } diff --git a/src/rocksdb_options.rs b/src/rocksdb_options.rs index c36a866..f134e4e 100644 --- a/src/rocksdb_options.rs +++ b/src/rocksdb_options.rs @@ -88,7 +88,7 @@ impl Default for BlockBasedOptions { fn default() -> BlockBasedOptions { let block_opts = unsafe { ffi::rocksdb_block_based_options_create() }; if block_opts.is_null() { - panic!("Could not create rocksdb block based options".to_owned()); + panic!("Could not create RocksDB block based options"); } BlockBasedOptions { inner: block_opts } } @@ -828,14 +828,13 @@ impl Default for Options { unsafe { let opts = ffi::rocksdb_options_create(); if opts.is_null() { - panic!("Could not create rocksdb options".to_owned()); + panic!("Could not create RocksDB options"); } Options { inner: opts } } } } - impl WriteOptions { pub fn new() -> WriteOptions { WriteOptions::default() @@ -858,7 +857,7 @@ impl Default for WriteOptions { fn default() -> WriteOptions { let write_opts = unsafe { ffi::rocksdb_writeoptions_create() }; if write_opts.is_null() { - panic!("Could not create rocksdb write options".to_owned()); + panic!("Could not create RocksDB write options"); } WriteOptions { inner: write_opts } } From 12d79986b2269b43a6cb808077a3aac6880a275d Mon Sep 17 00:00:00 2001 From: Alexander Regueiro Date: Sat, 26 Nov 2016 23:55:46 +0000 Subject: [PATCH 7/8] Changed rocksdb module name prefix to db. Also rustfmt on source. --- src/backup.rs | 61 ++++++++++++++++++----- src/comparator.rs | 4 +- src/{rocksdb.rs => db.rs} | 16 +++--- src/{rocksdb_options.rs => db_options.rs} | 16 +++--- src/lib.rs | 14 +++--- src/merge_operator.rs | 4 +- 6 files changed, 75 insertions(+), 40 deletions(-) rename src/{rocksdb.rs => db.rs} (99%) rename src/{rocksdb_options.rs => db_options.rs} (99%) diff --git a/src/backup.rs b/src/backup.rs index 13572a4..a49f848 100644 --- a/src/backup.rs +++ b/src/backup.rs @@ -13,14 +13,14 @@ // limitations under the License. // -use std::ffi::CString; -use std::path::{Path}; - -use libc::{uint32_t}; use {DB, Error}; use ffi; +use libc::{c_int, uint32_t}; +use std::ffi::CString; +use std::path::Path; + pub struct BackupEngine { inner: *mut ffi::rocksdb_backup_engine_t, } @@ -29,9 +29,15 @@ pub struct BackupEngineOptions { inner: *mut ffi::rocksdb_options_t, } +pub struct RestoreOptions { + inner: *mut ffi::rocksdb_restore_options_t, +} + impl BackupEngine { /// Open a backup engine with the specified options. 
- pub fn open>(opts: &BackupEngineOptions, path: P) -> Result { + pub fn open>(opts: &BackupEngineOptions, + path: P) + -> Result { let path = path.as_ref(); let cpath = match CString::new(path.to_string_lossy().as_bytes()) { Ok(c) => c, @@ -43,17 +49,13 @@ impl BackupEngine { }; let be: *mut ffi::rocksdb_backup_engine_t; - unsafe { - be = ffi_try!(ffi::rocksdb_backup_engine_open(opts.inner, cpath.as_ptr())) - } + unsafe { be = ffi_try!(ffi::rocksdb_backup_engine_open(opts.inner, cpath.as_ptr())) } if be.is_null() { return Err(Error::new("Could not initialize backup engine.".to_owned())); } - Ok(BackupEngine { - inner: be, - }) + Ok(BackupEngine { inner: be }) } fn create_new_backup(&mut self, db: &DB) -> Result<(), Error> { @@ -65,24 +67,49 @@ impl BackupEngine { fn purge_old_backups(&mut self, num_backups_to_keep: usize) -> Result<(), Error> { unsafe { - ffi_try!(ffi::rocksdb_backup_engine_purge_old_backups(self.inner, num_backups_to_keep as uint32_t)); + ffi_try!(ffi::rocksdb_backup_engine_purge_old_backups(self.inner, + num_backups_to_keep as uint32_t)); Ok(()) } } } +impl BackupEngineOptions { + // +} + +impl RestoreOptions { + pub fn set_keep_log_files(&mut self, keep_log_files: bool) { + unsafe { + ffi::rocksdb_restore_options_set_keep_log_files(self.inner, keep_log_files as c_int); + } + } +} + impl Default for BackupEngineOptions { fn default() -> BackupEngineOptions { unsafe { let opts = ffi::rocksdb_options_create(); if opts.is_null() { - panic!("Could not create backup options".to_owned()); + panic!("Could not create RocksDB backup options".to_owned()); } BackupEngineOptions { inner: opts } } } } +impl Default for RestoreOptions { + fn default() -> RestoreOptions { + unsafe { + let opts = ffi::rocksdb_restore_options_create(); + if opts.is_null() { + panic!("Could not create RocksDB restore options".to_owned()); + } + RestoreOptions { inner: opts } + } + } +} + impl Drop for BackupEngine { fn drop(&mut self) { unsafe { @@ -98,3 +125,11 @@ impl Drop for BackupEngineOptions { } } } + +impl Drop for RestoreOptions { + fn drop(&mut self) { + unsafe { + ffi::rocksdb_restore_options_destroy(self.inner); + } + } +} diff --git a/src/comparator.rs b/src/comparator.rs index ae998d9..d75bb7b 100644 --- a/src/comparator.rs +++ b/src/comparator.rs @@ -13,13 +13,13 @@ // limitations under the License. // + +use libc::{c_char, c_int, c_void, size_t}; use std::cmp::Ordering; use std::ffi::CString; use std::mem; use std::slice; -use libc::{c_char, c_int, c_void, size_t}; - pub type CompareFn = fn(&[u8], &[u8]) -> Ordering; pub struct ComparatorCallback { diff --git a/src/rocksdb.rs b/src/db.rs similarity index 99% rename from src/rocksdb.rs rename to src/db.rs index d529868..7dcfb0c 100644 --- a/src/rocksdb.rs +++ b/src/db.rs @@ -13,29 +13,25 @@ // limitations under the License. 
// + +use {DB, Error, Options, WriteOptions}; +use ffi; + +use libc::{self, c_char, c_int, c_uchar, c_void, size_t}; use std::collections::BTreeMap; use std::ffi::CString; use std::fmt; use std::fs; use std::ops::Deref; -use std::path::{Path}; +use std::path::Path; use std::ptr; use std::slice; use std::str; -use libc::{self, c_char, c_int, c_uchar, c_void, size_t}; - -use {DB, Error, Options, WriteOptions}; -use ffi; - pub fn new_bloom_filter(bits: c_int) -> *mut ffi::rocksdb_filterpolicy_t { unsafe { ffi::rocksdb_filterpolicy_create_bloom(bits) } } -pub fn new_cache(capacity: size_t) -> *mut ffi::rocksdb_cache_t { - unsafe { ffi::rocksdb_cache_create_lru(capacity) } -} - unsafe impl Send for DB {} unsafe impl Sync for DB {} diff --git a/src/rocksdb_options.rs b/src/db_options.rs similarity index 99% rename from src/rocksdb_options.rs rename to src/db_options.rs index f134e4e..6ab3c96 100644 --- a/src/rocksdb_options.rs +++ b/src/db_options.rs @@ -13,17 +13,21 @@ // limitations under the License. // -use std::ffi::{CStr, CString}; -use std::mem; -use libc::{self, c_int, c_uchar, c_uint, c_void, size_t, uint64_t}; - -use {BlockBasedOptions, Options, WriteOptions}; +use {BlockBasedOptions, DBCompactionStyle, DBCompressionType, DBRecoveryMode, Options, + WriteOptions}; use comparator::{self, ComparatorCallback, CompareFn}; use ffi; + +use libc::{self, c_int, c_uchar, c_uint, c_void, size_t, uint64_t}; use merge_operator::{self, MergeFn, MergeOperatorCallback, full_merge_callback, partial_merge_callback}; -use rocksdb::{DBCompactionStyle, DBCompressionType, DBRecoveryMode, new_cache}; +use std::ffi::{CStr, CString}; +use std::mem; + +pub fn new_cache(capacity: size_t) -> *mut ffi::rocksdb_cache_t { + unsafe { ffi::rocksdb_cache_create_lru(capacity) } +} impl Drop for Options { fn drop(&mut self) { diff --git a/src/lib.rs b/src/lib.rs index 9596ff2..5a54a83 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -40,17 +40,17 @@ mod ffi_util; pub mod backup; mod comparator; pub mod merge_operator; -mod rocksdb; -mod rocksdb_options; +mod db; +mod db_options; +pub use db::{DBCompactionStyle, DBCompressionType, DBIterator, DBRecoveryMode, DBVector, + Direction, IteratorMode, Snapshot, WriteBatch, new_bloom_filter}; + +pub use merge_operator::MergeOperands; use std::collections::BTreeMap; use std::error; use std::fmt; -use std::path::{PathBuf}; - -pub use merge_operator::MergeOperands; -pub use rocksdb::{DBCompactionStyle, DBCompressionType, DBIterator, DBRecoveryMode, DBVector, - Direction, IteratorMode, Snapshot, WriteBatch, new_bloom_filter}; +use std::path::PathBuf; /// A RocksDB database. pub struct DB { diff --git a/src/merge_operator.rs b/src/merge_operator.rs index 8980ee0..2f9df3f 100644 --- a/src/merge_operator.rs +++ b/src/merge_operator.rs @@ -53,13 +53,13 @@ //! } //! ``` + +use libc::{self, c_char, c_int, c_void, size_t}; use std::ffi::CString; use std::mem; use std::ptr; use std::slice; -use libc::{self, c_char, c_int, c_void, size_t}; - pub type MergeFn = fn(&[u8], Option<&[u8]>, &mut MergeOperands) -> Vec; pub struct MergeOperatorCallback { From 8eef873c92ab523a1fe0bf1af08f5f35e3588676 Mon Sep 17 00:00:00 2001 From: Alexander Regueiro Date: Sun, 27 Nov 2016 00:04:12 +0000 Subject: [PATCH 8/8] Fixed import for test. 
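The import being fixed belongs to the merge-operator test. Since the MergeFn shape — fn(&[u8], Option<&[u8]>, &mut MergeOperands) -> Vec<u8> — is easy to misremember, here is a sketch of a concatenating merge operator in that shape. The set_merge_operator name is assumed by analogy with the set_comparator rename and should be checked against the crate:

```
extern crate rocksdb;

use rocksdb::{DB, MergeOperands, Options};

// Append every operand after the existing value, in arrival order.
fn concat_merge(_new_key: &[u8],
                existing_val: Option<&[u8]>,
                operands: &mut MergeOperands)
                -> Vec<u8> {
    let mut result = Vec::new();
    if let Some(v) = existing_val {
        result.extend_from_slice(v);
    }
    for op in operands {
        result.extend_from_slice(op);
    }
    result
}

fn main() {
    let mut opts = Options::default();
    opts.create_if_missing(true);
    // Setter name assumed; verify against the crate.
    opts.set_merge_operator("concat operator", concat_merge);
    let db = DB::open(&opts, "_example_merge_db").unwrap();
    db.merge(b"k", b"ab").unwrap();
    db.merge(b"k", b"cd").unwrap();
}
```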
--- src/merge_operator.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/merge_operator.rs b/src/merge_operator.rs index 2f9df3f..e299cd9 100644 --- a/src/merge_operator.rs +++ b/src/merge_operator.rs @@ -199,8 +199,7 @@ fn test_provided_merge(new_key: &[u8], #[test] fn mergetest() { - use Options; - use rocksdb::DB; + use {DB, Options}; let path = "_rust_rocksdb_mergetest"; let mut opts = Options::default();