Refactor db.rs and lib.rs into smaller pieces (#395)
parent
f71953dcd9
commit
8817fc7b1c
@@ -0,0 +1,44 @@
// Copyright 2020 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::{ffi, Options};

/// A descriptor for a RocksDB column family.
///
/// A description of the column family, containing the name and `Options`.
pub struct ColumnFamilyDescriptor {
    pub(crate) name: String,
    pub(crate) options: Options,
}

impl ColumnFamilyDescriptor {
    // Create a new column family descriptor with the specified name and options.
    pub fn new<S>(name: S, options: Options) -> Self
    where
        S: Into<String>,
    {
        ColumnFamilyDescriptor {
            name: name.into(),
            options,
        }
    }
}

/// An opaque type used to represent a column family. Returned from some functions, and used
/// in others
pub struct ColumnFamily {
    pub(crate) inner: *mut ffi::rocksdb_column_family_handle_t,
}

unsafe impl Send for ColumnFamily {}
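
`ColumnFamilyDescriptor` is consumed when a database is opened with explicit column families. A minimal usage sketch follows; it assumes `DB::open_cf_descriptors`, `cf_handle`, and `put_cf` from the rest of the crate (none of which are part of this hunk) and uses a throwaway path.

```rust
use rocksdb::{ColumnFamilyDescriptor, DB, Options};

fn open_with_cf() -> Result<(), rocksdb::Error> {
    // Per-column-family options; tuned independently of the DB options.
    let cf = ColumnFamilyDescriptor::new("cf1", Options::default());

    // DB-wide options; create the DB and any missing column families on open.
    let mut db_opts = Options::default();
    db_opts.create_if_missing(true);
    db_opts.create_missing_column_families(true);

    let db = DB::open_cf_descriptors(&db_opts, "_path_for_rocksdb_storage_cf", vec![cf])?;

    // cf_handle returns the opaque ColumnFamily handle used by the *_cf methods.
    let handle = db.cf_handle("cf1").expect("cf1 was just opened");
    db.put_cf(handle, b"key", b"value")
}
```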
@@ -0,0 +1,541 @@
// Copyright 2020 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::{ffi, ColumnFamily, Error, ReadOptions, WriteBatch, DB};
use libc::{c_char, c_uchar, size_t};
use std::marker::PhantomData;
use std::slice;

/// An iterator over a database or column family, with specifiable
/// ranges and direction.
///
/// This iterator is different from the standard ``DBIterator`` as it aims to
/// replicate the underlying iterator API within RocksDB itself. This should
/// give access to more performance and flexibility but departs from the
/// widely recognised Rust idioms.
///
/// ```
/// use rocksdb::{DB, Options};
///
/// let path = "_path_for_rocksdb_storage4";
/// {
///     let db = DB::open_default(path).unwrap();
///     let mut iter = db.raw_iterator();
///
///     // Forwards iteration
///     iter.seek_to_first();
///     while iter.valid() {
///         println!("Saw {:?} {:?}", iter.key(), iter.value());
///         iter.next();
///     }
///
///     // Reverse iteration
///     iter.seek_to_last();
///     while iter.valid() {
///         println!("Saw {:?} {:?}", iter.key(), iter.value());
///         iter.prev();
///     }
///
///     // Seeking
///     iter.seek(b"my key");
///     while iter.valid() {
///         println!("Saw {:?} {:?}", iter.key(), iter.value());
///         iter.next();
///     }
///
///     // Reverse iteration from key
///     // Note, use seek_for_prev when reversing because if this key doesn't exist,
///     // this will make the iterator start from the previous key rather than the next.
///     iter.seek_for_prev(b"my key");
///     while iter.valid() {
///         println!("Saw {:?} {:?}", iter.key(), iter.value());
///         iter.prev();
///     }
/// }
/// let _ = DB::destroy(&Options::default(), path);
/// ```
pub struct DBRawIterator<'a> {
    inner: *mut ffi::rocksdb_iterator_t,
    db: PhantomData<&'a DB>,
}

impl<'a> DBRawIterator<'a> {
    pub(crate) fn new(db: &DB, readopts: &ReadOptions) -> DBRawIterator<'a> {
        unsafe {
            DBRawIterator {
                inner: ffi::rocksdb_create_iterator(db.inner, readopts.inner),
                db: PhantomData,
            }
        }
    }

    pub(crate) fn new_cf(
        db: &DB,
        cf_handle: &ColumnFamily,
        readopts: &ReadOptions,
    ) -> DBRawIterator<'a> {
        unsafe {
            DBRawIterator {
                inner: ffi::rocksdb_create_iterator_cf(db.inner, readopts.inner, cf_handle.inner),
                db: PhantomData,
            }
        }
    }

    /// Returns `true` if the iterator is valid. An iterator is invalidated when
    /// it reaches the end of its defined range, or when it encounters an error.
    ///
    /// To check whether the iterator encountered an error after `valid` has
    /// returned `false`, use the [`status`](DBRawIterator::status) method. `status` will never
    /// return an error when `valid` is `true`.
    pub fn valid(&self) -> bool {
        unsafe { ffi::rocksdb_iter_valid(self.inner) != 0 }
    }

    /// Returns an error `Result` if the iterator has encountered an error
    /// during operation. When an error is encountered, the iterator is
    /// invalidated and [`valid`](DBRawIterator::valid) will return `false` when called.
    ///
    /// Performing a seek will discard the current status.
    pub fn status(&self) -> Result<(), Error> {
        unsafe {
            ffi_try!(ffi::rocksdb_iter_get_error(self.inner));
        }
        Ok(())
    }

    /// Seeks to the first key in the database.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use rocksdb::{DB, Options};
    ///
    /// let path = "_path_for_rocksdb_storage5";
    /// {
    ///     let db = DB::open_default(path).unwrap();
    ///     let mut iter = db.raw_iterator();
    ///
    ///     // Iterate all keys from the start in lexicographic order
    ///     iter.seek_to_first();
    ///
    ///     while iter.valid() {
    ///         println!("{:?} {:?}", iter.key(), iter.value());
    ///         iter.next();
    ///     }
    ///
    ///     // Read just the first key
    ///     iter.seek_to_first();
    ///
    ///     if iter.valid() {
    ///         println!("{:?} {:?}", iter.key(), iter.value());
    ///     } else {
    ///         // There are no keys in the database
    ///     }
    /// }
    /// let _ = DB::destroy(&Options::default(), path);
    /// ```
    pub fn seek_to_first(&mut self) {
        unsafe {
            ffi::rocksdb_iter_seek_to_first(self.inner);
        }
    }

    /// Seeks to the last key in the database.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use rocksdb::{DB, Options};
    ///
    /// let path = "_path_for_rocksdb_storage6";
    /// {
    ///     let db = DB::open_default(path).unwrap();
    ///     let mut iter = db.raw_iterator();
    ///
    ///     // Iterate all keys from the end in reverse lexicographic order
    ///     iter.seek_to_last();
    ///
    ///     while iter.valid() {
    ///         println!("{:?} {:?}", iter.key(), iter.value());
    ///         iter.prev();
    ///     }
    ///
    ///     // Read just the last key
    ///     iter.seek_to_last();
    ///
    ///     if iter.valid() {
    ///         println!("{:?} {:?}", iter.key(), iter.value());
    ///     } else {
    ///         // There are no keys in the database
    ///     }
    /// }
    /// let _ = DB::destroy(&Options::default(), path);
    /// ```
    pub fn seek_to_last(&mut self) {
        unsafe {
            ffi::rocksdb_iter_seek_to_last(self.inner);
        }
    }

    /// Seeks to the specified key or the first key that lexicographically follows it.
    ///
    /// This method will attempt to seek to the specified key. If that key does not exist, it will
    /// find and seek to the key that lexicographically follows it instead.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use rocksdb::{DB, Options};
    ///
    /// let path = "_path_for_rocksdb_storage7";
    /// {
    ///     let db = DB::open_default(path).unwrap();
    ///     let mut iter = db.raw_iterator();
    ///
    ///     // Read the first key that starts with 'a'
    ///     iter.seek(b"a");
    ///
    ///     if iter.valid() {
    ///         println!("{:?} {:?}", iter.key(), iter.value());
    ///     } else {
    ///         // There are no keys in the database
    ///     }
    /// }
    /// let _ = DB::destroy(&Options::default(), path);
    /// ```
    pub fn seek<K: AsRef<[u8]>>(&mut self, key: K) {
        let key = key.as_ref();

        unsafe {
            ffi::rocksdb_iter_seek(
                self.inner,
                key.as_ptr() as *const c_char,
                key.len() as size_t,
            );
        }
    }

    /// Seeks to the specified key, or the first key that lexicographically precedes it.
    ///
    /// Like ``.seek()`` this method will attempt to seek to the specified key.
    /// The difference with ``.seek()`` is that if the specified key does not exist, this method
    /// will seek to the key that lexicographically precedes it instead.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use rocksdb::{DB, Options};
    ///
    /// let path = "_path_for_rocksdb_storage8";
    /// {
    ///     let db = DB::open_default(path).unwrap();
    ///     let mut iter = db.raw_iterator();
    ///
    ///     // Read the last key that starts with 'a'
    ///     iter.seek_for_prev(b"b");
    ///
    ///     if iter.valid() {
    ///         println!("{:?} {:?}", iter.key(), iter.value());
    ///     } else {
    ///         // There are no keys in the database
    ///     }
    /// }
    /// let _ = DB::destroy(&Options::default(), path);
    /// ```
    pub fn seek_for_prev<K: AsRef<[u8]>>(&mut self, key: K) {
        let key = key.as_ref();

        unsafe {
            ffi::rocksdb_iter_seek_for_prev(
                self.inner,
                key.as_ptr() as *const c_char,
                key.len() as size_t,
            );
        }
    }

    /// Seeks to the next key.
    ///
    /// Check [`valid`](DBRawIterator::valid) to see whether the iterator is
    /// still valid after this operation.
    pub fn next(&mut self) {
        unsafe {
            ffi::rocksdb_iter_next(self.inner);
        }
    }

    /// Seeks to the previous key.
    ///
    /// Check [`valid`](DBRawIterator::valid) to see whether the iterator is
    /// still valid after this operation.
    pub fn prev(&mut self) {
        unsafe {
            ffi::rocksdb_iter_prev(self.inner);
        }
    }

    /// Returns a slice of the current key.
    pub fn key(&self) -> Option<&[u8]> {
        if self.valid() {
            // Safety Note: This is safe as all methods that may invalidate the buffer returned
            // take `&mut self`, so the borrow checker will prevent use of the buffer after seek.
            unsafe {
                let mut key_len: size_t = 0;
                let key_len_ptr: *mut size_t = &mut key_len;
                let key_ptr = ffi::rocksdb_iter_key(self.inner, key_len_ptr) as *const c_uchar;

                Some(slice::from_raw_parts(key_ptr, key_len as usize))
            }
        } else {
            None
        }
    }

    /// Returns a slice of the current value.
    pub fn value(&self) -> Option<&[u8]> {
        if self.valid() {
            // Safety Note: This is safe as all methods that may invalidate the buffer returned
            // take `&mut self`, so the borrow checker will prevent use of the buffer after seek.
            unsafe {
                let mut val_len: size_t = 0;
                let val_len_ptr: *mut size_t = &mut val_len;
                let val_ptr = ffi::rocksdb_iter_value(self.inner, val_len_ptr) as *const c_uchar;

                Some(slice::from_raw_parts(val_ptr, val_len as usize))
            }
        } else {
            None
        }
    }
}

impl<'a> Drop for DBRawIterator<'a> {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_iter_destroy(self.inner);
        }
    }
}

unsafe impl<'a> Send for DBRawIterator<'a> {}
unsafe impl<'a> Sync for DBRawIterator<'a> {}

/// An iterator over a database or column family, with specifiable
/// ranges and direction.
///
/// ```
/// use rocksdb::{DB, Direction, IteratorMode, Options};
///
/// let path = "_path_for_rocksdb_storage2";
/// {
///     let db = DB::open_default(path).unwrap();
///     let mut iter = db.iterator(IteratorMode::Start); // Always iterates forward
///     for (key, value) in iter {
///         println!("Saw {:?} {:?}", key, value);
///     }
///     iter = db.iterator(IteratorMode::End); // Always iterates backward
///     for (key, value) in iter {
///         println!("Saw {:?} {:?}", key, value);
///     }
///     iter = db.iterator(IteratorMode::From(b"my key", Direction::Forward)); // From a key in Direction::{forward,reverse}
///     for (key, value) in iter {
///         println!("Saw {:?} {:?}", key, value);
///     }
///
///     // You can seek with an existing Iterator instance, too
///     iter = db.iterator(IteratorMode::Start);
///     iter.set_mode(IteratorMode::From(b"another key", Direction::Reverse));
///     for (key, value) in iter {
///         println!("Saw {:?} {:?}", key, value);
///     }
/// }
/// let _ = DB::destroy(&Options::default(), path);
/// ```
pub struct DBIterator<'a> {
    raw: DBRawIterator<'a>,
    direction: Direction,
    just_seeked: bool,
}

pub enum Direction {
    Forward,
    Reverse,
}

pub type KVBytes = (Box<[u8]>, Box<[u8]>);

pub enum IteratorMode<'a> {
    Start,
    End,
    From(&'a [u8], Direction),
}

impl<'a> DBIterator<'a> {
    pub(crate) fn new(db: &DB, readopts: &ReadOptions, mode: IteratorMode) -> DBIterator<'a> {
        let mut rv = DBIterator {
            raw: DBRawIterator::new(db, readopts),
            direction: Direction::Forward, // blown away by set_mode()
            just_seeked: false,
        };
        rv.set_mode(mode);
        rv
    }

    pub(crate) fn new_cf(
        db: &DB,
        cf_handle: &ColumnFamily,
        readopts: &ReadOptions,
        mode: IteratorMode,
    ) -> DBIterator<'a> {
        let mut rv = DBIterator {
            raw: DBRawIterator::new_cf(db, cf_handle, readopts),
            direction: Direction::Forward, // blown away by set_mode()
            just_seeked: false,
        };
        rv.set_mode(mode);
        rv
    }

    pub fn set_mode(&mut self, mode: IteratorMode) {
        match mode {
            IteratorMode::Start => {
                self.raw.seek_to_first();
                self.direction = Direction::Forward;
            }
            IteratorMode::End => {
                self.raw.seek_to_last();
                self.direction = Direction::Reverse;
            }
            IteratorMode::From(key, Direction::Forward) => {
                self.raw.seek(key);
                self.direction = Direction::Forward;
            }
            IteratorMode::From(key, Direction::Reverse) => {
                self.raw.seek_for_prev(key);
                self.direction = Direction::Reverse;
            }
        };

        self.just_seeked = true;
    }

    /// See [`valid`](DBRawIterator::valid)
    pub fn valid(&self) -> bool {
        self.raw.valid()
    }

    /// See [`status`](DBRawIterator::status)
    pub fn status(&self) -> Result<(), Error> {
        self.raw.status()
    }
}

impl<'a> Iterator for DBIterator<'a> {
    type Item = KVBytes;

    fn next(&mut self) -> Option<KVBytes> {
        if !self.raw.valid() {
            return None;
        }

        // Initial call to next() after seeking should not move the iterator
        // or the first item will not be returned
        if !self.just_seeked {
            match self.direction {
                Direction::Forward => self.raw.next(),
                Direction::Reverse => self.raw.prev(),
            }
        } else {
            self.just_seeked = false;
        }

        if self.raw.valid() {
            // .key() and .value() only ever return None if valid == false, which we've just checked
            Some((
                Box::from(self.raw.key().unwrap()),
                Box::from(self.raw.value().unwrap()),
            ))
        } else {
            None
        }
    }
}

impl<'a> Into<DBRawIterator<'a>> for DBIterator<'a> {
    fn into(self) -> DBRawIterator<'a> {
        self.raw
    }
}

/// Iterates the batches of writes since a given sequence number.
///
/// `DBWALIterator` is returned by `DB::get_updates_since()` and will return the
/// batches of write operations that have occurred since a given sequence number
/// (see `DB::latest_sequence_number()`). This iterator cannot be constructed by
/// the application.
///
/// The iterator item type is a tuple of (`u64`, `WriteBatch`) where the first
/// value is the sequence number of the associated write batch.
///
pub struct DBWALIterator {
    pub(crate) inner: *mut ffi::rocksdb_wal_iterator_t,
}

impl DBWALIterator {
    /// Returns `true` if the iterator is valid. An iterator is invalidated when
    /// it reaches the end of its defined range, or when it encounters an error.
    ///
    /// To check whether the iterator encountered an error after `valid` has
    /// returned `false`, use the [`status`](DBWALIterator::status) method.
    /// `status` will never return an error when `valid` is `true`.
    pub fn valid(&self) -> bool {
        unsafe { ffi::rocksdb_wal_iter_valid(self.inner) != 0 }
    }

    /// Returns an error `Result` if the iterator has encountered an error
    /// during operation. When an error is encountered, the iterator is
    /// invalidated and [`valid`](DBWALIterator::valid) will return `false` when
    /// called.
    pub fn status(&self) -> Result<(), Error> {
        unsafe {
            ffi_try!(ffi::rocksdb_wal_iter_status(self.inner));
        }
        Ok(())
    }
}

impl Iterator for DBWALIterator {
    type Item = (u64, WriteBatch);

    fn next(&mut self) -> Option<(u64, WriteBatch)> {
        // Seek to the next write batch.
        unsafe {
            ffi::rocksdb_wal_iter_next(self.inner);
        }
        if self.valid() {
            let mut seq: u64 = 0;
            let inner = unsafe { ffi::rocksdb_wal_iter_get_batch(self.inner, &mut seq) };
            Some((seq, WriteBatch { inner }))
        } else {
            None
        }
    }
}

impl Drop for DBWALIterator {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_wal_iter_destroy(self.inner);
        }
    }
}
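
`DBWALIterator` cannot be constructed by the application; the doc comment above points to `DB::get_updates_since()` and `DB::latest_sequence_number()`. A hedged sketch of reading the WAL tail, assuming those two methods exist on `DB` as described (they are outside this hunk):

```rust
use rocksdb::DB;

fn replay_recent_writes(db: &DB) -> Result<(), rocksdb::Error> {
    // Sequence numbers grow with every write; 0 asks for every batch
    // still retained in the WAL (see DB::latest_sequence_number()).
    let wal_iter = db.get_updates_since(0)?;

    // Each item is (sequence number, WriteBatch); a batch can be inspected
    // with WriteBatch::iterate or re-applied elsewhere.
    for (seq, batch) in wal_iter {
        println!("batch at seq {} holds {} ops", seq, batch.len());
    }
    Ok(())
}
```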
@@ -0,0 +1,72 @@
// Copyright 2020 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::{ffi, DB};
use core::ops::Deref;
use libc::size_t;
use std::marker::PhantomData;
use std::slice;

/// Wrapper around RocksDB PinnableSlice struct.
///
/// With a pinnable slice, we can directly leverage in-memory data within
/// RocksDB to avoid unnecessary memory copies. The struct here wraps the
/// returned raw pointer and ensures proper finalization work.
pub struct DBPinnableSlice<'a> {
    ptr: *mut ffi::rocksdb_pinnableslice_t,
    db: PhantomData<&'a DB>,
}

unsafe impl<'a> Send for DBPinnableSlice<'a> {}
unsafe impl<'a> Sync for DBPinnableSlice<'a> {}

impl<'a> AsRef<[u8]> for DBPinnableSlice<'a> {
    fn as_ref(&self) -> &[u8] {
        // Implement this via Deref so as not to repeat ourselves
        &*self
    }
}

impl<'a> Deref for DBPinnableSlice<'a> {
    type Target = [u8];

    fn deref(&self) -> &[u8] {
        unsafe {
            let mut val_len: size_t = 0;
            let val = ffi::rocksdb_pinnableslice_value(self.ptr, &mut val_len) as *mut u8;
            slice::from_raw_parts(val, val_len)
        }
    }
}

impl<'a> Drop for DBPinnableSlice<'a> {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_pinnableslice_destroy(self.ptr);
        }
    }
}

impl<'a> DBPinnableSlice<'a> {
    /// Used to wrap a PinnableSlice from rocksdb to avoid unnecessary memcpy
    ///
    /// # Unsafe
    /// The pointer must have been generated by rocksdb_get_pinned
    pub(crate) unsafe fn from_c(ptr: *mut ffi::rocksdb_pinnableslice_t) -> DBPinnableSlice<'a> {
        DBPinnableSlice {
            ptr,
            db: PhantomData,
        }
    }
}
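
`DBPinnableSlice` is only built internally through `from_c`, so applications get one from the pinned read path. A sketch of the intended zero-copy read, assuming `DB::get_pinned` is the public wrapper around `rocksdb_get_pinned` (not part of this hunk):

```rust
use rocksdb::DB;

fn read_without_copy(db: &DB) -> Result<(), rocksdb::Error> {
    db.put(b"answer", b"42")?;

    // get_pinned returns Ok(Some(DBPinnableSlice)) when the key exists;
    // Deref/AsRef expose bytes that are still owned by RocksDB.
    if let Some(pinned) = db.get_pinned(b"answer")? {
        assert_eq!(&*pinned, &b"42"[..]);
    } // the slice is unpinned when `pinned` is dropped
    Ok(())
}
```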
@@ -0,0 +1,143 @@
// Copyright 2020 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::{ffi, ColumnFamily, DBIterator, DBRawIterator, Error, IteratorMode, ReadOptions, DB};

/// A consistent view of the database at the point of creation.
///
/// ```
/// use rocksdb::{DB, IteratorMode, Options};
///
/// let path = "_path_for_rocksdb_storage3";
/// {
///     let db = DB::open_default(path).unwrap();
///     let snapshot = db.snapshot(); // Creates a longer-term snapshot of the DB, released when it goes out of scope
///     let mut iter = snapshot.iterator(IteratorMode::Start); // Make as many iterators as you'd like from one snapshot
/// }
/// let _ = DB::destroy(&Options::default(), path);
/// ```
///
pub struct Snapshot<'a> {
    db: &'a DB,
    pub(crate) inner: *const ffi::rocksdb_snapshot_t,
}

impl<'a> Snapshot<'a> {
    pub fn new(db: &DB) -> Snapshot {
        let snapshot = unsafe { ffi::rocksdb_create_snapshot(db.inner) };
        Snapshot {
            db,
            inner: snapshot,
        }
    }

    pub fn iterator(&self, mode: IteratorMode) -> DBIterator<'a> {
        let readopts = ReadOptions::default();
        self.iterator_opt(mode, readopts)
    }

    pub fn iterator_cf(&self, cf_handle: &ColumnFamily, mode: IteratorMode) -> DBIterator {
        let readopts = ReadOptions::default();
        self.iterator_cf_opt(cf_handle, readopts, mode)
    }

    pub fn iterator_opt(&self, mode: IteratorMode, mut readopts: ReadOptions) -> DBIterator<'a> {
        readopts.set_snapshot(self);
        DBIterator::new(self.db, &readopts, mode)
    }

    pub fn iterator_cf_opt(
        &self,
        cf_handle: &ColumnFamily,
        mut readopts: ReadOptions,
        mode: IteratorMode,
    ) -> DBIterator {
        readopts.set_snapshot(self);
        DBIterator::new_cf(self.db, cf_handle, &readopts, mode)
    }

    /// Opens a raw iterator over the data in this snapshot, using the default read options.
    pub fn raw_iterator(&self) -> DBRawIterator {
        let readopts = ReadOptions::default();
        self.raw_iterator_opt(readopts)
    }

    /// Opens a raw iterator over the data in this snapshot under the given column family, using the default read options.
    pub fn raw_iterator_cf(&self, cf_handle: &ColumnFamily) -> DBRawIterator {
        let readopts = ReadOptions::default();
        self.raw_iterator_cf_opt(cf_handle, readopts)
    }

    /// Opens a raw iterator over the data in this snapshot, using the given read options.
    pub fn raw_iterator_opt(&self, mut readopts: ReadOptions) -> DBRawIterator {
        readopts.set_snapshot(self);
        DBRawIterator::new(self.db, &readopts)
    }

    /// Opens a raw iterator over the data in this snapshot under the given column family, using the given read options.
    pub fn raw_iterator_cf_opt(
        &self,
        cf_handle: &ColumnFamily,
        mut readopts: ReadOptions,
    ) -> DBRawIterator {
        readopts.set_snapshot(self);
        DBRawIterator::new_cf(self.db, cf_handle, &readopts)
    }

    pub fn get<K: AsRef<[u8]>>(&self, key: K) -> Result<Option<Vec<u8>>, Error> {
        let readopts = ReadOptions::default();
        self.get_opt(key, readopts)
    }

    pub fn get_cf<K: AsRef<[u8]>>(
        &self,
        cf: &ColumnFamily,
        key: K,
    ) -> Result<Option<Vec<u8>>, Error> {
        let readopts = ReadOptions::default();
        self.get_cf_opt(cf, key.as_ref(), readopts)
    }

    pub fn get_opt<K: AsRef<[u8]>>(
        &self,
        key: K,
        mut readopts: ReadOptions,
    ) -> Result<Option<Vec<u8>>, Error> {
        readopts.set_snapshot(self);
        self.db.get_opt(key.as_ref(), &readopts)
    }

    pub fn get_cf_opt<K: AsRef<[u8]>>(
        &self,
        cf: &ColumnFamily,
        key: K,
        mut readopts: ReadOptions,
    ) -> Result<Option<Vec<u8>>, Error> {
        readopts.set_snapshot(self);
        self.db.get_cf_opt(cf, key.as_ref(), &readopts)
    }
}

impl<'a> Drop for Snapshot<'a> {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_release_snapshot(self.db.inner, self.inner);
        }
    }
}

/// `Send` and `Sync` implementations for `Snapshot` are safe, because `Snapshot` is
/// immutable and can be safely shared between threads.
unsafe impl<'a> Send for Snapshot<'a> {}
unsafe impl<'a> Sync for Snapshot<'a> {}
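
Besides the iterator shown in the doc comment, `Snapshot::get` reads through the same frozen view. A short sketch using only methods from this hunk plus `DB::open_default`, `DB::put`, `DB::snapshot`, and `DB::destroy` from the existing API:

```rust
use rocksdb::{DB, Options};

fn snapshot_read() -> Result<(), rocksdb::Error> {
    let path = "_path_for_rocksdb_snapshot_get";
    {
        let db = DB::open_default(path)?;
        db.put(b"k", b"before")?;

        let snapshot = db.snapshot();
        db.put(b"k", b"after")?;

        // The snapshot still sees the value that existed when it was taken.
        assert_eq!(snapshot.get(b"k")?, Some(b"before".to_vec()));
    }
    let _ = DB::destroy(&Options::default(), path);
    Ok(())
}
```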
@@ -0,0 +1,281 @@
// Copyright 2020 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::{ffi, ColumnFamily};
use libc::{c_char, c_void, size_t};
use std::slice;

/// An atomic batch of write operations.
///
/// Making an atomic commit of several writes:
///
/// ```
/// use rocksdb::{DB, Options, WriteBatch};
///
/// let path = "_path_for_rocksdb_storage1";
/// {
///     let db = DB::open_default(path).unwrap();
///     let mut batch = WriteBatch::default();
///     batch.put(b"my key", b"my value");
///     batch.put(b"key2", b"value2");
///     batch.put(b"key3", b"value3");
///     db.write(batch); // Atomically commits the batch
/// }
/// let _ = DB::destroy(&Options::default(), path);
/// ```
pub struct WriteBatch {
    pub(crate) inner: *mut ffi::rocksdb_writebatch_t,
}

/// Receives the puts and deletes of a write batch.
///
/// The application must provide an implementation of this trait when
/// iterating the operations within a `WriteBatch`
pub trait WriteBatchIterator {
    /// Called with a key and value that were `put` into the batch.
    fn put(&mut self, key: Box<[u8]>, value: Box<[u8]>);
    /// Called with a key that was `delete`d from the batch.
    fn delete(&mut self, key: Box<[u8]>);
}

unsafe extern "C" fn writebatch_put_callback(
    state: *mut c_void,
    k: *const c_char,
    klen: usize,
    v: *const c_char,
    vlen: usize,
) {
    // coerce the raw pointer back into a box, but "leak" it so we prevent
    // freeing the resource before we are done with it
    let boxed_cb = Box::from_raw(state as *mut &mut dyn WriteBatchIterator);
    let leaked_cb = Box::leak(boxed_cb);
    let key = slice::from_raw_parts(k as *const u8, klen as usize);
    let value = slice::from_raw_parts(v as *const u8, vlen as usize);
    leaked_cb.put(
        key.to_vec().into_boxed_slice(),
        value.to_vec().into_boxed_slice(),
    );
}

unsafe extern "C" fn writebatch_delete_callback(state: *mut c_void, k: *const c_char, klen: usize) {
    // coerce the raw pointer back into a box, but "leak" it so we prevent
    // freeing the resource before we are done with it
    let boxed_cb = Box::from_raw(state as *mut &mut dyn WriteBatchIterator);
    let leaked_cb = Box::leak(boxed_cb);
    let key = slice::from_raw_parts(k as *const u8, klen as usize);
    leaked_cb.delete(key.to_vec().into_boxed_slice());
}

impl WriteBatch {
    pub fn len(&self) -> usize {
        unsafe { ffi::rocksdb_writebatch_count(self.inner) as usize }
    }

    /// Return WriteBatch serialized size (in bytes).
    pub fn size_in_bytes(&self) -> usize {
        unsafe {
            let mut batch_size: size_t = 0;
            ffi::rocksdb_writebatch_data(self.inner, &mut batch_size);
            batch_size as usize
        }
    }

    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Iterate the put and delete operations within this write batch. Note that
    /// this does _not_ return an `Iterator` but instead will invoke the `put()`
    /// and `delete()` member functions of the provided `WriteBatchIterator`
    /// trait implementation.
    pub fn iterate(&self, callbacks: &mut dyn WriteBatchIterator) {
        let state = Box::into_raw(Box::new(callbacks));
        unsafe {
            ffi::rocksdb_writebatch_iterate(
                self.inner,
                state as *mut c_void,
                Some(writebatch_put_callback),
                Some(writebatch_delete_callback),
            );
            // we must manually set the raw box free since there is no
            // associated "destroy" callback for this object
            Box::from_raw(state);
        }
    }

    /// Insert a value into the database under the given key.
    pub fn put<K, V>(&mut self, key: K, value: V)
    where
        K: AsRef<[u8]>,
        V: AsRef<[u8]>,
    {
        let key = key.as_ref();
        let value = value.as_ref();

        unsafe {
            ffi::rocksdb_writebatch_put(
                self.inner,
                key.as_ptr() as *const c_char,
                key.len() as size_t,
                value.as_ptr() as *const c_char,
                value.len() as size_t,
            );
        }
    }

    pub fn put_cf<K, V>(&mut self, cf: &ColumnFamily, key: K, value: V)
    where
        K: AsRef<[u8]>,
        V: AsRef<[u8]>,
    {
        let key = key.as_ref();
        let value = value.as_ref();

        unsafe {
            ffi::rocksdb_writebatch_put_cf(
                self.inner,
                cf.inner,
                key.as_ptr() as *const c_char,
                key.len() as size_t,
                value.as_ptr() as *const c_char,
                value.len() as size_t,
            );
        }
    }

    pub fn merge<K, V>(&mut self, key: K, value: V)
    where
        K: AsRef<[u8]>,
        V: AsRef<[u8]>,
    {
        let key = key.as_ref();
        let value = value.as_ref();

        unsafe {
            ffi::rocksdb_writebatch_merge(
                self.inner,
                key.as_ptr() as *const c_char,
                key.len() as size_t,
                value.as_ptr() as *const c_char,
                value.len() as size_t,
            );
        }
    }

    pub fn merge_cf<K, V>(&mut self, cf: &ColumnFamily, key: K, value: V)
    where
        K: AsRef<[u8]>,
        V: AsRef<[u8]>,
    {
        let key = key.as_ref();
        let value = value.as_ref();

        unsafe {
            ffi::rocksdb_writebatch_merge_cf(
                self.inner,
                cf.inner,
                key.as_ptr() as *const c_char,
                key.len() as size_t,
                value.as_ptr() as *const c_char,
                value.len() as size_t,
            );
        }
    }

    /// Removes the database entry for key. Does nothing if the key was not found.
    pub fn delete<K: AsRef<[u8]>>(&mut self, key: K) {
        let key = key.as_ref();

        unsafe {
            ffi::rocksdb_writebatch_delete(
                self.inner,
                key.as_ptr() as *const c_char,
                key.len() as size_t,
            );
        }
    }

    pub fn delete_cf<K: AsRef<[u8]>>(&mut self, cf: &ColumnFamily, key: K) {
        let key = key.as_ref();

        unsafe {
            ffi::rocksdb_writebatch_delete_cf(
                self.inner,
                cf.inner,
                key.as_ptr() as *const c_char,
                key.len() as size_t,
            );
        }
    }

    /// Remove database entries from start key to end key.
    ///
    /// Removes the database entries in the range ["begin_key", "end_key"), i.e.,
    /// including "begin_key" and excluding "end_key". It is not an error if no
    /// keys exist in the range ["begin_key", "end_key").
    pub fn delete_range<K: AsRef<[u8]>>(&mut self, from: K, to: K) {
        let (start_key, end_key) = (from.as_ref(), to.as_ref());

        unsafe {
            ffi::rocksdb_writebatch_delete_range(
                self.inner,
                start_key.as_ptr() as *const c_char,
                start_key.len() as size_t,
                end_key.as_ptr() as *const c_char,
                end_key.len() as size_t,
            );
        }
    }

    /// Remove database entries in column family from start key to end key.
    ///
    /// Removes the database entries in the range ["begin_key", "end_key"), i.e.,
    /// including "begin_key" and excluding "end_key". It is not an error if no
    /// keys exist in the range ["begin_key", "end_key").
    pub fn delete_range_cf<K: AsRef<[u8]>>(&mut self, cf: &ColumnFamily, from: K, to: K) {
        let (start_key, end_key) = (from.as_ref(), to.as_ref());

        unsafe {
            ffi::rocksdb_writebatch_delete_range_cf(
                self.inner,
                cf.inner,
                start_key.as_ptr() as *const c_char,
                start_key.len() as size_t,
                end_key.as_ptr() as *const c_char,
                end_key.len() as size_t,
            );
        }
    }

    /// Clear all updates buffered in this batch.
    pub fn clear(&mut self) {
        unsafe {
            ffi::rocksdb_writebatch_clear(self.inner);
        }
    }
}

impl Default for WriteBatch {
    fn default() -> WriteBatch {
        WriteBatch {
            inner: unsafe { ffi::rocksdb_writebatch_create() },
        }
    }
}

impl Drop for WriteBatch {
    fn drop(&mut self) {
        unsafe { ffi::rocksdb_writebatch_destroy(self.inner) }
    }
}
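
Because `iterate` drives a `WriteBatchIterator` instead of returning an iterator, the caller supplies the callback object. A small sketch of such an implementation, using only the trait and methods defined in this hunk (the `OpCounter` type is illustrative):

```rust
use rocksdb::{WriteBatch, WriteBatchIterator};

// Collects the operations of a batch for inspection or replay.
struct OpCounter {
    puts: usize,
    deletes: usize,
}

impl WriteBatchIterator for OpCounter {
    fn put(&mut self, _key: Box<[u8]>, _value: Box<[u8]>) {
        self.puts += 1;
    }
    fn delete(&mut self, _key: Box<[u8]>) {
        self.deletes += 1;
    }
}

fn count_ops() {
    let mut batch = WriteBatch::default();
    batch.put(b"k1", b"v1");
    batch.put(b"k2", b"v2");
    batch.delete(b"k1");

    let mut counter = OpCounter { puts: 0, deletes: 0 };
    batch.iterate(&mut counter);

    assert_eq!((counter.puts, counter.deletes), (2, 1));
    assert_eq!(batch.len(), 3);
}
```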