Merge pull request #123 from derekdreery/update_rocksdb

Make rocksdb work with gcc 7 (v5.4.6)
master
Tyler Neely 8 years ago committed by GitHub
commit dc445fd2f4
  1. 2
      librocksdb-sys/rocksdb
  2. 2
      librocksdb-sys/rocksdb_lib_sources.txt
  3. 10
      librocksdb-sys/src/lib.rs
  4. 1
      librocksdb-sys/tests/ffi.rs
  5. 12
      src/db.rs
  6. 78
      src/db_options.rs
  7. 30
      tests/test_raw_iterator.rs

@ -1 +1 @@
Subproject commit f201a44b4102308b840b15d9b89122af787476f1 Subproject commit 2e98ac018f8e1d886c75ecc2268e8a52f685bf39

File diff suppressed because one or more lines are too long

@ -294,6 +294,8 @@ extern "C" {
pub fn rocksdb_iter_seek(iterator: *mut rocksdb_iterator_t, k: *const c_char, klen: size_t); pub fn rocksdb_iter_seek(iterator: *mut rocksdb_iterator_t, k: *const c_char, klen: size_t);
pub fn rocksdb_iter_seek_for_prev(iterator: *mut rocksdb_iterator_t, k: *const c_char, klen: size_t);
pub fn rocksdb_iter_next(iterator: *mut rocksdb_iterator_t); pub fn rocksdb_iter_next(iterator: *mut rocksdb_iterator_t);
pub fn rocksdb_iter_prev(iterator: *mut rocksdb_iterator_t); pub fn rocksdb_iter_prev(iterator: *mut rocksdb_iterator_t);
@ -633,7 +635,10 @@ extern "C" {
pub fn rocksdb_options_set_purge_redundant_kvs_while_flush(opt: *mut rocksdb_options_t, pub fn rocksdb_options_set_purge_redundant_kvs_while_flush(opt: *mut rocksdb_options_t,
v: c_uchar); v: c_uchar);
pub fn rocksdb_options_set_allow_os_buffer(opt: *mut rocksdb_options_t, v: c_uchar); pub fn rocksdb_options_set_use_direct_reads(opt: *mut rocksdb_options_t, v: c_uchar);
pub fn rocksdb_options_set_use_direct_io_for_flush_and_compaction(opt: *mut rocksdb_options_t,
v: c_uchar);
pub fn rocksdb_options_set_allow_mmap_reads(opt: *mut rocksdb_options_t, v: c_uchar); pub fn rocksdb_options_set_allow_mmap_reads(opt: *mut rocksdb_options_t, v: c_uchar);
@ -655,6 +660,9 @@ extern "C" {
pub fn rocksdb_options_set_bytes_per_sync(opt: *mut rocksdb_options_t, v: uint64_t); pub fn rocksdb_options_set_bytes_per_sync(opt: *mut rocksdb_options_t, v: uint64_t);
pub fn rocksdb_options_set_allow_concurrent_memtable_write(opt: *mut rocksdb_options_t,
v: c_uchar);
pub fn rocksdb_options_set_verify_checksums_in_compaction(opt: *mut rocksdb_options_t, pub fn rocksdb_options_set_verify_checksums_in_compaction(opt: *mut rocksdb_options_t,
v: c_uchar); v: c_uchar);

@ -1030,6 +1030,7 @@ fn ffi() {
rocksdb_slicetransform_create_fixed_prefix(3)); rocksdb_slicetransform_create_fixed_prefix(3));
rocksdb_options_set_hash_skip_list_rep(options, 5000, 4, 4); rocksdb_options_set_hash_skip_list_rep(options, 5000, 4, 4);
rocksdb_options_set_plain_table_factory(options, 4, 10, 0.75, 16); rocksdb_options_set_plain_table_factory(options, 4, 10, 0.75, 16);
rocksdb_options_set_allow_concurrent_memtable_write(options, 0);
db = rocksdb_open(options, dbname, &mut err); db = rocksdb_open(options, dbname, &mut err);
CheckNoError!(err); CheckNoError!(err);

@ -135,6 +135,14 @@ pub struct Snapshot<'a> {
/// iter.next(); /// iter.next();
/// } /// }
/// ///
/// // Reverse iteration from key
/// // Note: use seek_for_prev when iterating in reverse, because if this key
/// // doesn't exist, seek would position the iterator at the next key rather
/// // than the previous one.
/// iter.seek_for_prev(b"my key");
/// while iter.valid() {
/// println!("Saw {:?} {:?}", iter.key(), iter.value());
/// iter.prev();
/// }
/// ``` /// ```
pub struct DBRawIterator { pub struct DBRawIterator {
inner: *mut ffi::rocksdb_iterator_t, inner: *mut ffi::rocksdb_iterator_t,
@ -307,9 +315,6 @@ impl DBRawIterator {
unsafe { ffi::rocksdb_iter_seek(self.inner, key.as_ptr() as *const c_char, key.len() as size_t); } unsafe { ffi::rocksdb_iter_seek(self.inner, key.as_ptr() as *const c_char, key.len() as size_t); }
} }
/*
SeekForPrev was added in RocksDB 4.13 but not implemented in the C API until RocksDB 5.0
/// Seeks to the specified key, or the first key that lexicographically precedes it. /// Seeks to the specified key, or the first key that lexicographically precedes it.
/// ///
/// Like ``.seek()`` this method will attempt to seek to the specified key. /// Like ``.seek()`` this method will attempt to seek to the specified key.
@ -336,7 +341,6 @@ impl DBRawIterator {
pub fn seek_for_prev(&mut self, key: &[u8]) { pub fn seek_for_prev(&mut self, key: &[u8]) {
unsafe { ffi::rocksdb_iter_seek_for_prev(self.inner, key.as_ptr() as *const c_char, key.len() as size_t); } unsafe { ffi::rocksdb_iter_seek_for_prev(self.inner, key.as_ptr() as *const c_char, key.len() as size_t); }
} }
*/
/// Seeks to the next key. /// Seeks to the next key.
/// ///

@ -347,10 +347,81 @@ impl Options {
} }
} }
/// If true, allow multiple writers to update memtables in parallel.
///
/// Only some memtable factories support concurrent writes; currently this
/// is implemented only for SkipListFactory. Concurrent memtable writes are
/// not compatible with inplace_update_support or filter_deletes. If you
/// enable this feature it is strongly recommended to also set
/// enable_write_thread_adaptive_yield.
///
/// Default: true
///
/// # Example
///
/// ```
/// use rocksdb::Options;
///
/// let mut opts = Options::default();
/// opts.set_allow_concurrent_memtable_write(false);
/// ```
pub fn set_allow_concurrent_memtable_write(&mut self, allow: bool) {
    unsafe {
        ffi::rocksdb_options_set_allow_concurrent_memtable_write(self.inner, allow as c_uchar);
    }
}
pub fn set_disable_data_sync(&mut self, disable: bool) { pub fn set_disable_data_sync(&mut self, disable: bool) {
unsafe { ffi::rocksdb_options_set_disable_data_sync(self.inner, disable as c_int) } unsafe { ffi::rocksdb_options_set_disable_data_sync(self.inner, disable as c_int) }
} }
/// Enable direct I/O mode for reads.
///
/// When enabled, files are opened in "direct I/O" mode, so data read from
/// disk is neither cached nor buffered by the OS. The device's hardware
/// buffer may still be used. Memory-mapped files are not affected by this
/// setting. Depending on the workload, this may or may not improve
/// performance.
///
/// Default: false
///
/// # Example
///
/// ```
/// use rocksdb::Options;
///
/// let mut opts = Options::default();
/// opts.set_use_direct_reads(true);
/// ```
pub fn set_use_direct_reads(&mut self, enabled: bool) {
    unsafe { ffi::rocksdb_options_set_use_direct_reads(self.inner, enabled as c_uchar) }
}
/// Enable direct I/O mode for flush and compaction.
///
/// When enabled, files are opened in "direct I/O" mode, so data written to
/// disk is neither cached nor buffered by the OS. The device's hardware
/// buffer may still be used. Memory-mapped files are not affected by this
/// setting. Depending on the workload, this may or may not improve
/// performance.
///
/// Default: false
///
/// # Example
///
/// ```
/// use rocksdb::Options;
///
/// let mut opts = Options::default();
/// opts.set_use_direct_io_for_flush_and_compaction(true);
/// ```
pub fn set_use_direct_io_for_flush_and_compaction(&mut self, enabled: bool) {
    unsafe {
        ffi::rocksdb_options_set_use_direct_io_for_flush_and_compaction(
            self.inner,
            enabled as c_uchar,
        )
    }
}
/// Hints to the OS that it should not buffer disk I/O. Enabling this /// Hints to the OS that it should not buffer disk I/O. Enabling this
/// parameter may improve performance but increases pressure on the /// parameter may improve performance but increases pressure on the
/// system cache. /// system cache.
@ -373,15 +444,16 @@ impl Options {
/// # Example /// # Example
/// ///
/// ``` /// ```
/// #[allow(deprecated)]
/// use rocksdb::Options; /// use rocksdb::Options;
/// ///
/// let mut opts = Options::default(); /// let mut opts = Options::default();
/// opts.set_allow_os_buffer(false); /// opts.set_allow_os_buffer(false);
/// ``` /// ```
#[deprecated(since="0.7.0", note="replaced with set_use_direct_reads/set_use_direct_io_for_flush_and_compaction methods")]
pub fn set_allow_os_buffer(&mut self, is_allow: bool) { pub fn set_allow_os_buffer(&mut self, is_allow: bool) {
unsafe { self.set_use_direct_reads(!is_allow);
ffi::rocksdb_options_set_allow_os_buffer(self.inner, is_allow as c_uchar); self.set_use_direct_io_for_flush_and_compaction(!is_allow);
}
} }
/// Sets the number of shards used for table cache. /// Sets the number of shards used for table cache.

@ -98,7 +98,6 @@ pub fn test_seek() {
let db = setup_test_db("seek"); let db = setup_test_db("seek");
db.put(b"k1", b"v1").unwrap(); db.put(b"k1", b"v1").unwrap();
db.put(b"k2", b"v2").unwrap(); db.put(b"k2", b"v2").unwrap();
db.put(b"k3", b"v3").unwrap();
db.put(b"k4", b"v4").unwrap(); db.put(b"k4", b"v4").unwrap();
let mut iter = db.raw_iterator(); let mut iter = db.raw_iterator();
@ -107,6 +106,13 @@ pub fn test_seek() {
assert_eq!(iter.valid(), true); assert_eq!(iter.valid(), true);
assert_eq!(iter.key(), Some(b"k2".to_vec())); assert_eq!(iter.key(), Some(b"k2".to_vec()));
assert_eq!(iter.value(), Some(b"v2".to_vec())); assert_eq!(iter.value(), Some(b"v2".to_vec()));
// Check it gets the next key when the key doesn't exist
iter.seek(b"k3");
assert_eq!(iter.valid(), true);
assert_eq!(iter.key(), Some(b"k4".to_vec()));
assert_eq!(iter.value(), Some(b"v4".to_vec()));
} }
@ -124,3 +130,25 @@ pub fn test_seek_to_nonexistant() {
assert_eq!(iter.key(), Some(b"k3".to_vec())); assert_eq!(iter.key(), Some(b"k3".to_vec()));
assert_eq!(iter.value(), Some(b"v3".to_vec())); assert_eq!(iter.value(), Some(b"v3".to_vec()));
} }
#[test]
pub fn test_seek_for_prev() {
    let db = setup_test_db("seek_for_prev");
    db.put(b"k1", b"v1").unwrap();
    db.put(b"k2", b"v2").unwrap();
    db.put(b"k4", b"v4").unwrap();

    // An exact match behaves like a plain seek.
    let mut it = db.raw_iterator();
    it.seek(b"k2");
    assert!(it.valid());
    assert_eq!(it.key(), Some(b"k2".to_vec()));
    assert_eq!(it.value(), Some(b"v2".to_vec()));

    // For a missing key, seek_for_prev lands on the preceding key.
    it.seek_for_prev(b"k3");
    assert!(it.valid());
    assert_eq!(it.key(), Some(b"k2".to_vec()));
    assert_eq!(it.value(), Some(b"v2".to_vec()));
}

Loading…
Cancel
Save