Add clippy linter in CI (#417)

master
Oleksandr Anyshchenko 5 years ago committed by GitHub
parent 8f7124b488
commit d4023f2683
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
  1. 12
      .travis.yml
  2. 8
      .travis/lints.sh
  3. 4
      .travis/tests.sh
  4. 6
      LICENSE
  5. 7
      librocksdb-sys/build.rs
  6. 3
      librocksdb-sys/src/lib.rs
  7. 2
      librocksdb-sys/src/test.rs
  8. 45
      src/backup.rs
  9. 13
      src/checkpoint.rs
  10. 13
      src/compaction_filter.rs
  11. 2
      src/comparator.rs
  12. 71
      src/db.rs
  13. 6
      src/db_iterator.rs
  14. 2
      src/db_options.rs
  15. 24
      src/lib.rs
  16. 60
      src/merge_operator.rs
  17. 2
      src/slice_transform.rs
  18. 4
      tests/test_backup.rs
  19. 4
      tests/test_checkpoint.rs
  20. 4
      tests/test_column_family.rs
  21. 6
      tests/test_compationfilter.rs
  22. 14
      tests/test_db.rs
  23. 17
      tests/test_iterator.rs
  24. 15
      tests/test_multithreaded.rs
  25. 16
      tests/test_pinnable_slice.rs
  26. 4
      tests/test_property.rs
  27. 4
      tests/test_raw_iterator.rs
  28. 4
      tests/test_rocksdb_options.rs
  29. 18
      tests/test_slice_transform.rs
  30. 6
      tests/test_write_batch.rs
  31. 1
      tests/util/mod.rs

@ -4,20 +4,16 @@ dist: bionic
os:
- linux
- osx
- windows
rust:
- stable
jobs:
allow_failures:
- os: windows
install:
- rustup component add rustfmt
- rustfmt -V
- rustup component add clippy
- cargo clippy --version
script:
- cargo fmt --all -- --check
- cargo test --manifest-path=librocksdb-sys/Cargo.toml
- cargo test -- --skip test_iterator_outlive_db
- .travis/lints.sh
- .travis/tests.sh

@ -0,0 +1,8 @@
#!/bin/bash
# Formatting and lint checks are executed on the OSX build host only,
# so each check runs exactly once per CI build.
if [ "${TRAVIS_OS_NAME}" = "osx" ]; then
    cargo fmt --all -- --check
    cargo clippy --all --tests -- -D warnings
fi

@ -0,0 +1,4 @@
#!/bin/bash
# CI test runner: first test the raw FFI bindings crate, then the main crate.
cargo test --manifest-path=librocksdb-sys/Cargo.toml
# NOTE(review): test_iterator_outlive_db is skipped here — presumably it is
# expected to fail/run separately; confirm against the CI configuration.
cargo test -- --skip test_iterator_outlive_db

@ -187,11 +187,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2014 Tyler Neely
Copyright 2015 Tyler Neely
Copyright 2016 Tyler Neely
Copyright 2017 Tyler Neely
Copyright 2018 Tyler Neely
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@ -183,12 +183,12 @@ fn build_rocksdb() {
fn build_snappy() {
let target = env::var("TARGET").unwrap();
let endianness = env::var("CARGO_CFG_TARGET_ENDIAN").unwrap();
let mut config = cc::Build::new();
config.include("snappy/");
config.include(".");
config.define("NDEBUG", Some("1"));
config.extra_warnings(false);
if target.contains("msvc") {
config.flag("-EHsc");
@ -250,6 +250,7 @@ fn build_zstd() {
}
compiler.opt_level(3);
compiler.extra_warnings(false);
compiler.define("ZSTD_LIB_DEPRECATED", Some("0"));
compiler.compile("libzstd.a");
@ -269,6 +270,7 @@ fn build_zlib() {
compiler.flag_if_supported("-Wno-implicit-function-declaration");
compiler.opt_level(3);
compiler.extra_warnings(false);
compiler.compile("libz.a");
}
@ -290,6 +292,7 @@ fn build_bzip2() {
compiler.extra_warnings(false);
compiler.opt_level(3);
compiler.extra_warnings(false);
compiler.compile("libbz2.a");
}

@ -1,4 +1,4 @@
// Copyright 2019 Tyler Neely, Alex Regueiro
// Copyright 2020 Tyler Neely, Alex Regueiro
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(clippy::all)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(non_upper_case_globals)]

@ -1,4 +1,4 @@
// Copyright 2019 Tyler Neely
// Copyright 2020 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

@ -38,15 +38,14 @@ impl BackupEngine {
path: P,
) -> Result<BackupEngine, Error> {
let path = path.as_ref();
let cpath = match CString::new(path.to_string_lossy().as_bytes()) {
Ok(c) => c,
Err(_) => {
return Err(Error::new(
"Failed to convert path to CString \
let cpath = if let Ok(e) = CString::new(path.to_string_lossy().as_bytes()) {
e
} else {
return Err(Error::new(
"Failed to convert path to CString \
when opening backup engine"
.to_owned(),
));
}
.to_owned(),
));
};
let be: *mut ffi::rocksdb_backup_engine_t;
@ -107,27 +106,25 @@ impl BackupEngine {
opts: &RestoreOptions,
) -> Result<(), Error> {
let db_dir = db_dir.as_ref();
let c_db_dir = match CString::new(db_dir.to_string_lossy().as_bytes()) {
Ok(c) => c,
Err(_) => {
return Err(Error::new(
"Failed to convert db_dir to CString \
let c_db_dir = if let Ok(c) = CString::new(db_dir.to_string_lossy().as_bytes()) {
c
} else {
return Err(Error::new(
"Failed to convert db_dir to CString \
when restoring from latest backup"
.to_owned(),
));
}
.to_owned(),
));
};
let wal_dir = wal_dir.as_ref();
let c_wal_dir = match CString::new(wal_dir.to_string_lossy().as_bytes()) {
Ok(c) => c,
Err(_) => {
return Err(Error::new(
"Failed to convert wal_dir to CString \
let c_wal_dir = if let Ok(c) = CString::new(wal_dir.to_string_lossy().as_bytes()) {
c
} else {
return Err(Error::new(
"Failed to convert wal_dir to CString \
when restoring from latest backup"
.to_owned(),
));
}
.to_owned(),
));
};
unsafe {

@ -50,13 +50,12 @@ impl Checkpoint {
/// Creates new physical DB checkpoint in directory specified by `path`.
pub fn create_checkpoint<P: AsRef<Path>>(&self, path: P) -> Result<(), Error> {
let path = path.as_ref();
let cpath = match CString::new(path.to_string_lossy().as_bytes()) {
Ok(c) => c,
Err(_) => {
return Err(Error::new(
"Failed to convert path to CString when creating DB checkpoint".to_owned(),
));
}
let cpath = if let Ok(c) = CString::new(path.to_string_lossy().as_bytes()) {
c
} else {
return Err(Error::new(
"Failed to convert path to CString when creating DB checkpoint".to_owned(),
));
};
unsafe {

@ -1,4 +1,4 @@
// Copyright 2016 Tyler Neely
// Copyright 2020 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -41,12 +41,7 @@ pub enum Decision {
///
/// [set_compaction_filter]: ../struct.Options.html#method.set_compaction_filter
pub trait CompactionFilterFn: FnMut(u32, &[u8], &[u8]) -> Decision {}
impl<F> CompactionFilterFn for F
where
F: FnMut(u32, &[u8], &[u8]) -> Decision,
F: Send + 'static,
{
}
impl<F> CompactionFilterFn for F where F: FnMut(u32, &[u8], &[u8]) -> Decision + Send + 'static {}
pub struct CompactionFilterCallback<F>
where
@ -85,7 +80,7 @@ pub unsafe extern "C" fn filter_callback<F>(
where
F: CompactionFilterFn,
{
use self::Decision::*;
use self::Decision::{Change, Keep, Remove};
let cb = &mut *(raw_cb as *mut CompactionFilterCallback<F>);
let key = slice::from_raw_parts(raw_key as *const u8, key_length as usize);
@ -106,7 +101,7 @@ where
#[cfg(test)]
#[allow(unused_variables)]
fn test_filter(level: u32, key: &[u8], value: &[u8]) -> Decision {
use self::Decision::*;
use self::Decision::{Change, Keep, Remove};
match key.first() {
Some(&b'_') => Remove,
Some(&b'%') => Change(b"secret"),

@ -1,4 +1,4 @@
// Copyright 2014 Tyler Neely
// Copyright 2020 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

@ -1,4 +1,4 @@
// Copyright 2014 Tyler Neely
// Copyright 2020 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -101,7 +101,7 @@ impl DB {
.into_iter()
.map(|name| ColumnFamilyDescriptor::new(name.as_ref(), Options::default()));
DB::open_cf_descriptors_internal(opts, path, cfs, AccessType::ReadWrite)
DB::open_cf_descriptors_internal(opts, path, cfs, &AccessType::ReadWrite)
}
/// Opens a database for read only with the given database options and column family names.
@ -124,7 +124,7 @@ impl DB {
opts,
path,
cfs,
AccessType::ReadOnly {
&AccessType::ReadOnly {
error_if_log_file_exist,
},
)
@ -150,7 +150,7 @@ impl DB {
opts,
primary_path,
cfs,
AccessType::Secondary {
&AccessType::Secondary {
secondary_path: secondary_path.as_ref(),
},
)
@ -162,7 +162,7 @@ impl DB {
P: AsRef<Path>,
I: IntoIterator<Item = ColumnFamilyDescriptor>,
{
DB::open_cf_descriptors_internal(opts, path, cfs, AccessType::ReadWrite)
DB::open_cf_descriptors_internal(opts, path, cfs, &AccessType::ReadWrite)
}
/// Internal implementation for opening RocksDB.
@ -170,7 +170,7 @@ impl DB {
opts: &Options,
path: P,
cfs: I,
access_type: AccessType,
access_type: &AccessType,
) -> Result<DB, Error>
where
P: AsRef<Path>,
@ -191,7 +191,7 @@ impl DB {
let mut cf_map = BTreeMap::new();
if cfs.is_empty() {
db = DB::open_raw(opts, cpath, access_type)?;
db = DB::open_raw(opts, &cpath, access_type)?;
} else {
let mut cfs_v = cfs;
// Always open the default column family.
@ -220,12 +220,12 @@ impl DB {
db = DB::open_cf_raw(
opts,
cpath,
&cpath,
&cfs_v,
&cfnames,
&cfopts,
&mut cfhandles,
access_type,
&access_type,
)?;
for handle in &cfhandles {
if handle.is_null() {
@ -253,11 +253,11 @@ impl DB {
fn open_raw(
opts: &Options,
cpath: CString,
access_type: AccessType,
cpath: &CString,
access_type: &AccessType,
) -> Result<*mut ffi::rocksdb_t, Error> {
let db = unsafe {
match access_type {
match *access_type {
AccessType::ReadOnly {
error_if_log_file_exist,
} => ffi_try!(ffi::rocksdb_open_for_read_only(
@ -282,15 +282,15 @@ impl DB {
fn open_cf_raw(
opts: &Options,
cpath: CString,
cpath: &CString,
cfs_v: &[ColumnFamilyDescriptor],
cfnames: &[*const c_char],
cfopts: &[*const ffi::rocksdb_options_t],
cfhandles: &mut Vec<*mut ffi::rocksdb_column_family_handle_t>,
access_type: AccessType,
access_type: &AccessType,
) -> Result<*mut ffi::rocksdb_t, Error> {
let db = unsafe {
match access_type {
match *access_type {
AccessType::ReadOnly {
error_if_log_file_exist,
} => ffi_try!(ffi::rocksdb_open_for_read_only_column_families(
@ -537,19 +537,18 @@ impl DB {
}
pub fn create_cf<N: AsRef<str>>(&mut self, name: N, opts: &Options) -> Result<(), Error> {
let cname = match CString::new(name.as_ref().as_bytes()) {
Ok(c) => c,
Err(_) => {
return Err(Error::new(
"Failed to convert path to CString when creating cf".to_owned(),
));
}
let cf_name = if let Ok(c) = CString::new(name.as_ref().as_bytes()) {
c
} else {
return Err(Error::new(
"Failed to convert path to CString when creating cf".to_owned(),
));
};
unsafe {
let inner = ffi_try!(ffi::rocksdb_create_column_family(
self.inner,
opts.inner,
cname.as_ptr(),
cf_name.as_ptr(),
));
self.cfs
@ -565,9 +564,7 @@ impl DB {
}
Ok(())
} else {
Err(Error::new(
format!("Invalid column family: {}", name).to_owned(),
))
Err(Error::new(format!("Invalid column family: {}", name)))
}
}
@ -896,8 +893,8 @@ impl DB {
pub fn compact_range<S: AsRef<[u8]>, E: AsRef<[u8]>>(&self, start: Option<S>, end: Option<E>) {
unsafe {
let start = start.as_ref().map(|s| s.as_ref());
let end = end.as_ref().map(|e| e.as_ref());
let start = start.as_ref().map(AsRef::as_ref);
let end = end.as_ref().map(AsRef::as_ref);
ffi::rocksdb_compact_range(
self.inner,
@ -916,8 +913,8 @@ impl DB {
end: Option<E>,
) {
unsafe {
let start = start.as_ref().map(|s| s.as_ref());
let end = end.as_ref().map(|e| e.as_ref());
let start = start.as_ref().map(AsRef::as_ref);
let end = end.as_ref().map(AsRef::as_ref);
ffi::rocksdb_compact_range_cf(
self.inner,
@ -1220,9 +1217,9 @@ fn writebatch_works() {
assert!(db.get(b"k1").unwrap().is_none());
assert_eq!(batch.len(), 0);
assert!(batch.is_empty());
let _ = batch.put(b"k1", b"v1111");
let _ = batch.put(b"k2", b"v2222");
let _ = batch.put(b"k3", b"v3333");
batch.put(b"k1", b"v1111");
batch.put(b"k2", b"v2222");
batch.put(b"k3", b"v3333");
assert_eq!(batch.len(), 3);
assert!(!batch.is_empty());
assert!(db.get(b"k1").unwrap().is_none());
@ -1234,7 +1231,7 @@ fn writebatch_works() {
{
// test delete
let mut batch = WriteBatch::default();
let _ = batch.delete(b"k1");
batch.delete(b"k1");
assert_eq!(batch.len(), 1);
assert!(!batch.is_empty());
let p = db.write(batch);
@ -1244,7 +1241,7 @@ fn writebatch_works() {
{
// test delete_range
let mut batch = WriteBatch::default();
let _ = batch.delete_range(b"k2", b"k4");
batch.delete_range(b"k2", b"k4");
assert_eq!(batch.len(), 1);
assert!(!batch.is_empty());
let p = db.write(batch);
@ -1256,7 +1253,7 @@ fn writebatch_works() {
// test size_in_bytes
let mut batch = WriteBatch::default();
let before = batch.size_in_bytes();
let _ = batch.put(b"k1", b"v1234567890");
batch.put(b"k1", b"v1234567890");
let after = batch.size_in_bytes();
assert!(before + 10 <= after);
}
@ -1355,7 +1352,7 @@ fn iterator_test_tailing() {
(k.to_vec(), v.to_vec()),
(data[i].0.to_vec(), data[i].1.to_vec())
);
tot = tot + 1;
tot += 1;
}
assert_eq!(tot, data.len());
}

@ -454,13 +454,13 @@ impl<'a> Iterator for DBIterator<'a> {
// Initial call to next() after seeking should not move the iterator
// or the first item will not be returned
if !self.just_seeked {
if self.just_seeked {
self.just_seeked = false;
} else {
match self.direction {
Direction::Forward => self.raw.next(),
Direction::Reverse => self.raw.prev(),
}
} else {
self.just_seeked = false;
}
if self.raw.valid() {

@ -1,4 +1,4 @@
// Copyright 2014 Tyler Neely
// Copyright 2020 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

@ -1,4 +1,4 @@
// Copyright 2014 Tyler Neely
// Copyright 2020 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -54,6 +54,23 @@
//! ```
//!
#![warn(clippy::pedantic)]
#![allow(
// Next `cast_*` lints don't give alternatives.
clippy::cast_possible_wrap, clippy::cast_possible_truncation, clippy::cast_sign_loss,
// Next lints produce too much noise/false positives.
clippy::module_name_repetitions, clippy::similar_names, clippy::must_use_candidate,
clippy::pub_enum_variant_names,
// '... may panic' lints.
clippy::indexing_slicing,
// Too much work to fix.
clippy::missing_errors_doc,
// False positive: WebSocket
clippy::doc_markdown,
clippy::missing_safety_doc,
clippy::needless_pass_by_value
)]
#[macro_use]
mod ffi_util;
@ -136,7 +153,10 @@ impl fmt::Display for Error {
#[cfg(test)]
mod test {
use super::*;
use super::{
BlockBasedOptions, ColumnFamily, ColumnFamilyDescriptor, DBIterator, DBRawIterator,
Options, PlainTableFactoryOptions, ReadOptions, Snapshot, WriteOptions, DB,
};
#[test]
fn is_send() {

@ -1,4 +1,4 @@
// Copyright 2014 Tyler Neely
// Copyright 2020 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -37,23 +37,22 @@
//! Some(result)
//! }
//!
//! fn main() {
//! let path = "_rust_path_to_rocksdb";
//! let mut opts = Options::default();
//! opts.create_if_missing(true);
//! opts.set_merge_operator("test operator", concat_merge, None);
//! {
//! let db = DB::open(&opts, path).unwrap();
//! let p = db.put(b"k1", b"a");
//! db.merge(b"k1", b"b");
//! db.merge(b"k1", b"c");
//! db.merge(b"k1", b"d");
//! db.merge(b"k1", b"efg");
//! let r = db.get(b"k1");
//! assert_eq!(r.unwrap().unwrap(), b"abcdefg");
//! }
//! let _ = DB::destroy(&opts, path);
//! }
//!let path = "_rust_path_to_rocksdb";
//!let mut opts = Options::default();
//!
//!opts.create_if_missing(true);
//!opts.set_merge_operator("test operator", concat_merge, None);
//!{
//! let db = DB::open(&opts, path).unwrap();
//! let p = db.put(b"k1", b"a");
//! db.merge(b"k1", b"b");
//! db.merge(b"k1", b"c");
//! db.merge(b"k1", b"d");
//! db.merge(b"k1", b"efg");
//! let r = db.get(b"k1");
//! assert_eq!(r.unwrap().unwrap(), b"abcdefg");
//!}
//!let _ = DB::destroy(&opts, path);
//! ```
use libc::{self, c_char, c_int, c_void, size_t};
@ -201,7 +200,7 @@ impl<'a> Iterator for &'a mut MergeOperands {
#[cfg(test)]
mod test {
use super::*;
use super::MergeOperands;
fn test_provided_merge(
_new_key: &[u8],
@ -242,10 +241,13 @@ mod test {
let m = db.merge(b"k1", b"h");
assert!(m.is_ok());
match db.get(b"k1") {
Ok(Some(value)) => match std::str::from_utf8(&value) {
Ok(v) => println!("retrieved utf8 value: {}", v),
Err(_) => println!("did not read valid utf-8 out of the db"),
},
Ok(Some(value)) => {
if let Ok(v) = std::str::from_utf8(&value) {
println!("retrieved utf8 value: {}", v)
} else {
println!("did not read valid utf-8 out of the db")
}
}
Err(_) => println!("error reading value"),
_ => panic!("value not present"),
}
@ -264,16 +266,16 @@ mod test {
}
fn from_slice<T: Sized>(s: &[u8]) -> Option<&T> {
if ::std::mem::size_of::<T>() != s.len() {
if std::mem::size_of::<T>() == s.len() {
unsafe { Some(&*(s.as_ptr() as *const T)) }
} else {
println!(
"slice {:?} is len {}, but T is size {}",
s,
s.len(),
::std::mem::size_of::<T>()
std::mem::size_of::<T>()
);
None
} else {
unsafe { Some(&*(s.as_ptr() as *const T)) }
}
}
@ -328,12 +330,13 @@ mod test {
}
#[test]
#[allow(clippy::too_many_lines)]
fn counting_mergetest() {
use crate::{DBCompactionStyle, Options, DB};
use std::sync::Arc;
use std::thread;
let path = "_rust_rocksdb_partial_mergetest";
let path = "_rust_rocksdb_partial_merge_test";
let mut opts = Options::default();
opts.create_if_missing(true);
opts.set_compaction_style(DBCompactionStyle::Universal);
@ -370,6 +373,7 @@ mod test {
let d1 = db.clone();
let d2 = db.clone();
let d3 = db.clone();
let h1 = thread::spawn(move || {
for i in 0..500 {
let _ = d1.merge(b"k2", b"c");

@ -1,4 +1,4 @@
// Copyright 2018 Tyler Neely
// Copyright 2020 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

@ -1,4 +1,4 @@
// Copyright 2019 Tyler Neely
// Copyright 2020 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -13,12 +13,12 @@
// limitations under the License.
mod util;
use crate::util::DBPath;
use rocksdb::{
backup::{BackupEngine, BackupEngineOptions, RestoreOptions},
Options, DB,
};
use util::DBPath;
#[test]
fn backup_restore() {

@ -1,4 +1,4 @@
// Copyright 2018 Eugene P.
// Copyright 2020 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -14,8 +14,8 @@
mod util;
use crate::util::DBPath;
use rocksdb::{checkpoint::Checkpoint, Options, DB};
use util::DBPath;
#[test]
pub fn test_single_checkpoint() {

@ -1,4 +1,4 @@
// Copyright 2014 Tyler Neely
// Copyright 2020 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -14,8 +14,8 @@
mod util;
use crate::util::DBPath;
use rocksdb::{ColumnFamilyDescriptor, MergeOperands, Options, DB};
use util::DBPath;
#[test]
fn test_column_family() {

@ -1,4 +1,4 @@
// Copyright 2019 Tyler Neely
// Copyright 2020 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -14,8 +14,8 @@
mod util;
use crate::util::DBPath;
use rocksdb::{CompactionDecision, Options, DB};
use util::DBPath;
#[cfg(test)]
#[allow(unused_variables)]
@ -30,7 +30,7 @@ fn test_filter(level: u32, key: &[u8], value: &[u8]) -> CompactionDecision {
#[test]
fn compaction_filter_test() {
let path = DBPath::new("_rust_rocksdb_filtertest");
let path = DBPath::new("_rust_rocksdb_filter_test");
let mut opts = Options::default();
opts.create_if_missing(true);
opts.set_compaction_filter("test", test_filter);

@ -1,4 +1,4 @@
// Copyright 2019 Tyler Neely
// Copyright 2020 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -14,10 +14,10 @@
mod util;
use crate::util::DBPath;
use rocksdb::{Error, IteratorMode, Options, Snapshot, WriteBatch, DB};
use std::sync::Arc;
use std::{mem, thread};
use util::DBPath;
#[test]
fn external() {
@ -83,7 +83,7 @@ fn writebatch_works() {
assert!(db.get(b"k1").unwrap().is_none());
assert_eq!(batch.len(), 0);
assert!(batch.is_empty());
let _ = batch.put(b"k1", b"v1111");
batch.put(b"k1", b"v1111");
assert_eq!(batch.len(), 1);
assert!(!batch.is_empty());
assert!(db.get(b"k1").unwrap().is_none());
@ -94,7 +94,7 @@ fn writebatch_works() {
{
// test delete
let mut batch = WriteBatch::default();
let _ = batch.delete(b"k1");
batch.delete(b"k1");
assert_eq!(batch.len(), 1);
assert!(!batch.is_empty());
assert!(db.write(batch).is_ok());
@ -104,7 +104,7 @@ fn writebatch_works() {
// test size_in_bytes
let mut batch = WriteBatch::default();
let before = batch.size_in_bytes();
let _ = batch.put(b"k1", b"v1234567890");
batch.put(b"k1", b"v1234567890");
let after = batch.size_in_bytes();
assert!(before + 10 <= after);
}
@ -180,9 +180,7 @@ fn sync_snapshot_test() {
let wrapper = SnapshotWrapper::new(&db);
let wrapper_1 = wrapper.clone();
let handler_1 = thread::spawn(move || wrapper_1.check("k1", b"v1"));
let wrapper_2 = wrapper.clone();
let handler_2 = thread::spawn(move || wrapper_2.check("k2", b"v2"));
let handler_2 = thread::spawn(move || wrapper.check("k2", b"v2"));
assert!(handler_1.join().unwrap());
assert!(handler_2.join().unwrap());

@ -1,4 +1,4 @@
// Copyright 2014 Tyler Neely
// Copyright 2020 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -14,16 +14,17 @@
mod util;
use crate::util::DBPath;
use rocksdb::{Direction, IteratorMode, MemtableFactory, Options, DB};
use util::DBPath;
/// Copies `input` into a freshly heap-allocated boxed slice.
fn cba(input: &[u8]) -> Box<[u8]> {
    Box::from(input)
}
#[test]
#[allow(clippy::cognitive_complexity)]
fn test_iterator() {
let n = DBPath::new("_rust_rocksdb_iteratortest");
let n = DBPath::new("_rust_rocksdb_iterator_test");
{
let k1: Box<[u8]> = b"k1".to_vec().into_boxed_slice();
let k2: Box<[u8]> = b"k2".to_vec().into_boxed_slice();
@ -172,7 +173,7 @@ fn key(k: &[u8]) -> Box<[u8]> {
#[test]
fn test_prefix_iterator() {
let n = DBPath::new("_rust_rocksdb_prefixiteratortest");
let n = DBPath::new("_rust_rocksdb_prefix_iterator_test");
{
let a1: Box<[u8]> = key(b"aaa1");
let a2: Box<[u8]> = key(b"aaa2");
@ -216,7 +217,7 @@ fn test_prefix_iterator_uses_full_prefix() {
// as long as the prefix extracted from `key` matches the
// prefix extracted from `prefix`.
let path = DBPath::new("_rust_rocksdb_prefixiteratorusesfullprefixtest");
let path = DBPath::new("_rust_rocksdb_prefix_iterator_uses_full_prefix_test");
{
let data = [
([0, 0, 0, 0], b"111"),
@ -253,7 +254,7 @@ fn test_prefix_iterator_uses_full_prefix() {
#[test]
fn test_full_iterator() {
let path = DBPath::new("fulliteratortest");
let path = DBPath::new("full_iterator_test");
{
let a1: Box<[u8]> = key(b"aaa1");
let a2: Box<[u8]> = key(b"aaa2");
@ -280,7 +281,7 @@ fn test_full_iterator() {
assert!(db.put(&*b1, &*b1).is_ok());
assert!(db.put(&*b2, &*b2).is_ok());
// A normal iterator won't work here since we're using a HashSkipList for our memtable
// A normal iterator won't work here since we're using a HashSkipList for our memory table
// implementation (which buckets keys based on their prefix):
let bad_iterator = db.iterator(IteratorMode::Start);
assert_eq!(bad_iterator.collect::<Vec<_>>(), vec![]);
@ -304,7 +305,7 @@ fn custom_iter<'a>(db: &'a DB) -> impl Iterator<Item = usize> + 'a {
#[test]
fn test_custom_iterator() {
let path = DBPath::new("_rust_rocksdb_customiterator_test");
let path = DBPath::new("_rust_rocksdb_custom_iterator_test");
{
let mut opts = Options::default();
opts.create_if_missing(true);

@ -1,4 +1,4 @@
// Copyright 2014 Tyler Neely
// Copyright 2020 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -14,10 +14,10 @@
mod util;
use crate::util::DBPath;
use rocksdb::DB;
use std::sync::Arc;
use std::thread;
use util::DBPath;
const N: usize = 100_000;
@ -44,17 +44,10 @@ pub fn test_multithreaded() {
}
});
let db3 = db.clone();
let j3 = thread::spawn(move || {
for _ in 1..N {
let result = match db3.get(b"key") {
Ok(Some(v)) => {
if &v[..] != b"value1" && &v[..] != b"value2" {
false
} else {
true
}
}
let result = match db.get(b"key") {
Ok(Some(v)) => !(&v[..] != b"value1" && &v[..] != b"value2"),
_ => false,
};
assert!(result);

@ -1,7 +1,21 @@
// Copyright 2020 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod util;
use crate::util::DBPath;
use rocksdb::{Options, DB};
use util::DBPath;
#[test]
fn test_pinnable_slice() {

@ -1,4 +1,4 @@
// Copyright 2019 Tyler Neely
// Copyright 2020 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -14,8 +14,8 @@
mod util;
use crate::util::DBPath;
use rocksdb::{Options, DB};
use util::DBPath;
#[test]
fn property_test() {

@ -1,4 +1,4 @@
// Copyright 2014 Tyler Neely
// Copyright 2020 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -14,8 +14,8 @@
mod util;
use crate::util::DBPath;
use rocksdb::DB;
use util::DBPath;
#[test]
pub fn test_forwards_iteration() {

@ -1,4 +1,4 @@
// Copyright 2014 Tyler Neely
// Copyright 2020 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -14,9 +14,9 @@
mod util;
use crate::util::DBPath;
use rocksdb::{BlockBasedOptions, DataBlockIndexType, Options, ReadOptions, DB};
use std::{fs, io::Read as _};
use util::DBPath;
#[test]
fn test_set_num_levels() {

@ -1,11 +1,25 @@
// Copyright 2020 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod util;
use crate::util::DBPath;
use rocksdb::{Options, SliceTransform, DB};
use util::DBPath;
#[test]
pub fn test_slice_transform() {
let db_path = DBPath::new("_rust_rocksdb_slicetransform_test");
let db_path = DBPath::new("_rust_rocksdb_slice_transform_test");
{
let a1: Box<[u8]> = key(b"aaa1");
let a2: Box<[u8]> = key(b"aaa2");

@ -1,4 +1,4 @@
// Copyright 2019 Tyler Neely
// Copyright 2020 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -17,9 +17,9 @@ use rocksdb::WriteBatch;
#[test]
fn test_write_batch_clear() {
let mut batch = WriteBatch::default();
let _ = batch.put(b"1", b"2");
batch.put(b"1", b"2");
assert_eq!(batch.len(), 1);
let _ = batch.clear();
batch.clear();
assert_eq!(batch.len(), 0);
assert!(batch.is_empty());
}

@ -1,5 +1,4 @@
use std::path::{Path, PathBuf};
use tempfile;
use rocksdb::{Options, DB};

Loading…
Cancel
Save