Bump bindgen 0.63.0 -> 0.64.0 (#734)

master
cwlittle committed 2 years ago via GitHub
parent c1314a3d59
commit 548b425a13
11 changed files:

  1. librocksdb-sys/Cargo.toml (2 changed lines)
  2. librocksdb-sys/build.rs (23 changed lines)
  3. src/db.rs (12 changed lines)
  4. src/ffi_util.rs (3 changed lines)
  5. src/transactions/optimistic_transaction_db.rs (3 changed lines)
  6. src/transactions/transaction_db.rs (8 changed lines)
  7. tests/test_checkpoint.rs (10 changed lines)
  8. tests/test_column_family.rs (15 changed lines)
  9. tests/test_db.rs (14 changed lines)
  10. tests/test_iterator.rs (3 changed lines)
  11. tests/test_merge_operator.rs (2 changed lines)

librocksdb-sys/Cargo.toml

@@ -37,6 +37,6 @@ uuid = { version = "1.0", features = ["v4"] }
 [build-dependencies]
 cc = { version = "1.0", features = ["parallel"] }
-bindgen = { version = "0.63", default-features = false, features = ["runtime"] }
+bindgen = { version = "0.64", default-features = false, features = ["runtime"] }
 glob = "0.3"
 pkg-config = { version = "0.3", optional = true }
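
The manifest change is the version bump alone; the "runtime" feature is kept, meaning bindgen loads libclang at run time through clang-sys rather than linking it at build time. As a rough illustration only (not this crate's actual build script; the header path and function name are assumptions), a build.rs drives bindgen like this:

    // Hypothetical sketch of a bindgen invocation with runtime libclang
    // loading; the header path below is illustrative, not librocksdb-sys's.
    fn generate_bindings(out_dir: &std::path::Path) {
        let bindings = bindgen::Builder::default()
            .header("rocksdb/include/rocksdb/c.h") // assumed header location
            .generate()
            .expect("unable to generate rocksdb bindings");
        bindings
            .write_to_file(out_dir.join("bindings.rs"))
            .expect("unable to write rocksdb bindings");
    }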

librocksdb-sys/build.rs

@@ -6,7 +6,7 @@ fn link(name: &str, bundled: bool) {
     let target = var("TARGET").unwrap();
     let target: Vec<_> = target.split('-').collect();
     if target.get(2) == Some(&"windows") {
-        println!("cargo:rustc-link-lib=dylib={}", name);
+        println!("cargo:rustc-link-lib=dylib={name}");
         if bundled && target.get(3) == Some(&"gnu") {
             let dir = var("CARGO_MANIFEST_DIR").unwrap();
             println!("cargo:rustc-link-search=native={}/{}", dir, target[0]);
@@ -16,10 +16,7 @@ fn link(name: &str, bundled: bool) {
 fn fail_on_empty_directory(name: &str) {
     if fs::read_dir(name).unwrap().count() == 0 {
-        println!(
-            "The `{}` directory is empty, did you forget to pull the submodules?",
-            name
-        );
+        println!("The `{name}` directory is empty, did you forget to pull the submodules?");
         println!("Try `git submodule update --init --recursive`");
         panic!();
     }
@@ -288,19 +285,19 @@ fn build_snappy() {
 }
 fn try_to_find_and_link_lib(lib_name: &str) -> bool {
-    println!("cargo:rerun-if-env-changed={}_COMPILE", lib_name);
-    if let Ok(v) = env::var(format!("{}_COMPILE", lib_name)) {
+    println!("cargo:rerun-if-env-changed={lib_name}_COMPILE");
+    if let Ok(v) = env::var(format!("{lib_name}_COMPILE")) {
         if v.to_lowercase() == "true" || v == "1" {
             return false;
         }
     }
-    println!("cargo:rerun-if-env-changed={}_LIB_DIR", lib_name);
-    println!("cargo:rerun-if-env-changed={}_STATIC", lib_name);
-    if let Ok(lib_dir) = env::var(format!("{}_LIB_DIR", lib_name)) {
-        println!("cargo:rustc-link-search=native={}", lib_dir);
-        let mode = match env::var_os(format!("{}_STATIC", lib_name)) {
+    println!("cargo:rerun-if-env-changed={lib_name}_LIB_DIR");
+    println!("cargo:rerun-if-env-changed={lib_name}_STATIC");
+    if let Ok(lib_dir) = env::var(format!("{lib_name}_LIB_DIR")) {
+        println!("cargo:rustc-link-search=native={lib_dir}");
+        let mode = match env::var_os(format!("{lib_name}_STATIC")) {
             Some(_) => "static",
             None => "dylib",
         };
@@ -313,7 +310,7 @@ fn try_to_find_and_link_lib(lib_name: &str) -> bool {
 fn cxx_standard() -> String {
     env::var("ROCKSDB_CXX_STD").map_or("-std=c++17".to_owned(), |cxx_std| {
         if !cxx_std.starts_with("-std=") {
-            format!("-std={}", cxx_std)
+            format!("-std={cxx_std}")
         } else {
             cxx_std
         }
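
Nearly every hunk in this commit is the same mechanical change: positional format!/println! arguments replaced with identifiers captured directly in the format string, a form stable since Rust 1.58 and the one clippy's uninlined_format_args lint nudges toward. A minimal sketch of the pattern:

    // Captured identifiers in format strings (stable since Rust 1.58).
    let name = "rocksdb";
    println!("cargo:rustc-link-lib=dylib={}", name); // positional argument
    println!("cargo:rustc-link-lib=dylib={name}");   // inlined capture, identical output
    // Only bare identifiers can be captured; expressions such as target[0]
    // still need a positional argument: println!("{}", target[0]);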

src/db.rs

@@ -603,8 +603,7 @@ impl<T: ThreadMode> DBWithThreadMode<T> {
         if let Err(e) = fs::create_dir_all(&path) {
             return Err(Error::new(format!(
-                "Failed to create RocksDB directory: `{:?}`.",
-                e
+                "Failed to create RocksDB directory: `{e:?}`."
             )));
         }
@@ -1733,8 +1732,7 @@ impl<T: ThreadMode, D: DBInner> DBCommon<T, D> {
             Ok(prop_name) => get_property(prop_name.as_ptr()),
             Err(e) => {
                 return Err(Error::new(format!(
-                    "Failed to convert property name to CString: {}",
-                    e
+                    "Failed to convert property name to CString: {e}"
                 )));
             }
         };
@@ -1744,8 +1742,7 @@ impl<T: ThreadMode, D: DBInner> DBCommon<T, D> {
         let result = match unsafe { CStr::from_ptr(value) }.to_str() {
             Ok(s) => parse(s).map(|value| Some(value)),
             Err(e) => Err(Error::new(format!(
-                "Failed to convert property value to string: {}",
-                e
+                "Failed to convert property value to string: {e}"
             ))),
         };
         unsafe {
@@ -1787,8 +1784,7 @@ impl<T: ThreadMode, D: DBInner> DBCommon<T, D> {
     fn parse_property_int_value(value: &str) -> Result<u64, Error> {
         value.parse::<u64>().map_err(|err| {
             Error::new(format!(
-                "Failed to convert property value {} to int: {}",
-                value, err
+                "Failed to convert property value {value} to int: {err}"
             ))
         })
     }
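
Format specifiers carry over unchanged into the captured form: `{e:?}` is Debug formatting and `{e}` is Display, exactly as `{:?}` and `{}` were with a trailing argument. A standalone illustration:

    // Specifiers combine with captured identifiers just as with positional args.
    let e = std::io::Error::from(std::io::ErrorKind::NotFound);
    assert_eq!(
        format!("Failed to create RocksDB directory: `{e:?}`."),  // captured, Debug
        format!("Failed to create RocksDB directory: `{:?}`.", e) // positional, Debug
    );
    println!("I/O error: {e}"); // `{e}` uses Display instead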

src/ffi_util.rs

@@ -54,8 +54,7 @@ pub(crate) fn to_cpath<P: AsRef<Path>>(path: P) -> Result<CString, Error> {
     match CString::new(path.as_ref().to_string_lossy().as_bytes()) {
         Ok(c) => Ok(c),
         Err(e) => Err(Error::new(format!(
-            "Failed to convert path to CString: {}",
-            e,
+            "Failed to convert path to CString: {e}"
         ))),
     }
 }

src/transactions/optimistic_transaction_db.rs

@@ -134,8 +134,7 @@ impl<T: ThreadMode> OptimisticTransactionDB<T> {
         if let Err(e) = fs::create_dir_all(&path) {
             return Err(Error::new(format!(
-                "Failed to create RocksDB directory: `{:?}`.",
-                e
+                "Failed to create RocksDB directory: `{e:?}`."
             )));
         }

src/transactions/transaction_db.rs

@@ -234,8 +234,7 @@ impl<T: ThreadMode> TransactionDB<T> {
         if let Err(e) = fs::create_dir_all(&path) {
             return Err(Error::new(format!(
-                "Failed to create RocksDB directory: `{:?}`.",
-                e
+                "Failed to create RocksDB directory: `{e:?}`."
             )));
         }
@@ -360,13 +359,12 @@ impl<T: ThreadMode> TransactionDB<T> {
         name: &str,
         opts: &Options,
     ) -> Result<*mut ffi::rocksdb_column_family_handle_t, Error> {
-        let cf_name = if let Ok(c) = CString::new(name.as_bytes()) {
-            c
-        } else {
+        let Ok(cf_name) = CString::new(name.as_bytes()) else {
             return Err(Error::new(
                 "Failed to convert path to CString when creating cf".to_owned(),
             ));
         };
         Ok(unsafe {
             ffi_try!(ffi::rocksdb_transactiondb_create_column_family(
                 self.inner,
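
The second hunk above is more than argument inlining: an if-let/else binding becomes a let-else statement (stable since Rust 1.65), which binds on the success path and requires the else arm to diverge. A self-contained sketch of the same shape:

    use std::ffi::CString;

    // let-else: bind the Ok value or bail out, with no intermediate variable.
    fn to_cstring(name: &str) -> Result<CString, String> {
        let Ok(cf_name) = CString::new(name.as_bytes()) else {
            return Err("Failed to convert path to CString when creating cf".to_owned());
        };
        Ok(cf_name)
    }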

tests/test_checkpoint.rs

@@ -24,7 +24,7 @@ pub fn test_single_checkpoint() {
     const PATH_PREFIX: &str = "_rust_rocksdb_cp_single_";

     // Create DB with some data
-    let db_path = DBPath::new(&format!("{}db1", PATH_PREFIX));
+    let db_path = DBPath::new(&format!("{PATH_PREFIX}db1"));
     let mut opts = Options::default();
     opts.create_if_missing(true);
@@ -37,7 +37,7 @@ pub fn test_single_checkpoint() {
     // Create checkpoint
     let cp1 = Checkpoint::new(&db).unwrap();
-    let cp1_path = DBPath::new(&format!("{}cp1", PATH_PREFIX));
+    let cp1_path = DBPath::new(&format!("{PATH_PREFIX}cp1"));
     cp1.create_checkpoint(&cp1_path).unwrap();

     // Verify checkpoint
@@ -54,7 +54,7 @@ pub fn test_multi_checkpoints() {
    const PATH_PREFIX: &str = "_rust_rocksdb_cp_multi_";

     // Create DB with some data
-    let db_path = DBPath::new(&format!("{}db1", PATH_PREFIX));
+    let db_path = DBPath::new(&format!("{PATH_PREFIX}db1"));
     let mut opts = Options::default();
     opts.create_if_missing(true);
@@ -67,7 +67,7 @@ pub fn test_multi_checkpoints() {
     // Create first checkpoint
     let cp1 = Checkpoint::new(&db).unwrap();
-    let cp1_path = DBPath::new(&format!("{}cp1", PATH_PREFIX));
+    let cp1_path = DBPath::new(&format!("{PATH_PREFIX}cp1"));
     cp1.create_checkpoint(&cp1_path).unwrap();

     // Verify checkpoint
@@ -88,7 +88,7 @@ pub fn test_multi_checkpoints() {
     // Create another checkpoint
     let cp2 = Checkpoint::new(&db).unwrap();
-    let cp2_path = DBPath::new(&format!("{}cp2", PATH_PREFIX));
+    let cp2_path = DBPath::new(&format!("{PATH_PREFIX}cp2"));
     cp2.create_checkpoint(&cp2_path).unwrap();

     // Verify second checkpoint

tests/test_column_family.rs

@@ -334,11 +334,11 @@ fn test_merge_operator() {
     db.merge_cf(&cf1, b"k1", b"d").unwrap();
     db.merge_cf(&cf1, b"k1", b"efg").unwrap();
     let m = db.merge_cf(&cf1, b"k1", b"h");
-    println!("m is {:?}", m);
+    println!("m is {m:?}");
     // TODO assert!(m.is_ok());
     match db.get(b"k1") {
         Ok(Some(value)) => match std::str::from_utf8(&value) {
-            Ok(v) => println!("retrieved utf8 value: {}", v),
+            Ok(v) => println!("retrieved utf8 value: {v}"),
             Err(_) => println!("did not read valid utf-8 out of the db"),
         },
         Err(_) => println!("error reading value"),
@@ -458,13 +458,13 @@ fn test_no_leaked_column_family() {
     // repeat creating and dropping cfs many time to indirectly detect
     // possible leak via large dir.
     for cf_index in 0..20 {
-        let cf_name = format!("cf{}", cf_index);
+        let cf_name = format!("cf{cf_index}");
         db.create_cf(&cf_name, &Options::default()).unwrap();
         let cf = db.cf_handle(&cf_name).unwrap();

         let mut batch = rocksdb::WriteBatch::default();
         for key_index in 0..100 {
-            batch.put_cf(&cf, format!("k{}", key_index), &large_blob);
+            batch.put_cf(&cf, format!("k{key_index}"), &large_blob);
         }
         db.write_opt(batch, &write_options).unwrap();
@@ -480,11 +480,8 @@ fn test_no_leaked_column_family() {
     // if we're not leaking, the dir bytes should be well under 10M bytes in total
     let dir_bytes = dir_size(&n).unwrap();
-    assert!(
-        dir_bytes < 10_000_000,
-        "{} is too large (maybe leaking...)",
-        dir_bytes
-    );
+    let leak_msg = format!("{dir_bytes} is too large (maybe leaking...)");
+    assert!(dir_bytes < 10_000_000, "{}", leak_msg);

     // only if MultiThreaded, cf can outlive db.drop_cf() and shouldn't cause SEGV...
     #[cfg(feature = "multi-threaded-cf")]
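
One detail in the last hunk: the failure message is pre-rendered into leak_msg and passed via "{}". Since assert!'s format message also accepts captured identifiers and is only evaluated when the assertion fails, the temporary could equally be inlined; a sketch of that alternative:

    // assert! format messages are lazily evaluated on failure and accept
    // captured identifiers directly, so the temporary String is optional:
    let dir_bytes: u64 = 1_000;
    assert!(dir_bytes < 10_000_000, "{dir_bytes} is too large (maybe leaking...)");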

tests/test_db.rs

@@ -710,7 +710,7 @@ fn fifo_compaction_test() {
         let block_cache_hit_count = ctx.metric(PerfMetric::BlockCacheHitCount);
         if block_cache_hit_count > 0 {
-            let expect = format!("block_cache_hit_count = {}", block_cache_hit_count);
+            let expect = format!("block_cache_hit_count = {block_cache_hit_count}");
             assert!(ctx.report(true).contains(&expect));
         }
@@ -829,7 +829,7 @@ fn get_with_cache_and_bulkload_test() {
     // write a lot
     let mut batch = WriteBatch::default();
     for i in 0..10_000 {
-        batch.put(format!("{:0>4}", i).as_bytes(), b"v");
+        batch.put(format!("{i:0>4}").as_bytes(), b"v");
     }
     assert!(db.write(batch).is_ok());
@@ -858,7 +858,7 @@ fn get_with_cache_and_bulkload_test() {
     // try to get key
     let iter = db.iterator(IteratorMode::Start);
     for (expected, (k, _)) in iter.map(Result::unwrap).enumerate() {
-        assert_eq!(k.as_ref(), format!("{:0>4}", expected).as_bytes());
+        assert_eq!(k.as_ref(), format!("{expected:0>4}").as_bytes());
     }

     // check live files (sst files meta)
@@ -919,7 +919,7 @@ fn get_with_cache_and_bulkload_test() {
         // try to get key
         let iter = db.iterator(IteratorMode::Start);
         for (expected, (k, _)) in iter.map(Result::unwrap).enumerate() {
-            assert_eq!(k.as_ref(), format!("{:0>4}", expected).as_bytes());
+            assert_eq!(k.as_ref(), format!("{expected:0>4}").as_bytes());
         }
     }
 }
@@ -964,7 +964,7 @@ fn get_with_cache_and_bulkload_and_blobs_test() {
     // write a lot
     let mut batch = WriteBatch::default();
     for i in 0..10_000 {
-        batch.put(format!("{:0>4}", i).as_bytes(), b"v");
+        batch.put(format!("{i:0>4}").as_bytes(), b"v");
     }
     assert!(db.write(batch).is_ok());
@@ -993,7 +993,7 @@ fn get_with_cache_and_bulkload_and_blobs_test() {
     // try to get key
     let iter = db.iterator(IteratorMode::Start);
     for (expected, (k, _)) in iter.map(Result::unwrap).enumerate() {
-        assert_eq!(k.as_ref(), format!("{:0>4}", expected).as_bytes());
+        assert_eq!(k.as_ref(), format!("{expected:0>4}").as_bytes());
    }

     // check live files (sst files meta)
@@ -1054,7 +1054,7 @@ fn get_with_cache_and_bulkload_and_blobs_test() {
         // try to get key
         let iter = db.iterator(IteratorMode::Start);
         for (expected, (k, _)) in iter.map(Result::unwrap).enumerate() {
-            assert_eq!(k.as_ref(), format!("{:0>4}", expected).as_bytes());
+            assert_eq!(k.as_ref(), format!("{expected:0>4}").as_bytes());
         }
     }
 }
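
The `{i:0>4}` captures above show that fill, alignment, and width specifiers compose with captured identifiers unchanged: fill '0', right-align, width 4. A quick check:

    // Fill/alignment/width specifiers work the same in the captured form.
    let i = 42;
    assert_eq!(format!("{i:0>4}"), "0042");               // fill '0', right-align, width 4
    assert_eq!(format!("{i:0>4}"), format!("{:0>4}", i)); // same output as positional form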

tests/test_iterator.rs

@@ -77,7 +77,8 @@ fn test_iterator() {
         let mut it = db.iterator(IteratorMode::From(key, dir));
         let value = it.next();
         if valid {
-            assert!(matches!(value, Some(Ok(_))), "{:?}", value);
+            let expect = format!("{value:?}");
+            assert!(matches!(value, Some(Ok(_))), "{:?}", &expect);
         } else {
             assert_eq!(None, value);
             assert_eq!(None, it.next()); // Iterator is fused

tests/test_merge_operator.rs

@@ -60,7 +60,7 @@ fn merge_test() {
     match db.get(b"k1") {
         Ok(Some(value)) => {
             if let Ok(v) = std::str::from_utf8(&value) {
-                println!("retrieved utf8 value: {}", v)
+                println!("retrieved utf8 value: {v}")
             } else {
                 println!("did not read valid utf-8 out of the db")
             }
