Update to RocksDB 8.0.0 (#761)

* Compressed block cache removed.
* RocksDB Lite removed.

https://github.com/facebook/rocksdb/releases/tag/v8.0.0
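
For crate users, the practical impact of the first bullet is that `BlockBasedOptions::set_block_cache_compressed` and the already-deprecated `set_lru_cache_compressed` are removed (see the src/db_options.rs diff below); only the regular block cache remains configurable. A minimal migration sketch, assuming the post-8.0.0 crate API in which `Cache::new_lru_cache` returns a `Cache` directly (earlier releases returned a `Result`):

    use rocksdb::{BlockBasedOptions, Cache, Options, DB};

    fn open_with_block_cache(path: &str) -> Result<DB, rocksdb::Error> {
        let mut block_opts = BlockBasedOptions::default();

        // Before 8.0.0 one could additionally call
        //     block_opts.set_block_cache_compressed(&compressed_cache);
        // that API is gone; only the regular (uncompressed) block cache is left.
        let cache = Cache::new_lru_cache(64 * 1024 * 1024); // 64 MiB; assumed infallible here
        block_opts.set_block_cache(&cache);

        let mut opts = Options::default();
        opts.create_if_missing(true);
        opts.set_block_based_table_factory(&block_opts);

        DB::open(&opts, path)
    }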
Branch: master
Niklas Fiekas committed (via GitHub) · commit c08acedbbd · parent 44dc84171a
7 changed files:

  1. Cargo.toml (2 changed lines)
  2. librocksdb-sys/Cargo.toml (2 changed lines)
  3. librocksdb-sys/build_version.cc (18 changed lines)
  4. librocksdb-sys/rocksdb (2 changed lines)
  5. librocksdb-sys/rocksdb_lib_sources.txt (4 changed lines)
  6. src/db_options.rs (30 changed lines)
  7. tests/test_merge_operator.rs (7 changed lines)

Cargo.toml

@@ -36,7 +36,7 @@ serde1 = ["serde"]
 [dependencies]
 libc = "0.2"
-librocksdb-sys = { path = "librocksdb-sys", version = "0.10.0" }
+librocksdb-sys = { path = "librocksdb-sys", version = "0.11.0" }
 serde = { version = "1", features = [ "derive" ], optional = true }

 [dev-dependencies]

librocksdb-sys/Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "librocksdb-sys"
-version = "0.10.0+7.9.2"
+version = "0.11.0+8.0.0"
 edition = "2018"
 rust-version = "1.60"
 authors = ["Karl Hobley <karlhobley10@gmail.com>", "Arkadiy Paronyan <arkadiy@ethcore.io>"]

librocksdb-sys/build_version.cc

@@ -8,28 +8,20 @@
 // The build script may replace these values with real values based
 // on whether or not GIT is available and the platform settings
-static const std::string rocksdb_build_git_sha = "444b3f4845dd01b0d127c4b420fdd3b50ad56682";
-static const std::string rocksdb_build_git_tag = "rocksdb_build_git_tag:v7.9.2";
+static const std::string rocksdb_build_git_sha = "fdf403f5918a2b4355cf75ebe5e21d0fc22db880";
+static const std::string rocksdb_build_git_tag = "rocksdb_build_git_tag:v8.0.0";
 #define HAS_GIT_CHANGES 0
 #if HAS_GIT_CHANGES == 0
 // If HAS_GIT_CHANGES is 0, the GIT date is used.
 // Use the time the branch/tag was last modified
-static const std::string rocksdb_build_date = "rocksdb_build_date:2022-12-22 09:30:39";
+static const std::string rocksdb_build_date = "rocksdb_build_date:2023-02-19 21:44:55";
 #else
 // If HAS_GIT_CHANGES is > 0, the branch/tag has modifications.
 // Use the time the build was created.
-static const std::string rocksdb_build_date = "rocksdb_build_date:2022-12-22 09:20:39";
+static const std::string rocksdb_build_date = "rocksdb_build_date:2023-02-19 21:44:55";
 #endif

-#ifndef ROCKSDB_LITE
-extern "C" {
-} // extern "C"
-std::unordered_map<std::string, ROCKSDB_NAMESPACE::RegistrarFunc> ROCKSDB_NAMESPACE::ObjectRegistry::builtins_ = {
-};
-#endif //ROCKSDB_LITE
+std::unordered_map<std::string, ROCKSDB_NAMESPACE::RegistrarFunc> ROCKSDB_NAMESPACE::ObjectRegistry::builtins_ = {};

 namespace ROCKSDB_NAMESPACE {
 static void AddProperty(std::unordered_map<std::string, std::string> *props, const std::string& name) {

librocksdb-sys/rocksdb

@@ -1 +1 @@
-Subproject commit 444b3f4845dd01b0d127c4b420fdd3b50ad56682
+Subproject commit fdf403f5918a2b4355cf75ebe5e21d0fc22db880

librocksdb-sys/rocksdb_lib_sources.txt

@@ -1,6 +1,7 @@
 cache/cache.cc
 cache/cache_entry_roles.cc
 cache/cache_key.cc
+cache/cache_helpers.cc
 cache/cache_reservation_manager.cc
 cache/charged_cache.cc
 cache/clock_cache.cc
@@ -152,6 +153,7 @@ options/db_options.cc
 options/options.cc
 options/options_helper.cc
 options/options_parser.cc
+port/mmap.cc
 port/port_posix.cc
 port/stack_trace.cc
 table/adaptive/adaptive_table_factory.cc
@@ -162,6 +164,7 @@ table/block_based/block_based_table_factory.cc
 table/block_based/block_based_table_iterator.cc
 table/block_based/block_based_table_reader.cc
 table/block_based/block_builder.cc
+table/block_based/block_cache.cc
 table/block_based/block_prefetcher.cc
 table/block_based/block_prefix_index.cc
 table/block_based/data_block_hash_index.cc
@@ -223,6 +226,7 @@ util/compression_context_cache.cc
 util/concurrent_task_limiter_impl.cc
 util/crc32c.cc
 util/crc32c_arm64.cc
+util/data_structure.cc
 util/dynamic_bloom.cc
 util/hash.cc
 util/murmurhash.cc

src/db_options.rs

@@ -106,14 +106,12 @@ impl OptionsMustOutliveDB {
 #[derive(Default)]
 struct BlockBasedOptionsMustOutliveDB {
     block_cache: Option<Cache>,
-    block_cache_compressed: Option<Cache>,
 }

 impl BlockBasedOptionsMustOutliveDB {
     fn clone(&self) -> Self {
         Self {
             block_cache: self.block_cache.as_ref().map(Cache::clone),
-            block_cache_compressed: self.block_cache_compressed.as_ref().map(Cache::clone),
         }
     }
 }
@@ -393,24 +391,6 @@ impl BlockBasedOptions {
         }
     }

-    /// When configured: use the specified cache for compressed blocks.
-    /// Otherwise rocksdb will not use a compressed block cache.
-    ///
-    /// Note: though it looks similar to `block_cache`, RocksDB doesn't put the
-    /// same type of object there.
-    #[deprecated(
-        since = "0.15.0",
-        note = "This function will be removed in next release. Use set_block_cache_compressed instead"
-    )]
-    pub fn set_lru_cache_compressed(&mut self, size: size_t) {
-        let cache = new_cache(size);
-        unsafe {
-            // Since cache is wrapped in shared_ptr, we don't need to
-            // call rocksdb_cache_destroy explicitly.
-            ffi::rocksdb_block_based_options_set_block_cache_compressed(self.inner, cache);
-        }
-    }
-
     /// Sets global cache for blocks (user data is stored in a set of blocks, and
     /// a block is the unit of reading from disk). Cache must outlive DB instance which uses it.
     ///
@@ -423,16 +403,6 @@ impl BlockBasedOptions {
         self.outlive.block_cache = Some(cache.clone());
     }

-    /// Sets global cache for compressed blocks. Cache must outlive DB instance which uses it.
-    ///
-    /// By default, rocksdb will not use a compressed block cache.
-    pub fn set_block_cache_compressed(&mut self, cache: &Cache) {
-        unsafe {
-            ffi::rocksdb_block_based_options_set_block_cache_compressed(self.inner, cache.0.inner);
-        }
-        self.outlive.block_cache_compressed = Some(cache.clone());
-    }
-
     /// Disable block cache
     pub fn disable_cache(&mut self) {
         unsafe {

tests/test_merge_operator.rs

@@ -267,7 +267,12 @@ fn failed_merge_test() {
     match res.and_then(|_e| db.get(b"key")) {
         Ok(val) => panic!("expected merge failure to propagate, got: {:?}", val),
         Err(e) => {
-            assert!(e.into_string().contains("Could not perform merge."));
+            let msg = e.into_string();
+            assert!(
+                msg.contains("Merge operator failed"),
+                "unexpected merge error message: {}",
+                msg
+            );
         }
     }
 }
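
One side effect worth noting: the message RocksDB attaches to a failed merge changed in 8.0.0, so downstream code that string-matched the old "Could not perform merge." text has to look for the new wording instead. A hedged sketch of such a check, using only the substring asserted in the updated test above (the helper name is made up for illustration, and matching on error text is inherently brittle):

    // Hypothetical helper: detect a merge-operator failure by message substring.
    // "Merge operator failed" is the substring the updated test expects.
    fn is_merge_failure(err: &rocksdb::Error) -> bool {
        err.to_string().contains("Merge operator failed")
    }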
