Remove compressed block cache (#11117)

Summary:
The compressed block cache has been superseded by the compressed secondary cache, so this change removes the feature.
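
For users migrating off block_cache_compressed, a minimal configuration sketch follows, assuming the public CompressedSecondaryCacheOptions / NewCompressedSecondaryCache API; the function name and capacities are illustrative, not taken from this PR:

// Hedged migration sketch: attach a compressed secondary cache to the
// primary block cache instead of setting block_cache_compressed.
#include "rocksdb/cache.h"
#include "rocksdb/options.h"
#include "rocksdb/table.h"

using namespace ROCKSDB_NAMESPACE;

Options MakeOptionsWithCompressedSecondaryCache() {
  // The secondary tier holds compressed blocks, like the removed cache did.
  CompressedSecondaryCacheOptions sec_opts;
  sec_opts.capacity = 64 << 20;

  // The primary tier holds uncompressed blocks and spills to the secondary.
  LRUCacheOptions pri_opts;
  pri_opts.capacity = 32 << 20;
  pri_opts.secondary_cache = NewCompressedSecondaryCache(sec_opts);

  BlockBasedTableOptions table_options;
  table_options.block_cache = NewLRUCache(pri_opts);

  Options options;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  return options;
}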

Pull Request resolved: https://github.com/facebook/rocksdb/pull/11117

Test Plan: See that CI passes

Reviewed By: pdillinger

Differential Revision: D42700164

fbshipit-source-id: 6cbb24e460da29311150865f60ecb98637f9f67d
Authored by sdong 2 years ago; committed by Facebook GitHub Bot
parent 4a9185340d
commit 2800aa069a
1. HISTORY.md (3)
2. cache/lru_cache_test.cc (37)
3. db/c.cc (8)
4. db/db_basic_test.cc (31)
5. db/db_block_cache_test.cc (326)
6. db/db_tailing_iter_test.cc (1)
7. db/db_test2.cc (61)
8. db/db_test_util.cc (4)
9. db/db_test_util.h (19)
10. db_stress_tool/db_stress_test_base.cc (14)
11. db_stress_tool/db_stress_test_base.h (2)
12. include/rocksdb/c.h (4)
13. include/rocksdb/table.h (9)
14. java/rocksjni/table.cc (22)
15. java/samples/src/main/java/RocksDBSample.java (3)
16. java/src/main/java/org/rocksdb/BlockBasedTableConfig.java (130)
17. java/src/main/java/org/rocksdb/OptionsUtil.java (2)
18. java/src/test/java/org/rocksdb/BlockBasedTableConfigTest.java (74)
19. options/options_settable_test.cc (3)
20. options/options_test.cc (88)
21. table/block_based/block_based_table_builder.cc (49)
22. table/block_based/block_based_table_factory.cc (75)
23. table/block_based/block_based_table_reader.cc (107)
24. table/block_based/block_based_table_reader.h (8)
25. table/block_based/block_based_table_reader_sync_and_async.h (5)
26. table/block_based/block_cache.h (4)
27. table/block_based/reader_common.h (7)
28. tools/db_bench_tool.cc (1)
29. utilities/memory/memory_test.cc (1)
30. utilities/persistent_cache/persistent_cache_test.cc (49)

@ -3,6 +3,9 @@
### Bug Fixes
* Fixed a data race on `ColumnFamilyData::flush_reason` caused by concurrent flushes.
### Feature Removal
* The block_cache_compressed feature is removed, along with its related statistics.
## 7.10.0 (01/23/2023)
### Behavior changes
* Make best-efforts recovery verify SST unique ID before Version construction (#10962)

@ -1784,43 +1784,6 @@ TEST_F(DBSecondaryCacheTest, SecondaryCacheFailureTest) {
Destroy(options);
}
TEST_F(DBSecondaryCacheTest, TestSecondaryWithCompressedCache) {
if (!Snappy_Supported()) {
ROCKSDB_GTEST_SKIP("Compressed cache test requires snappy support");
return;
}
LRUCacheOptions opts(2000 /* capacity */, 0 /* num_shard_bits */,
false /* strict_capacity_limit */,
0.5 /* high_pri_pool_ratio */,
nullptr /* memory_allocator */, kDefaultToAdaptiveMutex,
kDontChargeCacheMetadata);
std::shared_ptr<TestSecondaryCache> secondary_cache(
new TestSecondaryCache(2048 * 1024));
opts.secondary_cache = secondary_cache;
std::shared_ptr<Cache> cache = NewLRUCache(opts);
BlockBasedTableOptions table_options;
table_options.block_cache_compressed = cache;
table_options.no_block_cache = true;
table_options.block_size = 1234;
Options options = GetDefaultOptions();
options.compression = kSnappyCompression;
options.create_if_missing = true;
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
DestroyAndReopen(options);
Random rnd(301);
const int N = 6;
for (int i = 0; i < N; i++) {
// Partly compressible
std::string p_v = rnd.RandomString(507) + std::string(500, ' ');
ASSERT_OK(Put(Key(i), p_v));
}
ASSERT_OK(Flush());
for (int i = 0; i < 2 * N; i++) {
std::string v = Get(Key(i % N));
ASSERT_EQ(1007, v.size());
}
}
TEST_F(LRUCacheSecondaryCacheTest, BasicWaitAllTest) {
LRUCacheOptions opts(1024 /* capacity */, 2 /* num_shard_bits */,
false /* strict_capacity_limit */,

@ -2620,14 +2620,6 @@ void rocksdb_block_based_options_set_block_cache(
}
}
void rocksdb_block_based_options_set_block_cache_compressed(
rocksdb_block_based_table_options_t* options,
rocksdb_cache_t* block_cache_compressed) {
if (block_cache_compressed) {
options->rep.block_cache_compressed = block_cache_compressed->rep;
}
}
void rocksdb_block_based_options_set_whole_key_filtering(
rocksdb_block_based_table_options_t* options, unsigned char v) {
options->rep.whole_key_filtering = v;

@ -3364,17 +3364,13 @@ INSTANTIATE_TEST_CASE_P(DBBasicTestTrackWal, DBBasicTestTrackWal,
class DBBasicTestMultiGet : public DBTestBase {
public:
DBBasicTestMultiGet(std::string test_dir, int num_cfs, bool compressed_cache,
DBBasicTestMultiGet(std::string test_dir, int num_cfs,
bool uncompressed_cache, bool _compression_enabled,
bool _fill_cache, uint32_t compression_parallel_threads)
: DBTestBase(test_dir, /*env_do_fsync=*/false) {
compression_enabled_ = _compression_enabled;
fill_cache_ = _fill_cache;
if (compressed_cache) {
std::shared_ptr<Cache> cache = NewLRUCache(1048576);
compressed_cache_ = std::make_shared<MyBlockCache>(cache);
}
if (uncompressed_cache) {
std::shared_ptr<Cache> cache = NewLRUCache(1048576);
uncompressed_cache_ = std::make_shared<MyBlockCache>(cache);
@ -3418,7 +3414,6 @@ class DBBasicTestMultiGet : public DBTestBase {
} else {
table_options.pin_l0_filter_and_index_blocks_in_cache = true;
}
table_options.block_cache_compressed = compressed_cache_;
table_options.flush_block_policy_factory.reset(
new MyFlushBlockPolicyFactory());
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
@ -3601,16 +3596,14 @@ class DBBasicTestMultiGet : public DBTestBase {
std::vector<std::string> cf_names_;
};
class DBBasicTestWithParallelIO
: public DBBasicTestMultiGet,
class DBBasicTestWithParallelIO : public DBBasicTestMultiGet,
public testing::WithParamInterface<
std::tuple<bool, bool, bool, bool, uint32_t>> {
std::tuple<bool, bool, bool, uint32_t>> {
public:
DBBasicTestWithParallelIO()
: DBBasicTestMultiGet("/db_basic_test_with_parallel_io", 1,
std::get<0>(GetParam()), std::get<1>(GetParam()),
std::get<2>(GetParam()), std::get<3>(GetParam()),
std::get<4>(GetParam())) {}
std::get<2>(GetParam()), std::get<3>(GetParam())) {}
};
TEST_P(DBBasicTestWithParallelIO, MultiGet) {
@ -3941,13 +3934,12 @@ TEST_P(DBBasicTestWithParallelIO, MultiGetWithMissingFile) {
INSTANTIATE_TEST_CASE_P(ParallelIO, DBBasicTestWithParallelIO,
// Params are as follows -
// Param 0 - Compressed cache enabled
// Param 1 - Uncompressed cache enabled
// Param 2 - Data compression enabled
// Param 3 - ReadOptions::fill_cache
// Param 4 - CompressionOptions::parallel_threads
// Param 0 - Uncompressed cache enabled
// Param 1 - Data compression enabled
// Param 2 - ReadOptions::fill_cache
// Param 3 - CompressionOptions::parallel_threads
::testing::Combine(::testing::Bool(), ::testing::Bool(),
::testing::Bool(), ::testing::Bool(),
::testing::Bool(),
::testing::Values(1, 4)));
// Forward declaration
@ -4158,9 +4150,8 @@ class DBBasicTestMultiGetDeadline : public DBBasicTestMultiGet,
DBBasicTestMultiGetDeadline()
: DBBasicTestMultiGet(
"db_basic_test_multiget_deadline" /*Test dir*/,
10 /*# of column families*/, false /*compressed cache enabled*/,
true /*uncompressed cache enabled*/, true /*compression enabled*/,
true /*ReadOptions.fill_cache*/,
10 /*# of column families*/, true /*uncompressed cache enabled*/,
true /*compression enabled*/, true /*ReadOptions.fill_cache*/,
1 /*# of parallel compression threads*/) {}
inline void CheckStatus(std::vector<Status>& statuses, size_t num_ok) {

@ -44,10 +44,6 @@ class DBBlockCacheTest : public DBTestBase {
size_t compression_dict_miss_count_ = 0;
size_t compression_dict_hit_count_ = 0;
size_t compression_dict_insert_count_ = 0;
size_t compressed_miss_count_ = 0;
size_t compressed_hit_count_ = 0;
size_t compressed_insert_count_ = 0;
size_t compressed_failure_count_ = 0;
public:
const size_t kNumBlocks = 10;
@ -85,14 +81,6 @@ class DBBlockCacheTest : public DBTestBase {
hit_count_ = TestGetTickerCount(options, BLOCK_CACHE_HIT);
insert_count_ = TestGetTickerCount(options, BLOCK_CACHE_ADD);
failure_count_ = TestGetTickerCount(options, BLOCK_CACHE_ADD_FAILURES);
compressed_miss_count_ =
TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS);
compressed_hit_count_ =
TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_HIT);
compressed_insert_count_ =
TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_ADD);
compressed_failure_count_ =
TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_ADD_FAILURES);
}
void RecordCacheCountersForCompressionDict(const Options& options) {
@ -144,29 +132,6 @@ class DBBlockCacheTest : public DBTestBase {
compression_dict_insert_count_ = new_compression_dict_insert_count;
}
void CheckCompressedCacheCounters(const Options& options,
size_t expected_misses,
size_t expected_hits,
size_t expected_inserts,
size_t expected_failures) {
size_t new_miss_count =
TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS);
size_t new_hit_count =
TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_HIT);
size_t new_insert_count =
TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_ADD);
size_t new_failure_count =
TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_ADD_FAILURES);
ASSERT_EQ(compressed_miss_count_ + expected_misses, new_miss_count);
ASSERT_EQ(compressed_hit_count_ + expected_hits, new_hit_count);
ASSERT_EQ(compressed_insert_count_ + expected_inserts, new_insert_count);
ASSERT_EQ(compressed_failure_count_ + expected_failures, new_failure_count);
compressed_miss_count_ = new_miss_count;
compressed_hit_count_ = new_hit_count;
compressed_insert_count_ = new_insert_count;
compressed_failure_count_ = new_failure_count;
}
#ifndef ROCKSDB_LITE
const std::array<size_t, kNumCacheEntryRoles> GetCacheEntryRoleCountsBg() {
// Verify in cache entry role stats
@ -274,84 +239,6 @@ TEST_F(DBBlockCacheTest, TestWithoutCompressedBlockCache) {
}
#ifdef SNAPPY
TEST_F(DBBlockCacheTest, TestWithCompressedBlockCache) {
Options options = CurrentOptions();
options.create_if_missing = true;
options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
BlockBasedTableOptions table_options;
table_options.no_block_cache = true;
table_options.block_cache_compressed = nullptr;
table_options.block_size = 1;
table_options.filter_policy.reset(NewBloomFilterPolicy(20));
table_options.cache_index_and_filter_blocks = false;
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
options.compression = CompressionType::kSnappyCompression;
DestroyAndReopen(options);
std::string value(kValueSize, 'a');
for (size_t i = 0; i < kNumBlocks; i++) {
ASSERT_OK(Put(std::to_string(i), value));
ASSERT_OK(Flush());
}
ReadOptions read_options;
std::shared_ptr<Cache> compressed_cache = NewLRUCache(1 << 25, 0, false);
LRUCacheOptions co;
co.capacity = 0;
co.num_shard_bits = 0;
co.strict_capacity_limit = false;
// Needed so the entry stats collector is not counted
co.metadata_charge_policy = kDontChargeCacheMetadata;
std::shared_ptr<Cache> cache = NewLRUCache(co);
table_options.block_cache = cache;
table_options.no_block_cache = false;
table_options.block_cache_compressed = compressed_cache;
table_options.max_auto_readahead_size = 0;
table_options.cache_index_and_filter_blocks = false;
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
Reopen(options);
RecordCacheCounters(options);
// Load blocks into cache.
for (size_t i = 0; i < kNumBlocks - 1; i++) {
ASSERT_EQ(value, Get(std::to_string(i)));
CheckCacheCounters(options, 1, 0, 1, 0);
CheckCompressedCacheCounters(options, 1, 0, 1, 0);
}
size_t usage = cache->GetUsage();
ASSERT_EQ(0, usage);
ASSERT_EQ(usage, cache->GetPinnedUsage());
size_t compressed_usage = compressed_cache->GetUsage();
ASSERT_LT(0, compressed_usage);
// Compressed block cache cannot be pinned.
ASSERT_EQ(0, compressed_cache->GetPinnedUsage());
// Set the strict capacity limit flag. Now blocks will only load into the
// compressed block cache.
cache->SetCapacity(usage);
cache->SetStrictCapacityLimit(true);
ASSERT_EQ(usage, cache->GetPinnedUsage());
// Load last key block.
ASSERT_EQ(
"Operation aborted: Memory limit reached: Insert failed due to LRU cache "
"being full.",
Get(std::to_string(kNumBlocks - 1)));
// Failure will also record the miss counter.
CheckCacheCounters(options, 1, 0, 0, 1);
CheckCompressedCacheCounters(options, 1, 0, 1, 0);
// Clear strict capacity limit flag. This time we shall hit compressed block
// cache and load into block cache.
cache->SetStrictCapacityLimit(false);
// Load last key block.
ASSERT_EQ(value, Get(std::to_string(kNumBlocks - 1)));
CheckCacheCounters(options, 1, 0, 1, 0);
CheckCompressedCacheCounters(options, 0, 1, 0, 0);
}
namespace {
class PersistentCacheFromCache : public PersistentCache {
@ -413,80 +300,6 @@ class ReadOnlyCacheWrapper : public CacheWrapper {
};
} // anonymous namespace
TEST_F(DBBlockCacheTest, TestWithSameCompressed) {
auto table_options = GetTableOptions();
auto options = GetOptions(table_options);
InitTable(options);
std::shared_ptr<Cache> rw_cache{NewLRUCache(1000000)};
std::shared_ptr<PersistentCacheFromCache> rw_pcache{
new PersistentCacheFromCache(rw_cache, /*read_only*/ false)};
// Exercise some obscure behavior with read-only wrappers
std::shared_ptr<Cache> ro_cache{new ReadOnlyCacheWrapper(rw_cache)};
std::shared_ptr<PersistentCacheFromCache> ro_pcache{
new PersistentCacheFromCache(rw_cache, /*read_only*/ true)};
// Simple same pointer
table_options.block_cache = rw_cache;
table_options.block_cache_compressed = rw_cache;
table_options.persistent_cache.reset();
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
ASSERT_EQ(TryReopen(options).ToString(),
"Invalid argument: block_cache same as block_cache_compressed not "
"currently supported, and would be bad for performance anyway");
// Other cases
table_options.block_cache = ro_cache;
table_options.block_cache_compressed = rw_cache;
table_options.persistent_cache.reset();
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
ASSERT_EQ(TryReopen(options).ToString(),
"Invalid argument: block_cache and block_cache_compressed share "
"the same key space, which is not supported");
table_options.block_cache = rw_cache;
table_options.block_cache_compressed = ro_cache;
table_options.persistent_cache.reset();
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
ASSERT_EQ(TryReopen(options).ToString(),
"Invalid argument: block_cache_compressed and block_cache share "
"the same key space, which is not supported");
table_options.block_cache = ro_cache;
table_options.block_cache_compressed.reset();
table_options.persistent_cache = rw_pcache;
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
ASSERT_EQ(TryReopen(options).ToString(),
"Invalid argument: block_cache and persistent_cache share the same "
"key space, which is not supported");
table_options.block_cache = rw_cache;
table_options.block_cache_compressed.reset();
table_options.persistent_cache = ro_pcache;
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
ASSERT_EQ(TryReopen(options).ToString(),
"Invalid argument: persistent_cache and block_cache share the same "
"key space, which is not supported");
table_options.block_cache.reset();
table_options.no_block_cache = true;
table_options.block_cache_compressed = ro_cache;
table_options.persistent_cache = rw_pcache;
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
ASSERT_EQ(TryReopen(options).ToString(),
"Invalid argument: block_cache_compressed and persistent_cache "
"share the same key space, which is not supported");
table_options.block_cache.reset();
table_options.no_block_cache = true;
table_options.block_cache_compressed = rw_cache;
table_options.persistent_cache = ro_pcache;
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
ASSERT_EQ(TryReopen(options).ToString(),
"Invalid argument: persistent_cache and block_cache_compressed "
"share the same key space, which is not supported");
}
#endif // SNAPPY
#ifndef ROCKSDB_LITE
@ -1086,124 +899,6 @@ TEST_F(DBBlockCacheTest, ParanoidFileChecks) {
TestGetTickerCount(options, BLOCK_CACHE_ADD));
}
TEST_F(DBBlockCacheTest, CompressedCache) {
if (!Snappy_Supported()) {
return;
}
int num_iter = 80;
// Run this test four iterations.
// Iteration 1: only an uncompressed block cache
// Iteration 2: only a compressed block cache
// Iteration 3: both block cache and compressed cache
// Iteration 4: both block cache and compressed cache, but DB is not
// compressed
for (int iter = 0; iter < 4; iter++) {
Options options = CurrentOptions();
options.write_buffer_size = 64 * 1024; // small write buffer
options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
BlockBasedTableOptions table_options;
switch (iter) {
case 0:
// only uncompressed block cache
table_options.block_cache = NewLRUCache(8 * 1024);
table_options.block_cache_compressed = nullptr;
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
break;
case 1:
// no block cache, only compressed cache
table_options.no_block_cache = true;
table_options.block_cache = nullptr;
table_options.block_cache_compressed = NewLRUCache(8 * 1024);
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
break;
case 2:
// both compressed and uncompressed block cache
table_options.block_cache = NewLRUCache(1024);
table_options.block_cache_compressed = NewLRUCache(8 * 1024);
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
break;
case 3:
// both block cache and compressed cache, but DB is not compressed
// also, make block cache sizes bigger, to trigger block cache hits
table_options.block_cache = NewLRUCache(1024 * 1024);
table_options.block_cache_compressed = NewLRUCache(8 * 1024 * 1024);
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
options.compression = kNoCompression;
break;
default:
FAIL();
}
CreateAndReopenWithCF({"pikachu"}, options);
// default column family doesn't have block cache
Options no_block_cache_opts;
no_block_cache_opts.statistics = options.statistics;
no_block_cache_opts = CurrentOptions(no_block_cache_opts);
BlockBasedTableOptions table_options_no_bc;
table_options_no_bc.no_block_cache = true;
no_block_cache_opts.table_factory.reset(
NewBlockBasedTableFactory(table_options_no_bc));
ReopenWithColumnFamilies(
{"default", "pikachu"},
std::vector<Options>({no_block_cache_opts, options}));
Random rnd(301);
// Write 8MB (80 values, each 100K)
ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
std::vector<std::string> values;
std::string str;
for (int i = 0; i < num_iter; i++) {
if (i % 4 == 0) { // high compression ratio
str = rnd.RandomString(1000);
}
values.push_back(str);
ASSERT_OK(Put(1, Key(i), values[i]));
}
// flush all data from memtable so that reads are from block cache
ASSERT_OK(Flush(1));
for (int i = 0; i < num_iter; i++) {
ASSERT_EQ(Get(1, Key(i)), values[i]);
}
// check that we triggered the appropriate code paths in the cache
switch (iter) {
case 0:
// only uncompressed block cache
ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_MISS), 0);
ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS), 0);
break;
case 1:
// no block cache, only compressed cache
ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_MISS), 0);
ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS), 0);
break;
case 2:
// both compressed and uncompressed block cache
ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_MISS), 0);
ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS), 0);
break;
case 3:
// both compressed and uncompressed block cache
ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_MISS), 0);
ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_HIT), 0);
ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS), 0);
// compressed doesn't have any hits since blocks are not compressed on
// storage
ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_HIT), 0);
break;
default:
FAIL();
}
options.create_if_missing = true;
DestroyAndReopen(options);
}
}
TEST_F(DBBlockCacheTest, CacheCompressionDict) {
const int kNumFiles = 4;
const int kNumEntriesPerFile = 128;
@ -1708,20 +1403,6 @@ TEST_P(DBBlockCacheKeyTest, StableCacheKeys) {
uint64_t expected_stat = 0;
std::function<void()> verify_stats;
if (use_compressed_cache_) {
if (!Snappy_Supported()) {
ROCKSDB_GTEST_SKIP("Compressed cache test requires snappy support");
return;
}
options.compression = CompressionType::kSnappyCompression;
table_options.no_block_cache = true;
table_options.block_cache_compressed = NewLRUCache(1 << 25, 0, false);
verify_stats = [&options, &expected_stat] {
// One for ordinary SST file and one for external SST file
ASSERT_EQ(expected_stat,
options.statistics->getTickerCount(BLOCK_CACHE_COMPRESSED_ADD));
};
} else {
table_options.cache_index_and_filter_blocks = true;
table_options.block_cache = NewLRUCache(1 << 25, 0, false);
verify_stats = [&options, &expected_stat] {
@ -1732,7 +1413,6 @@ TEST_P(DBBlockCacheKeyTest, StableCacheKeys) {
ASSERT_EQ(expected_stat,
options.statistics->getTickerCount(BLOCK_CACHE_FILTER_ADD));
};
}
table_options.filter_policy.reset(NewBloomFilterPolicy(10, false));
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
@ -1802,12 +1482,6 @@ TEST_P(DBBlockCacheKeyTest, StableCacheKeys) {
IngestExternalFileOptions ingest_opts;
ASSERT_OK(db_->IngestExternalFile(handles_[1], {f}, ingest_opts));
}
if (exclude_file_numbers_) {
// FIXME(peterd): figure out where these extra ADDs are coming from
options.statistics->recordTick(BLOCK_CACHE_COMPRESSED_ADD,
uint64_t{0} - uint64_t{2});
}
#endif
perform_gets();

@ -241,7 +241,6 @@ TEST_P(DBTestTailingIterator, TailingIteratorTrimSeekToNext) {
iterh = nullptr;
BlockBasedTableOptions table_options;
table_options.no_block_cache = true;
table_options.block_cache_compressed = nullptr;
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
ReopenWithColumnFamilies({"default", "pikachu"}, options);
read_options.read_tier = kBlockCacheTier;

@ -106,66 +106,6 @@ TEST_F(DBTest2, OpenForReadOnlyWithColumnFamilies) {
ASSERT_NOK(env_->FileExists(dbname));
}
class TestReadOnlyWithCompressedCache
: public DBTestBase,
public testing::WithParamInterface<std::tuple<int, bool>> {
public:
TestReadOnlyWithCompressedCache()
: DBTestBase("test_readonly_with_compressed_cache",
/*env_do_fsync=*/true) {
max_open_files_ = std::get<0>(GetParam());
use_mmap_ = std::get<1>(GetParam());
}
int max_open_files_;
bool use_mmap_;
};
TEST_P(TestReadOnlyWithCompressedCache, ReadOnlyWithCompressedCache) {
if (use_mmap_ && !IsMemoryMappedAccessSupported()) {
ROCKSDB_GTEST_SKIP("Test requires MMAP support");
return;
}
ASSERT_OK(Put("foo", "bar"));
ASSERT_OK(Put("foo2", "barbarbarbarbarbarbarbar"));
ASSERT_OK(Flush());
DB* db_ptr = nullptr;
Options options = CurrentOptions();
options.allow_mmap_reads = use_mmap_;
options.max_open_files = max_open_files_;
options.compression = kSnappyCompression;
BlockBasedTableOptions table_options;
table_options.block_cache_compressed = NewLRUCache(8 * 1024 * 1024);
table_options.no_block_cache = true;
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
options.statistics = CreateDBStatistics();
ASSERT_OK(DB::OpenForReadOnly(options, dbname_, &db_ptr));
std::string v;
ASSERT_OK(db_ptr->Get(ReadOptions(), "foo", &v));
ASSERT_EQ("bar", v);
ASSERT_EQ(0, options.statistics->getTickerCount(BLOCK_CACHE_COMPRESSED_HIT));
ASSERT_OK(db_ptr->Get(ReadOptions(), "foo", &v));
ASSERT_EQ("bar", v);
if (Snappy_Supported()) {
if (use_mmap_) {
ASSERT_EQ(0,
options.statistics->getTickerCount(BLOCK_CACHE_COMPRESSED_HIT));
} else {
ASSERT_EQ(1,
options.statistics->getTickerCount(BLOCK_CACHE_COMPRESSED_HIT));
}
}
delete db_ptr;
}
INSTANTIATE_TEST_CASE_P(TestReadOnlyWithCompressedCache,
TestReadOnlyWithCompressedCache,
::testing::Combine(::testing::Values(-1, 100),
::testing::Bool()));
class PartitionedIndexTestListener : public EventListener {
public:
void OnFlushCompleted(DB* /*db*/, const FlushJobInfo& info) override {
@ -2625,7 +2565,6 @@ TEST_F(DBTest2, PersistentCache) {
new MockPersistentCache(type, 10 * 1024));
table_options.no_block_cache = true;
table_options.block_cache = bsize ? NewLRUCache(bsize) : nullptr;
table_options.block_cache_compressed = nullptr;
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
DestroyAndReopen(options);

@ -480,10 +480,6 @@ Options DBTestBase::GetOptions(
options.compaction_style = kCompactionStyleUniversal;
options.num_levels = 8;
break;
case kCompressedBlockCache:
options.allow_mmap_writes = can_allow_mmap;
table_options.block_cache_compressed = NewLRUCache(8 * 1024 * 1024);
break;
case kInfiniteMaxOpenFiles:
options.max_open_files = -1;
break;

@ -1056,16 +1056,15 @@ class DBTestBase : public testing::Test {
kHashSkipList = 18,
kUniversalCompaction = 19,
kUniversalCompactionMultiLevel = 20,
kCompressedBlockCache = 21,
kInfiniteMaxOpenFiles = 22,
kCRC32cChecksum = 23,
kFIFOCompaction = 24,
kOptimizeFiltersForHits = 25,
kRowCache = 26,
kRecycleLogFiles = 27,
kConcurrentSkipList = 28,
kPipelinedWrite = 29,
kConcurrentWALWrites = 30,
kInfiniteMaxOpenFiles = 21,
kCRC32cChecksum = 22,
kFIFOCompaction = 23,
kOptimizeFiltersForHits = 24,
kRowCache = 25,
kRecycleLogFiles = 26,
kConcurrentSkipList = 27,
kPipelinedWrite = 28,
kConcurrentWALWrites = 29,
kDirectIO,
kLevelSubcompactions,
kBlockBasedTableWithIndexRestartInterval,

@ -52,8 +52,6 @@ std::shared_ptr<const FilterPolicy> CreateFilterPolicy() {
StressTest::StressTest()
: cache_(NewCache(FLAGS_cache_size, FLAGS_cache_numshardbits)),
compressed_cache_(NewLRUCache(FLAGS_compressed_cache_size,
FLAGS_compressed_cache_numshardbits)),
filter_policy_(CreateFilterPolicy()),
db_(nullptr),
#ifndef ROCKSDB_LITE
@ -2530,10 +2528,9 @@ void StressTest::Open(SharedState* shared) {
(void)shared;
#endif
if (!InitializeOptionsFromFile(options_)) {
InitializeOptionsFromFlags(cache_, compressed_cache_, filter_policy_,
options_);
InitializeOptionsFromFlags(cache_, filter_policy_, options_);
}
InitializeOptionsGeneral(cache_, compressed_cache_, filter_policy_, options_);
InitializeOptionsGeneral(cache_, filter_policy_, options_);
if (FLAGS_prefix_size == 0 && FLAGS_rep_factory == kHashSkipList) {
fprintf(stderr,
@ -3041,7 +3038,6 @@ bool InitializeOptionsFromFile(Options& options) {
void InitializeOptionsFromFlags(
const std::shared_ptr<Cache>& cache,
const std::shared_ptr<Cache>& block_cache_compressed,
const std::shared_ptr<const FilterPolicy>& filter_policy,
Options& options) {
BlockBasedTableOptions block_based_options;
@ -3054,7 +3050,6 @@ void InitializeOptionsFromFlags(
static_cast<PinningTier>(FLAGS_partition_pinning);
block_based_options.metadata_cache_options.unpartitioned_pinning =
static_cast<PinningTier>(FLAGS_unpartitioned_pinning);
block_based_options.block_cache_compressed = block_cache_compressed;
block_based_options.checksum = checksum_type_e;
block_based_options.block_size = FLAGS_block_size;
block_based_options.cache_usage_options.options_overrides.insert(
@ -3305,7 +3300,6 @@ void InitializeOptionsFromFlags(
void InitializeOptionsGeneral(
const std::shared_ptr<Cache>& cache,
const std::shared_ptr<Cache>& block_cache_compressed,
const std::shared_ptr<const FilterPolicy>& filter_policy,
Options& options) {
options.create_missing_column_families = true;
@ -3326,10 +3320,6 @@ void InitializeOptionsGeneral(
if (FLAGS_cache_size > 0) {
table_options->block_cache = cache;
}
if (!table_options->block_cache_compressed &&
FLAGS_compressed_cache_size > 0) {
table_options->block_cache_compressed = block_cache_compressed;
}
if (!table_options->filter_policy) {
table_options->filter_policy = filter_policy;
}

@ -298,7 +298,6 @@ extern bool InitializeOptionsFromFile(Options& options);
// input arguments.
extern void InitializeOptionsFromFlags(
const std::shared_ptr<Cache>& cache,
const std::shared_ptr<Cache>& block_cache_compressed,
const std::shared_ptr<const FilterPolicy>& filter_policy, Options& options);
// Initialize `options` on which `InitializeOptionsFromFile()` and
@ -324,7 +323,6 @@ extern void InitializeOptionsFromFlags(
// from OPTIONS file.
extern void InitializeOptionsGeneral(
const std::shared_ptr<Cache>& cache,
const std::shared_ptr<Cache>& block_cache_compressed,
const std::shared_ptr<const FilterPolicy>& filter_policy, Options& options);
// If no OPTIONS file is specified, set up `options` so that we can test

@ -1001,10 +1001,6 @@ extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_no_block_cache(
extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_block_cache(
rocksdb_block_based_table_options_t* options, rocksdb_cache_t* block_cache);
extern ROCKSDB_LIBRARY_API void
rocksdb_block_based_options_set_block_cache_compressed(
rocksdb_block_based_table_options_t* options,
rocksdb_cache_t* block_cache_compressed);
extern ROCKSDB_LIBRARY_API void
rocksdb_block_based_options_set_whole_key_filtering(
rocksdb_block_based_table_options_t*, unsigned char);
extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_format_version(

@ -266,15 +266,6 @@ struct BlockBasedTableOptions {
// IF NULL, no page cache is used
std::shared_ptr<PersistentCache> persistent_cache = nullptr;
// DEPRECATED: This feature is planned for removal in a future release.
// Use SecondaryCache instead.
//
// If non-NULL use the specified cache for compressed blocks.
// If NULL, rocksdb will not use a compressed block cache.
// Note: though it looks similar to `block_cache`, RocksDB doesn't put the
// same type of object there.
std::shared_ptr<Cache> block_cache_compressed = nullptr;
// Approximate size of user data packed per block. Note that the
// block size specified here corresponds to uncompressed data. The
// actual size of the unit read from disk may be smaller if

@ -55,8 +55,8 @@ jlong Java_org_rocksdb_BlockBasedTableConfig_newTableFactoryHandle(
jbyte jdata_block_index_type_value,
jdouble jdata_block_hash_table_util_ratio, jbyte jchecksum_type_value,
jboolean jno_block_cache, jlong jblock_cache_handle,
jlong jpersistent_cache_handle, jlong jblock_cache_compressed_handle,
jlong jblock_size, jint jblock_size_deviation, jint jblock_restart_interval,
jlong jpersistent_cache_handle, jlong jblock_size,
jint jblock_size_deviation, jint jblock_restart_interval,
jint jindex_block_restart_interval, jlong jmetadata_block_size,
jboolean jpartition_filters, jboolean joptimize_filters_for_memory,
jboolean juse_delta_encoding, jlong jfilter_policy_handle,
@ -64,8 +64,7 @@ jlong Java_org_rocksdb_BlockBasedTableConfig_newTableFactoryHandle(
jint jread_amp_bytes_per_bit, jint jformat_version,
jboolean jenable_index_compression, jboolean jblock_align,
jbyte jindex_shortening, jlong jblock_cache_size,
jint jblock_cache_num_shard_bits, jlong jblock_cache_compressed_size,
jint jblock_cache_compressed_num_shard_bits) {
jint jblock_cache_num_shard_bits) {
ROCKSDB_NAMESPACE::BlockBasedTableOptions options;
options.cache_index_and_filter_blocks =
static_cast<bool>(jcache_index_and_filter_blocks);
@ -113,21 +112,6 @@ jlong Java_org_rocksdb_BlockBasedTableConfig_newTableFactoryHandle(
jpersistent_cache_handle);
options.persistent_cache = *pCache;
}
if (jblock_cache_compressed_handle > 0) {
std::shared_ptr<ROCKSDB_NAMESPACE::Cache> *pCache =
reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::Cache> *>(
jblock_cache_compressed_handle);
options.block_cache_compressed = *pCache;
} else if (jblock_cache_compressed_size > 0) {
if (jblock_cache_compressed_num_shard_bits > 0) {
options.block_cache_compressed = ROCKSDB_NAMESPACE::NewLRUCache(
static_cast<size_t>(jblock_cache_compressed_size),
static_cast<int>(jblock_cache_compressed_num_shard_bits));
} else {
options.block_cache_compressed = ROCKSDB_NAMESPACE::NewLRUCache(
static_cast<size_t>(jblock_cache_compressed_size));
}
}
options.block_size = static_cast<size_t>(jblock_size);
options.block_size_deviation = static_cast<int>(jblock_size_deviation);
options.block_restart_interval = static_cast<int>(jblock_restart_interval);

@ -92,8 +92,7 @@ public class RocksDBSample {
.setFilterPolicy(bloomFilter)
.setBlockSizeDeviation(5)
.setBlockRestartInterval(10)
.setCacheIndexAndFilterBlocks(true)
.setBlockCacheCompressed(new LRUCache(64 * 1000, 10));
.setCacheIndexAndFilterBlocks(true);
assert (table_options.blockSizeDeviation() == 5);
assert (table_options.blockRestartInterval() == 10);

@ -25,7 +25,6 @@ public class BlockBasedTableConfig extends TableFormatConfig {
noBlockCache = false;
blockCache = null;
persistentCache = null;
blockCacheCompressed = null;
blockSize = 4 * 1024;
blockSizeDeviation = 10;
blockRestartInterval = 16;
@ -46,10 +45,6 @@ public class BlockBasedTableConfig extends TableFormatConfig {
// NOTE: ONLY used if blockCache == null
blockCacheSize = 8 * 1024 * 1024;
blockCacheNumShardBits = 0;
// NOTE: ONLY used if blockCacheCompressed == null
blockCacheCompressedSize = 0;
blockCacheCompressedNumShardBits = 0;
}
/**
@ -295,31 +290,6 @@ public class BlockBasedTableConfig extends TableFormatConfig {
return this;
}
/**
* Use the specified cache for compressed blocks.
*
* If {@code null}, RocksDB will not use a compressed block cache.
*
* Note: though it looks similar to {@link #setBlockCache(Cache)}, RocksDB
* doesn't put the same type of object there.
*
* {@link org.rocksdb.Cache} should not be disposed before the options
* instances using this cache are disposed.
*
* {@link org.rocksdb.Cache} instance can be re-used in multiple options
* instances.
*
* @param blockCacheCompressed {@link org.rocksdb.Cache} Cache java instance
* (e.g. LRUCache).
*
* @return the reference to the current config.
*/
public BlockBasedTableConfig setBlockCacheCompressed(
final Cache blockCacheCompressed) {
this.blockCacheCompressed = blockCacheCompressed;
return this;
}
/**
* Get the approximate size of user data packed per block.
*
@ -859,64 +829,6 @@ public class BlockBasedTableConfig extends TableFormatConfig {
return this;
}
/**
* Size of compressed block cache. If 0, then block_cache_compressed is set
* to null.
*
* @return size of compressed block cache.
*/
@Deprecated
public long blockCacheCompressedSize() {
return blockCacheCompressedSize;
}
/**
* Size of compressed block cache. If 0, then block_cache_compressed is set
* to null.
*
* @param blockCacheCompressedSize size of the compressed block cache.
* @return the reference to the current config.
*
* @deprecated Use {@link #setBlockCacheCompressed(Cache)}.
*/
@Deprecated
public BlockBasedTableConfig setBlockCacheCompressedSize(
final long blockCacheCompressedSize) {
this.blockCacheCompressedSize = blockCacheCompressedSize;
return this;
}
/**
* Controls the number of shards for the compressed block cache.
* This is applied only if blockCacheCompressedSize is set to non-negative.
*
* @return numShardBits the number of shard bits. The resulting
* number of shards would be 2 ^ numShardBits. Any negative
* number means use default settings.
*/
@Deprecated
public int blockCacheCompressedNumShardBits() {
return blockCacheCompressedNumShardBits;
}
/**
* Controls the number of shards for the compressed block cache.
* This is applied only if blockCacheCompressedSize is set to non-negative.
*
* @param blockCacheCompressedNumShardBits the number of shard bits. The resulting
* number of shards would be 2 ^ numShardBits. Any negative
* number means use default settings.
* @return the reference to the current option.
*
* @deprecated Use {@link #setBlockCacheCompressed(Cache)}.
*/
@Deprecated
public BlockBasedTableConfig setBlockCacheCompressedNumShardBits(
final int blockCacheCompressedNumShardBits) {
this.blockCacheCompressedNumShardBits = blockCacheCompressedNumShardBits;
return this;
}
/**
* Influence the behavior when kHashSearch is used.
* if false, stores a precise prefix to block range mapping
@ -977,23 +889,15 @@ public class BlockBasedTableConfig extends TableFormatConfig {
persistentCacheHandle = 0;
}
final long blockCacheCompressedHandle;
if (blockCacheCompressed != null) {
blockCacheCompressedHandle = blockCacheCompressed.nativeHandle_;
} else {
blockCacheCompressedHandle = 0;
}
return newTableFactoryHandle(cacheIndexAndFilterBlocks,
cacheIndexAndFilterBlocksWithHighPriority, pinL0FilterAndIndexBlocksInCache,
pinTopLevelIndexAndFilter, indexType.getValue(), dataBlockIndexType.getValue(),
dataBlockHashTableUtilRatio, checksumType.getValue(), noBlockCache, blockCacheHandle,
persistentCacheHandle, blockCacheCompressedHandle, blockSize, blockSizeDeviation,
blockRestartInterval, indexBlockRestartInterval, metadataBlockSize, partitionFilters,
optimizeFiltersForMemory, useDeltaEncoding, filterPolicyHandle, wholeKeyFiltering,
verifyCompression, readAmpBytesPerBit, formatVersion, enableIndexCompression, blockAlign,
indexShortening.getValue(), blockCacheSize, blockCacheNumShardBits,
blockCacheCompressedSize, blockCacheCompressedNumShardBits);
persistentCacheHandle, blockSize, blockSizeDeviation, blockRestartInterval,
indexBlockRestartInterval, metadataBlockSize, partitionFilters, optimizeFiltersForMemory,
useDeltaEncoding, filterPolicyHandle, wholeKeyFiltering, verifyCompression,
readAmpBytesPerBit, formatVersion, enableIndexCompression, blockAlign,
indexShortening.getValue(), blockCacheSize, blockCacheNumShardBits);
}
private native long newTableFactoryHandle(final boolean cacheIndexAndFilterBlocks,
@ -1002,18 +906,15 @@ public class BlockBasedTableConfig extends TableFormatConfig {
final byte indexTypeValue, final byte dataBlockIndexTypeValue,
final double dataBlockHashTableUtilRatio, final byte checksumTypeValue,
final boolean noBlockCache, final long blockCacheHandle, final long persistentCacheHandle,
final long blockCacheCompressedHandle, final long blockSize, final int blockSizeDeviation,
final int blockRestartInterval, final int indexBlockRestartInterval,
final long metadataBlockSize, final boolean partitionFilters,
final boolean optimizeFiltersForMemory, final boolean useDeltaEncoding,
final long filterPolicyHandle, final boolean wholeKeyFiltering,
final boolean verifyCompression, final int readAmpBytesPerBit, final int formatVersion,
final boolean enableIndexCompression, final boolean blockAlign, final byte indexShortening,
@Deprecated final long blockCacheSize, @Deprecated final int blockCacheNumShardBits,
final long blockSize, final int blockSizeDeviation, final int blockRestartInterval,
final int indexBlockRestartInterval, final long metadataBlockSize,
final boolean partitionFilters, final boolean optimizeFiltersForMemory,
final boolean useDeltaEncoding, final long filterPolicyHandle,
final boolean wholeKeyFiltering, final boolean verifyCompression,
final int readAmpBytesPerBit, final int formatVersion, final boolean enableIndexCompression,
final boolean blockAlign, final byte indexShortening,
@Deprecated final long blockCacheCompressedSize,
@Deprecated final int blockCacheCompressedNumShardBits);
@Deprecated final long blockCacheSize, @Deprecated final int blockCacheNumShardBits);
//TODO(AR) flushBlockPolicyFactory
private boolean cacheIndexAndFilterBlocks;
@ -1027,7 +928,6 @@ public class BlockBasedTableConfig extends TableFormatConfig {
private boolean noBlockCache;
private Cache blockCache;
private PersistentCache persistentCache;
private Cache blockCacheCompressed;
private long blockSize;
private int blockSizeDeviation;
private int blockRestartInterval;
@ -1048,8 +948,4 @@ public class BlockBasedTableConfig extends TableFormatConfig {
// NOTE: ONLY used if blockCache == null
@Deprecated private long blockCacheSize;
@Deprecated private int blockCacheNumShardBits;
// NOTE: ONLY used if blockCacheCompressed == null
@Deprecated private long blockCacheCompressedSize;
@Deprecated private int blockCacheCompressedNumShardBits;
}

@ -29,7 +29,7 @@ public class OptionsUtil {
* For table_factory, this function further supports deserializing
* BlockBasedTableFactory and its BlockBasedTableOptions except the
* pointer options of BlockBasedTableOptions (flush_block_policy_factory,
* block_cache, and block_cache_compressed), which will be initialized with
* and block_cache), which will be initialized with
* default values. Developers can further specify these options by
* casting the return value of TableFactory::GetOptions() to
* BlockBasedTableOptions and making necessary changes.
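
In the C++ API, the analogous workflow is LoadLatestOptions() followed by patching the pointer fields by hand. A hedged sketch (the function name and cache capacity are illustrative; assumes the options_util.h API and the templated TableFactory::GetOptions):

// Load options from the latest OPTIONS file, then re-set the pointer
// options that deserialization leaves at their default values.
#include "rocksdb/cache.h"
#include "rocksdb/options.h"
#include "rocksdb/table.h"
#include "rocksdb/utilities/options_util.h"

using namespace ROCKSDB_NAMESPACE;

Status LoadAndPatchOptions(const std::string& db_path, DBOptions* db_opts,
                           std::vector<ColumnFamilyDescriptor>* cf_descs) {
  ConfigOptions config_options;
  Status s = LoadLatestOptions(config_options, db_path, db_opts, cf_descs);
  if (!s.ok()) {
    return s;
  }
  for (ColumnFamilyDescriptor& cf : *cf_descs) {
    auto* bbto =
        cf.options.table_factory->GetOptions<BlockBasedTableOptions>();
    if (bbto != nullptr) {
      bbto->block_cache = NewLRUCache(32 << 20);  // re-set a pointer option
    }
  }
  return s;
}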

@ -230,63 +230,6 @@ public class BlockBasedTableConfigTest {
}
}
@Test
public void blockCacheCompressed() {
try (final Cache cache = new LRUCache(17 * 1024 * 1024);
final Options options = new Options().setTableFormatConfig(
new BlockBasedTableConfig().setBlockCacheCompressed(cache))) {
assertThat(options.tableFactoryName()).isEqualTo("BlockBasedTable");
}
}
@Ignore("See issue: https://github.com/facebook/rocksdb/issues/4822")
@Test
public void blockCacheCompressedIntegration() throws RocksDBException {
final byte[] key1 = "some-key1".getBytes(StandardCharsets.UTF_8);
final byte[] key2 = "some-key1".getBytes(StandardCharsets.UTF_8);
final byte[] key3 = "some-key1".getBytes(StandardCharsets.UTF_8);
final byte[] key4 = "some-key1".getBytes(StandardCharsets.UTF_8);
final byte[] value = "some-value".getBytes(StandardCharsets.UTF_8);
try (final Cache compressedCache = new LRUCache(8 * 1024 * 1024);
final Statistics statistics = new Statistics()) {
final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig()
.setNoBlockCache(true)
.setBlockCache(null)
.setBlockCacheCompressed(compressedCache)
.setFormatVersion(4);
try (final Options options = new Options()
.setCreateIfMissing(true)
.setStatistics(statistics)
.setTableFormatConfig(blockBasedTableConfig)) {
for (int shard = 0; shard < 8; shard++) {
try (final FlushOptions flushOptions = new FlushOptions();
final WriteOptions writeOptions = new WriteOptions();
final ReadOptions readOptions = new ReadOptions();
final RocksDB db =
RocksDB.open(options, dbFolder.getRoot().getAbsolutePath() + "/" + shard)) {
db.put(writeOptions, key1, value);
db.put(writeOptions, key2, value);
db.put(writeOptions, key3, value);
db.put(writeOptions, key4, value);
db.flush(flushOptions);
db.get(readOptions, key1);
db.get(readOptions, key2);
db.get(readOptions, key3);
db.get(readOptions, key4);
assertThat(statistics.getTickerCount(TickerType.BLOCK_CACHE_COMPRESSED_ADD)).isEqualTo(shard + 1);
}
}
}
}
}
@Test
public void blockSize() {
final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
@ -470,21 +413,4 @@ public class BlockBasedTableConfigTest {
isEqualTo(5);
}
@Deprecated
@Test
public void blockCacheCompressedSize() {
final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
blockBasedTableConfig.setBlockCacheCompressedSize(40);
assertThat(blockBasedTableConfig.blockCacheCompressedSize()).
isEqualTo(40);
}
@Deprecated
@Test
public void blockCacheCompressedNumShardBits() {
final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
blockBasedTableConfig.setBlockCacheCompressedNumShardBits(4);
assertThat(blockBasedTableConfig.blockCacheCompressedNumShardBits()).
isEqualTo(4);
}
}

@ -126,8 +126,6 @@ TEST_F(OptionsSettableTest, BlockBasedTableOptionsAllFieldsSettable) {
sizeof(std::shared_ptr<Cache>)},
{offsetof(struct BlockBasedTableOptions, persistent_cache),
sizeof(std::shared_ptr<PersistentCache>)},
{offsetof(struct BlockBasedTableOptions, block_cache_compressed),
sizeof(std::shared_ptr<Cache>)},
{offsetof(struct BlockBasedTableOptions, cache_usage_options),
sizeof(CacheUsageOptions)},
{offsetof(struct BlockBasedTableOptions, filter_policy),
@ -207,7 +205,6 @@ TEST_F(OptionsSettableTest, BlockBasedTableOptionsAllFieldsSettable) {
kBbtoExcluded));
ASSERT_TRUE(new_bbto->block_cache.get() != nullptr);
ASSERT_TRUE(new_bbto->block_cache_compressed.get() != nullptr);
ASSERT_TRUE(new_bbto->filter_policy.get() != nullptr);
bbto->~BlockBasedTableOptions();

@ -928,8 +928,6 @@ TEST_F(OptionsTest, GetBlockBasedTableOptionsFromString) {
ASSERT_EQ(new_opt.checksum, ChecksumType::kxxHash);
ASSERT_TRUE(new_opt.block_cache != nullptr);
ASSERT_EQ(new_opt.block_cache->GetCapacity(), 1024UL*1024UL);
ASSERT_TRUE(new_opt.block_cache_compressed != nullptr);
ASSERT_EQ(new_opt.block_cache_compressed->GetCapacity(), 1024UL);
ASSERT_EQ(new_opt.block_size, 1024UL);
ASSERT_EQ(new_opt.block_size_deviation, 8);
ASSERT_EQ(new_opt.block_restart_interval, 4);
@ -1070,16 +1068,6 @@ TEST_F(OptionsTest, GetBlockBasedTableOptionsFromString) {
ASSERT_EQ(new_opt.block_cache->HasStrictCapacityLimit(), true);
ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(
new_opt.block_cache)->GetHighPriPoolRatio(), 0.5);
ASSERT_TRUE(new_opt.block_cache_compressed != nullptr);
ASSERT_EQ(new_opt.block_cache_compressed->GetCapacity(), 1024UL*1024UL);
ASSERT_EQ(std::dynamic_pointer_cast<ShardedCacheBase>(
new_opt.block_cache_compressed)
->GetNumShardBits(),
4);
ASSERT_EQ(new_opt.block_cache_compressed->HasStrictCapacityLimit(), true);
ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(
new_opt.block_cache_compressed)->GetHighPriPoolRatio(),
0.5);
// Set only block cache capacity. Check other values are
// reset to default values.
@ -1098,18 +1086,6 @@ TEST_F(OptionsTest, GetBlockBasedTableOptionsFromString) {
ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(new_opt.block_cache)
->GetHighPriPoolRatio(),
0.5);
ASSERT_TRUE(new_opt.block_cache_compressed != nullptr);
ASSERT_EQ(new_opt.block_cache_compressed->GetCapacity(), 2*1024UL*1024UL);
// Default values
ASSERT_EQ(
std::dynamic_pointer_cast<ShardedCacheBase>(
new_opt.block_cache_compressed)
->GetNumShardBits(),
GetDefaultCacheShardBits(new_opt.block_cache_compressed->GetCapacity()));
ASSERT_EQ(new_opt.block_cache_compressed->HasStrictCapacityLimit(), false);
ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(new_opt.block_cache_compressed)
->GetHighPriPoolRatio(),
0.5);
// Set couple of block cache options.
ASSERT_OK(GetBlockBasedTableOptionsFromString(
@ -1125,16 +1101,6 @@ TEST_F(OptionsTest, GetBlockBasedTableOptionsFromString) {
ASSERT_EQ(new_opt.block_cache->HasStrictCapacityLimit(), false);
ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(
new_opt.block_cache)->GetHighPriPoolRatio(), 0.5);
ASSERT_TRUE(new_opt.block_cache_compressed != nullptr);
ASSERT_EQ(new_opt.block_cache_compressed->GetCapacity(), 0);
ASSERT_EQ(std::dynamic_pointer_cast<ShardedCacheBase>(
new_opt.block_cache_compressed)
->GetNumShardBits(),
5);
ASSERT_EQ(new_opt.block_cache_compressed->HasStrictCapacityLimit(), false);
ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(new_opt.block_cache_compressed)
->GetHighPriPoolRatio(),
0.0);
// Set couple of block cache options.
ASSERT_OK(GetBlockBasedTableOptionsFromString(
@ -1153,16 +1119,6 @@ TEST_F(OptionsTest, GetBlockBasedTableOptionsFromString) {
ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(new_opt.block_cache)
->GetHighPriPoolRatio(),
0.5);
ASSERT_TRUE(new_opt.block_cache_compressed != nullptr);
ASSERT_EQ(new_opt.block_cache_compressed->GetCapacity(), 1024UL*1024UL);
ASSERT_EQ(std::dynamic_pointer_cast<ShardedCacheBase>(
new_opt.block_cache_compressed)
->GetNumShardBits(),
4);
ASSERT_EQ(new_opt.block_cache_compressed->HasStrictCapacityLimit(), true);
ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(new_opt.block_cache_compressed)
->GetHighPriPoolRatio(),
0.5);
ASSERT_OK(GetBlockBasedTableOptionsFromString(
config_options, table_opt, "filter_policy=rocksdb.BloomFilter:1.234",
@ -2916,8 +2872,6 @@ TEST_F(OptionsOldApiTest, GetBlockBasedTableOptionsFromString) {
ASSERT_TRUE(new_opt.no_block_cache);
ASSERT_TRUE(new_opt.block_cache != nullptr);
ASSERT_EQ(new_opt.block_cache->GetCapacity(), 1024UL*1024UL);
ASSERT_TRUE(new_opt.block_cache_compressed != nullptr);
ASSERT_EQ(new_opt.block_cache_compressed->GetCapacity(), 1024UL);
ASSERT_EQ(new_opt.block_size, 1024UL);
ASSERT_EQ(new_opt.block_size_deviation, 8);
ASSERT_EQ(new_opt.block_restart_interval, 4);
@ -2986,16 +2940,6 @@ TEST_F(OptionsOldApiTest, GetBlockBasedTableOptionsFromString) {
ASSERT_EQ(new_opt.block_cache->HasStrictCapacityLimit(), true);
ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(
new_opt.block_cache)->GetHighPriPoolRatio(), 0.5);
ASSERT_TRUE(new_opt.block_cache_compressed != nullptr);
ASSERT_EQ(new_opt.block_cache_compressed->GetCapacity(), 1024UL*1024UL);
ASSERT_EQ(std::dynamic_pointer_cast<ShardedCacheBase>(
new_opt.block_cache_compressed)
->GetNumShardBits(),
4);
ASSERT_EQ(new_opt.block_cache_compressed->HasStrictCapacityLimit(), true);
ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(
new_opt.block_cache_compressed)->GetHighPriPoolRatio(),
0.5);
// Set only block cache capacity. Check other values are
// reset to default values.
@ -3013,18 +2957,6 @@ TEST_F(OptionsOldApiTest, GetBlockBasedTableOptionsFromString) {
ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(new_opt.block_cache)
->GetHighPriPoolRatio(),
0.5);
ASSERT_TRUE(new_opt.block_cache_compressed != nullptr);
ASSERT_EQ(new_opt.block_cache_compressed->GetCapacity(), 2*1024UL*1024UL);
// Default values
ASSERT_EQ(
std::dynamic_pointer_cast<ShardedCacheBase>(
new_opt.block_cache_compressed)
->GetNumShardBits(),
GetDefaultCacheShardBits(new_opt.block_cache_compressed->GetCapacity()));
ASSERT_EQ(new_opt.block_cache_compressed->HasStrictCapacityLimit(), false);
ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(new_opt.block_cache_compressed)
->GetHighPriPoolRatio(),
0.5);
// Set couple of block cache options.
ASSERT_OK(GetBlockBasedTableOptionsFromString(
@ -3040,16 +2972,6 @@ TEST_F(OptionsOldApiTest, GetBlockBasedTableOptionsFromString) {
ASSERT_EQ(new_opt.block_cache->HasStrictCapacityLimit(), false);
ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(
new_opt.block_cache)->GetHighPriPoolRatio(), 0.5);
ASSERT_TRUE(new_opt.block_cache_compressed != nullptr);
ASSERT_EQ(new_opt.block_cache_compressed->GetCapacity(), 0);
ASSERT_EQ(std::dynamic_pointer_cast<ShardedCacheBase>(
new_opt.block_cache_compressed)
->GetNumShardBits(),
5);
ASSERT_EQ(new_opt.block_cache_compressed->HasStrictCapacityLimit(), false);
ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(new_opt.block_cache_compressed)
->GetHighPriPoolRatio(),
0.0);
// Set couple of block cache options.
ASSERT_OK(GetBlockBasedTableOptionsFromString(table_opt,
@ -3067,16 +2989,6 @@ TEST_F(OptionsOldApiTest, GetBlockBasedTableOptionsFromString) {
ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(new_opt.block_cache)
->GetHighPriPoolRatio(),
0.5);
ASSERT_TRUE(new_opt.block_cache_compressed != nullptr);
ASSERT_EQ(new_opt.block_cache_compressed->GetCapacity(), 1024UL*1024UL);
ASSERT_EQ(std::dynamic_pointer_cast<ShardedCacheBase>(
new_opt.block_cache_compressed)
->GetNumShardBits(),
4);
ASSERT_EQ(new_opt.block_cache_compressed->HasStrictCapacityLimit(), true);
ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(new_opt.block_cache_compressed)
->GetHighPriPoolRatio(),
0.5);
}
TEST_F(OptionsOldApiTest, GetPlainTableOptionsFromString) {

@ -1284,7 +1284,6 @@ void BlockBasedTableBuilder::WriteMaybeCompressedBlock(
}
{
Status s = Status::OK();
bool warm_cache;
switch (r->table_options.prepopulate_block_cache) {
case BlockBasedTableOptions::PrepopulateBlockCache::kFlushOnly:
@ -1299,18 +1298,13 @@ void BlockBasedTableBuilder::WriteMaybeCompressedBlock(
warm_cache = false;
}
if (warm_cache) {
s = InsertBlockInCacheHelper(*uncompressed_block_data, handle,
Status s = InsertBlockInCacheHelper(*uncompressed_block_data, handle,
block_type);
if (!s.ok()) {
r->SetStatus(s);
return;
}
}
s = InsertBlockInCompressedCache(block_contents, type, handle);
if (!s.ok()) {
r->SetStatus(s);
return;
}
}
r->set_offset(r->get_offset() + block_contents.size() + kBlockTrailerSize);
@ -1422,47 +1416,6 @@ IOStatus BlockBasedTableBuilder::io_status() const {
return rep_->GetIOStatus();
}
//
// Make a copy of the block contents and insert into compressed block cache
//
Status BlockBasedTableBuilder::InsertBlockInCompressedCache(
const Slice& block_contents, const CompressionType type,
const BlockHandle* handle) {
Rep* r = rep_;
CompressedBlockCacheInterface block_cache_compressed{
r->table_options.block_cache_compressed.get()};
Status s;
if (type != kNoCompression && block_cache_compressed) {
size_t size = block_contents.size();
auto ubuf = AllocateBlock(size + 1,
block_cache_compressed.get()->memory_allocator());
memcpy(ubuf.get(), block_contents.data(), size);
ubuf[size] = type;
BlockContents* block_contents_to_cache =
new BlockContents(std::move(ubuf), size);
#ifndef NDEBUG
block_contents_to_cache->has_trailer = true;
#endif // NDEBUG
CacheKey key = BlockBasedTable::GetCacheKey(rep_->base_cache_key, *handle);
s = block_cache_compressed.Insert(
key.AsSlice(), block_contents_to_cache,
block_contents_to_cache->ApproximateMemoryUsage());
if (s.ok()) {
RecordTick(rep_->ioptions.stats, BLOCK_CACHE_COMPRESSED_ADD);
} else {
RecordTick(rep_->ioptions.stats, BLOCK_CACHE_COMPRESSED_ADD_FAILURES);
}
// Invalidate OS cache.
r->file->InvalidateCache(static_cast<size_t>(r->get_offset()), size)
.PermitUncheckedError();
}
return s;
}
Status BlockBasedTableBuilder::InsertBlockInCacheHelper(
const Slice& block_contents, const BlockHandle* handle,
BlockType block_type) {
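
The removed InsertBlockInCompressedCache helper cached the compressed payload with a one-byte compression-type trailer appended (the ubuf[size] = type line above). A simplified standalone sketch of that layout, with hypothetical types rather than RocksDB's BlockContents API:

#include <cstring>
#include <memory>
#include <string_view>

// Compression codec tag, mirroring the role of rocksdb::CompressionType.
enum CompressionType : char { kNoCompression = 0, kSnappyCompression = 1 };

struct PackedBlock {
  std::unique_ptr<char[]> buf;  // payload followed by one trailer byte
  size_t data_size;             // payload size, excluding the trailer
};

PackedBlock PackForCompressedCache(std::string_view compressed,
                                   CompressionType type) {
  PackedBlock b;
  b.data_size = compressed.size();
  b.buf = std::make_unique<char[]>(b.data_size + 1);
  std::memcpy(b.buf.get(), compressed.data(), b.data_size);
  // The trailer records the codec so a reader knows how to decompress.
  b.buf[b.data_size] = static_cast<char>(type);
  return b;
}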

@ -233,7 +233,6 @@ static std::unordered_map<std::string, OptionTypeInfo>
#ifndef ROCKSDB_LITE
/* currently not supported
std::shared_ptr<Cache> block_cache = nullptr;
std::shared_ptr<Cache> block_cache_compressed = nullptr;
CacheUsageOptions cache_usage_options;
*/
{"flush_block_policy_factory",
@ -394,15 +393,8 @@ static std::unordered_map<std::string, OptionTypeInfo>
return Cache::CreateFromString(opts, value, cache);
}}},
{"block_cache_compressed",
{offsetof(struct BlockBasedTableOptions, block_cache_compressed),
OptionType::kUnknown, OptionVerificationType::kNormal,
(OptionTypeFlags::kCompareNever | OptionTypeFlags::kDontSerialize),
// Parses the input value as a Cache
[](const ConfigOptions& opts, const std::string&,
const std::string& value, void* addr) {
auto* cache = static_cast<std::shared_ptr<Cache>*>(addr);
return Cache::CreateFromString(opts, value, cache);
}}},
{0, OptionType::kUnknown, OptionVerificationType::kDeprecated,
OptionTypeFlags::kNone}},
{"max_auto_readahead_size",
{offsetof(struct BlockBasedTableOptions, max_auto_readahead_size),
OptionType::kSizeT, OptionVerificationType::kNormal,
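
Because the block_cache_compressed entry above is re-registered as kDeprecated rather than deleted, an options string or OPTIONS file that still names it parses cleanly while the value is discarded. A hedged usage sketch (assumed behavior of the public options-string parser; error handling elided):

#include <cassert>
#include "rocksdb/convenience.h"
#include "rocksdb/table.h"

using namespace ROCKSDB_NAMESPACE;

void ParseLegacyOptionsString() {
  ConfigOptions config_options;
  BlockBasedTableOptions base, parsed;
  Status s = GetBlockBasedTableOptionsFromString(
      config_options, base,
      "block_cache=32M;block_cache_compressed=8M;block_size=4096", &parsed);
  assert(s.ok());                     // legacy field accepted...
  assert(parsed.block_size == 4096);  // ...other fields still applied
}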
@ -509,20 +501,12 @@ namespace {
// they must not share an underlying key space with each other.
Status CheckCacheOptionCompatibility(const BlockBasedTableOptions& bbto) {
int cache_count = (bbto.block_cache != nullptr) +
(bbto.block_cache_compressed != nullptr) +
(bbto.persistent_cache != nullptr);
if (cache_count <= 1) {
// Nothing to share / overlap
return Status::OK();
}
// Simple pointer equality
if (bbto.block_cache == bbto.block_cache_compressed) {
return Status::InvalidArgument(
"block_cache same as block_cache_compressed not currently supported, "
"and would be bad for performance anyway");
}
// More complex test of shared key space, in case the instances are wrappers
// for some shared underlying cache.
static Cache::CacheItemHelper kHelper{CacheEntryRole::kMisc};
@ -532,19 +516,12 @@ Status CheckCacheOptionCompatibility(const BlockBasedTableOptions& bbto) {
char c;
};
static SentinelValue kRegularBlockCacheMarker{'b'};
static SentinelValue kCompressedBlockCacheMarker{'c'};
static char kPersistentCacheMarker{'p'};
if (bbto.block_cache) {
bbto.block_cache
->Insert(sentinel_key.AsSlice(), &kRegularBlockCacheMarker, &kHelper, 1)
.PermitUncheckedError();
}
if (bbto.block_cache_compressed) {
bbto.block_cache_compressed
->Insert(sentinel_key.AsSlice(), &kCompressedBlockCacheMarker, &kHelper,
1)
.PermitUncheckedError();
}
if (bbto.persistent_cache) {
// Note: persistent cache copies the data, not keeping the pointer
bbto.persistent_cache
@ -559,11 +536,7 @@ Status CheckCacheOptionCompatibility(const BlockBasedTableOptions& bbto) {
auto v = static_cast<SentinelValue*>(bbto.block_cache->Value(handle));
char c = v->c;
bbto.block_cache->Release(handle);
if (v == &kCompressedBlockCacheMarker) {
return Status::InvalidArgument(
"block_cache and block_cache_compressed share the same key space, "
"which is not supported");
} else if (c == kPersistentCacheMarker) {
if (c == kPersistentCacheMarker) {
return Status::InvalidArgument(
"block_cache and persistent_cache share the same key space, "
"which is not supported");
@ -572,28 +545,7 @@ Status CheckCacheOptionCompatibility(const BlockBasedTableOptions& bbto) {
}
}
}
if (bbto.block_cache_compressed) {
auto handle = bbto.block_cache_compressed->Lookup(sentinel_key.AsSlice());
if (handle) {
auto v = static_cast<SentinelValue*>(
bbto.block_cache_compressed->Value(handle));
char c = v->c;
bbto.block_cache_compressed->Release(handle);
if (v == &kRegularBlockCacheMarker) {
return Status::InvalidArgument(
"block_cache_compressed and block_cache share the same key space, "
"which is not supported");
} else if (c == kPersistentCacheMarker) {
return Status::InvalidArgument(
"block_cache_compressed and persistent_cache share the same key "
"space, "
"which is not supported");
} else if (v != &kCompressedBlockCacheMarker) {
return Status::Corruption(
"Unexpected mutation to block_cache_compressed");
}
}
}
if (bbto.persistent_cache) {
std::unique_ptr<char[]> data;
size_t size = 0;
@ -604,11 +556,6 @@ Status CheckCacheOptionCompatibility(const BlockBasedTableOptions& bbto) {
return Status::InvalidArgument(
"persistent_cache and block_cache share the same key space, "
"which is not supported");
} else if (data[0] == kCompressedBlockCacheMarker.c) {
return Status::InvalidArgument(
"persistent_cache and block_cache_compressed share the same key "
"space, "
"which is not supported");
} else if (data[0] != kPersistentCacheMarker) {
return Status::Corruption("Unexpected mutation to persistent_cache");
}
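The check above generalizes the pointer comparison that used to guard block_cache_compressed: each configured cache inserts a distinct one-byte marker under one sentinel key, so finding another cache's marker on lookup proves two handles share an underlying key space even through wrappers. The idea in isolation, as a hedged sketch that reuses only the Cache calls visible in this hunk (names are illustrative):

    #include "rocksdb/cache.h"

    using ROCKSDB_NAMESPACE::Cache;
    using ROCKSDB_NAMESPACE::CacheEntryRole;
    using ROCKSDB_NAMESPACE::Slice;

    // Illustrative: does cache `b` see keys inserted through cache `a`?
    bool SharesKeySpace(Cache* a, Cache* b, const Slice& sentinel_key) {
      static Cache::CacheItemHelper kHelper{CacheEntryRole::kMisc};
      static char kMarkerA{'a'};
      a->Insert(sentinel_key, &kMarkerA, &kHelper, /*charge=*/1)
          .PermitUncheckedError();
      Cache::Handle* handle = b->Lookup(sentinel_key);
      if (handle == nullptr) {
        return false;  // disjoint key spaces (or eviction; good enough here)
      }
      const bool shared = (b->Value(handle) == &kMarkerA);
      b->Release(handle);
      return shared;
    }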
@@ -828,20 +775,6 @@ std::string BlockBasedTableFactory::GetPrintableOptions() const {
ret.append(" block_cache_options:\n");
ret.append(table_options_.block_cache->GetPrintableOptions());
}
snprintf(buffer, kBufferSize, " block_cache_compressed: %p\n",
static_cast<void*>(table_options_.block_cache_compressed.get()));
ret.append(buffer);
if (table_options_.block_cache_compressed) {
const char* block_cache_compressed_name =
table_options_.block_cache_compressed->Name();
if (block_cache_compressed_name != nullptr) {
snprintf(buffer, kBufferSize, " block_cache_name: %s\n",
block_cache_compressed_name);
ret.append(buffer);
}
ret.append(" block_cache_compressed_options:\n");
ret.append(table_options_.block_cache_compressed->GetPrintableOptions());
}
snprintf(buffer, kBufferSize, " persistent_cache: %p\n",
static_cast<void*>(table_options_.persistent_cache.get()));
ret.append(buffer);
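For a quick look at the new output, the dump assembled here is reachable through the factory object; a minimal sketch (the cache size is arbitrary):

    #include <iostream>
    #include <memory>
    #include "rocksdb/cache.h"
    #include "rocksdb/table.h"

    int main() {
      using namespace ROCKSDB_NAMESPACE;
      BlockBasedTableOptions table_options;
      table_options.block_cache = NewLRUCache(8 << 20);
      std::shared_ptr<TableFactory> factory(
          NewBlockBasedTableFactory(table_options));
      // Post-patch, the dump has block_cache and persistent_cache sections
      // and no block_cache_compressed section.
      std::cout << factory->GetPrintableOptions() << std::endl;
      return 0;
    }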

@@ -1257,10 +1257,7 @@ Status BlockBasedTable::ReadMetaIndexBlock(
template <typename TBlocklike>
WithBlocklikeCheck<Status, TBlocklike> BlockBasedTable::GetDataBlockFromCache(
const Slice& cache_key, BlockCacheInterface<TBlocklike> block_cache,
CompressedBlockCacheInterface block_cache_compressed,
const ReadOptions& read_options,
CachableEntry<TBlocklike>* out_parsed_block,
const UncompressionDict& uncompression_dict, const bool wait,
CachableEntry<TBlocklike>* out_parsed_block, const bool wait,
GetContext* get_context) const {
assert(out_parsed_block);
assert(out_parsed_block->IsEmpty());
@@ -1306,73 +1303,12 @@ WithBlocklikeCheck<Status, TBlocklike> BlockBasedTable::GetDataBlockFromCache(
// If not found, search from the compressed block cache.
assert(out_parsed_block->IsEmpty());
if (!block_cache_compressed) {
return s;
}
assert(!cache_key.empty());
BlockContents contents;
auto block_cache_compressed_handle =
block_cache_compressed.Lookup(cache_key, statistics);
// if we found in the compressed cache, then uncompress and insert into
// uncompressed cache
if (block_cache_compressed_handle == nullptr) {
RecordTick(statistics, BLOCK_CACHE_COMPRESSED_MISS);
return s;
}
// found compressed block
RecordTick(statistics, BLOCK_CACHE_COMPRESSED_HIT);
BlockContents* compressed_block =
block_cache_compressed.Value(block_cache_compressed_handle);
CompressionType compression_type = GetBlockCompressionType(*compressed_block);
assert(compression_type != kNoCompression);
// Retrieve the uncompressed contents into a new buffer
UncompressionContext context(compression_type);
UncompressionInfo info(context, uncompression_dict, compression_type);
s = UncompressSerializedBlock(
info, compressed_block->data.data(), compressed_block->data.size(),
&contents, rep_->table_options.format_version, rep_->ioptions,
GetMemoryAllocator(rep_->table_options));
// Insert parsed block into block cache, the priority is based on the
// data block type.
if (s.ok()) {
std::unique_ptr<TBlocklike> block_holder;
rep_->create_context.Create(&block_holder, std::move(contents));
if (block_cache && block_holder->own_bytes() && read_options.fill_cache) {
size_t charge = block_holder->ApproximateMemoryUsage();
BlockCacheTypedHandle<TBlocklike>* cache_handle = nullptr;
s = block_cache.InsertFull(cache_key, block_holder.get(), charge,
&cache_handle, priority,
rep_->ioptions.lowest_used_cache_tier);
if (s.ok()) {
assert(cache_handle != nullptr);
out_parsed_block->SetCachedValue(block_holder.release(),
block_cache.get(), cache_handle);
UpdateCacheInsertionMetrics(TBlocklike::kBlockType, get_context, charge,
s.IsOkOverwritten(), rep_->ioptions.stats);
} else {
RecordTick(statistics, BLOCK_CACHE_ADD_FAILURES);
}
} else {
out_parsed_block->SetOwnedValue(std::move(block_holder));
}
}
// Release hold on compressed cache entry
block_cache_compressed.Release(block_cache_compressed_handle);
return s;
}
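With this probe-and-promote ladder gone, compressed caching sits behind the block cache as a secondary tier rather than beside it. A hedged sketch of the replacement configuration; capacities, shard bits, and the pool ratio are illustrative:

    #include "rocksdb/cache.h"
    #include "rocksdb/table.h"

    int main() {
      using namespace ROCKSDB_NAMESPACE;
      LRUCacheOptions primary_opts(64 << 20 /* capacity */,
                                   6 /* num_shard_bits */,
                                   false /* strict_capacity_limit */,
                                   0.5 /* high_pri_pool_ratio */);
      CompressedSecondaryCacheOptions secondary_opts;
      secondary_opts.capacity = 256 << 20;
      // Primary-tier misses consult this tier transparently; the table
      // reader itself no longer looks for compressed blocks in any cache.
      primary_opts.secondary_cache =
          NewCompressedSecondaryCache(secondary_opts);
      BlockBasedTableOptions table_options;
      table_options.block_cache = NewLRUCache(primary_opts);
      return 0;
    }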
template <typename TBlocklike>
WithBlocklikeCheck<Status, TBlocklike> BlockBasedTable::PutDataBlockToCache(
const Slice& cache_key, BlockCacheInterface<TBlocklike> block_cache,
CompressedBlockCacheInterface block_cache_compressed,
CachableEntry<TBlocklike>* out_parsed_block, BlockContents&& block_contents,
CompressionType block_comp_type,
const UncompressionDict& uncompression_dict,
@@ -1409,32 +1345,6 @@ WithBlocklikeCheck<Status, TBlocklike> BlockBasedTable::PutDataBlockToCache(
rep_->create_context.Create(&block_holder, std::move(block_contents));
}
// Insert compressed block into compressed block cache.
// Release the hold on the compressed cache entry immediately.
if (block_cache_compressed && block_comp_type != kNoCompression &&
block_contents.own_bytes()) {
assert(block_contents.has_trailer);
assert(!cache_key.empty());
// We cannot directly put block_contents because this could point to
// an object in the stack.
auto block_cont_for_comp_cache =
std::make_unique<BlockContents>(std::move(block_contents));
size_t charge = block_cont_for_comp_cache->ApproximateMemoryUsage();
s = block_cache_compressed.Insert(cache_key,
block_cont_for_comp_cache.get(), charge,
nullptr /*handle*/, Cache::Priority::LOW);
if (s.ok()) {
// Cache took ownership
block_cont_for_comp_cache.release();
RecordTick(statistics, BLOCK_CACHE_COMPRESSED_ADD);
} else {
RecordTick(statistics, BLOCK_CACHE_COMPRESSED_ADD_FAILURES);
}
}
// insert into uncompressed block cache
if (block_cache && block_holder->own_bytes()) {
size_t charge = block_holder->ApproximateMemoryUsage();
@@ -1545,8 +1455,6 @@ BlockBasedTable::MaybeReadBlockAndLoadToCache(
const bool no_io = (ro.read_tier == kBlockCacheTier);
BlockCacheInterface<TBlocklike> block_cache{
rep_->table_options.block_cache.get()};
CompressedBlockCacheInterface block_cache_compressed{
rep_->table_options.block_cache_compressed.get()};
// First, try to get the block from the cache
//
@@ -1555,14 +1463,13 @@ BlockBasedTable::MaybeReadBlockAndLoadToCache(
CacheKey key_data;
Slice key;
bool is_cache_hit = false;
if (block_cache || block_cache_compressed) {
if (block_cache) {
// create key for block cache
key_data = GetCacheKey(rep_->base_cache_key, handle);
key = key_data.AsSlice();
if (!contents) {
s = GetDataBlockFromCache(key, block_cache, block_cache_compressed, ro,
out_parsed_block, uncompression_dict, wait,
s = GetDataBlockFromCache(key, block_cache, out_parsed_block, wait,
get_context);
// Value could still be null at this point, so check the cache handle
// and update the read pattern for prefetching
@@ -1591,7 +1498,7 @@ BlockBasedTable::MaybeReadBlockAndLoadToCache(
TBlocklike::kBlockType != BlockType::kFilter &&
TBlocklike::kBlockType != BlockType::kCompressionDictionary &&
rep_->blocks_maybe_compressed;
const bool do_uncompress = maybe_compressed && !block_cache_compressed;
const bool do_uncompress = maybe_compressed;
CompressionType contents_comp_type;
// Maybe serialized or uncompressed
BlockContents tmp_contents;
@@ -1605,7 +1512,7 @@ BlockBasedTable::MaybeReadBlockAndLoadToCache(
TBlocklike::kBlockType, uncompression_dict,
rep_->persistent_cache_options,
GetMemoryAllocator(rep_->table_options),
GetMemoryAllocatorForCompressedBlock(rep_->table_options));
/*allocator=*/nullptr);
// If prefetch_buffer is not allocated, it will fallback to synchronous
// reading of block contents.
@@ -1641,8 +1548,8 @@ BlockBasedTable::MaybeReadBlockAndLoadToCache(
// If filling cache is allowed and a cache is configured, try to put the
// block to the cache.
s = PutDataBlockToCache(
key, block_cache, block_cache_compressed, out_parsed_block,
std::move(*contents), contents_comp_type, uncompression_dict,
key, block_cache, out_parsed_block, std::move(*contents),
contents_comp_type, uncompression_dict,
GetMemoryAllocator(rep_->table_options), get_context);
}
}
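End to end, a cache miss now fills exactly one tier. A hedged smoke test of that behavior using only the public API; the path and sizes are illustrative:

    #include <cassert>
    #include <string>
    #include "rocksdb/cache.h"
    #include "rocksdb/db.h"
    #include "rocksdb/options.h"
    #include "rocksdb/table.h"

    int main() {
      using namespace ROCKSDB_NAMESPACE;
      std::shared_ptr<Cache> cache = NewLRUCache(8 << 20);
      BlockBasedTableOptions bbto;
      bbto.block_cache = cache;
      Options options;
      options.create_if_missing = true;
      options.table_factory.reset(NewBlockBasedTableFactory(bbto));
      DB* db = nullptr;
      Status s = DB::Open(options, "/tmp/block_cache_demo", &db);
      assert(s.ok());
      s = db->Put(WriteOptions(), "k", std::string(4096, 'v'));
      assert(s.ok());
      s = db->Flush(FlushOptions());
      assert(s.ok());
      std::string v;
      // Miss -> read from file -> uncompress -> insert into block_cache.
      s = db->Get(ReadOptions(), "k", &v);
      assert(s.ok());
      assert(cache->GetUsage() > 0);  // the only tier that gets filled
      delete db;
      return 0;
    }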

@@ -379,8 +379,7 @@ class BlockBasedTable : public TableReader {
IndexBlockIter* input_iter, GetContext* get_context,
BlockCacheLookupContext* lookup_context) const;
// Read block cache from block caches (if set): block_cache and
// block_cache_compressed.
// Read a block from the block cache (if set): block_cache.
// On success, Status::OK will be returned and @block will be populated with
// pointer to the block as well as its block handle.
// @param uncompression_dict Data for presetting the compression library's
@@ -388,9 +387,7 @@ class BlockBasedTable : public TableReader {
template <typename TBlocklike>
WithBlocklikeCheck<Status, TBlocklike> GetDataBlockFromCache(
const Slice& cache_key, BlockCacheInterface<TBlocklike> block_cache,
CompressedBlockCacheInterface block_cache_compressed,
const ReadOptions& read_options, CachableEntry<TBlocklike>* block,
const UncompressionDict& uncompression_dict, const bool wait,
CachableEntry<TBlocklike>* block, const bool wait,
GetContext* get_context) const;
// Put a maybe-compressed block into the corresponding block cache.
@@ -406,7 +403,6 @@ class BlockBasedTable : public TableReader {
template <typename TBlocklike>
WithBlocklikeCheck<Status, TBlocklike> PutDataBlockToCache(
const Slice& cache_key, BlockCacheInterface<TBlocklike> block_cache,
CompressedBlockCacheInterface block_cache_compressed,
CachableEntry<TBlocklike>* cached_block, BlockContents&& block_contents,
CompressionType block_comp_type,
const UncompressionDict& uncompression_dict,

@@ -244,9 +244,7 @@ DEFINE_SYNC_AND_ASYNC(void, BlockBasedTable::RetrieveMultipleBlocks)
// heap buffer or there is no cache at all.
CompressionType compression_type =
GetBlockCompressionType(serialized_block);
if (use_shared_buffer && (compression_type == kNoCompression ||
(compression_type != kNoCompression &&
rep_->table_options.block_cache_compressed))) {
if (use_shared_buffer && compression_type == kNoCompression) {
Slice serialized =
Slice(req.result.data() + req_offset, BlockSizeWithTrailer(handle));
serialized_block = BlockContents(
@@ -523,7 +521,6 @@ DEFINE_SYNC_AND_ASYNC(void, BlockBasedTable::MultiGet)
// 3. If blocks are compressed, use stack buf
if (!rep_->file->use_direct_io() &&
rep_->table_options.block_cache_compressed == nullptr &&
rep_->blocks_maybe_compressed) {
if (total_len <= kMultiGetReadStackBufSize) {
scratch = stack_buf;
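The surviving logic boils down to one decision: when reads are buffered and blocks may be compressed, the raw bytes are transient (they are uncompressed before caching), so one shared scratch buffer is enough. A minimal sketch of that selection, with the reader's state replaced by illustrative parameters:

    #include <cstddef>
    #include <memory>

    constexpr size_t kStackBufSize = 8192;  // stand-in for the real constant

    // Returns the shared scratch to use, or nullptr for per-block buffers.
    char* PickScratch(bool use_direct_io, bool blocks_maybe_compressed,
                      size_t total_len, char (&stack_buf)[kStackBufSize],
                      std::unique_ptr<char[]>& heap_buf) {
      if (use_direct_io || !blocks_maybe_compressed) {
        return nullptr;  // block data may outlive the read; don't share
      }
      if (total_len <= kStackBufSize) {
        return stack_buf;  // small batch: reuse the stack buffer
      }
      heap_buf = std::make_unique<char[]>(total_len);  // one big allocation
      return heap_buf.get();
    }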

@@ -103,10 +103,6 @@ struct BlockCreateContext : public Cache::CreateContext {
BlockContents&& block);
};
// Convenient cache interface to use with block_cache_compressed
using CompressedBlockCacheInterface =
BasicTypedCacheInterface<BlockContents, CacheEntryRole::kOtherBlock>;
// Convenient cache interface to use for block_cache, with support for
// SecondaryCache.
template <typename TBlocklike>

@@ -22,13 +22,6 @@ inline MemoryAllocator* GetMemoryAllocator(
: nullptr;
}
inline MemoryAllocator* GetMemoryAllocatorForCompressedBlock(
const BlockBasedTableOptions& table_options) {
return table_options.block_cache_compressed.get()
? table_options.block_cache_compressed->memory_allocator()
: nullptr;
}
// Assumes block has a trailer as in format.h. file_name and offset provided
// for generating a diagnostic message in returned status.
extern Status VerifyBlockChecksum(ChecksumType type, const char* data,
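GetMemoryAllocatorForCompressedBlock goes away because only one cache's allocator can apply now: block buffers follow whatever allocator is attached to block_cache. A hedged wiring sketch; the jemalloc helper is just one possible allocator and is unavailable in some builds:

    #include "rocksdb/cache.h"
    #include "rocksdb/memory_allocator.h"
    #include "rocksdb/table.h"

    int main() {
      using namespace ROCKSDB_NAMESPACE;
      JemallocAllocatorOptions jopts;
      std::shared_ptr<MemoryAllocator> allocator;
      Status s = NewJemallocNodumpAllocator(jopts, &allocator);
      if (s.ok()) {
        // Buffers for blocks read into this cache are allocated through
        // `allocator`, as selected by GetMemoryAllocator above.
        LRUCacheOptions cache_opts(64 << 20, 6, false, 0.5, allocator);
        BlockBasedTableOptions bbto;
        bbto.block_cache = NewLRUCache(cache_opts);
      }
      return 0;
    }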

@@ -4370,7 +4370,6 @@ class Benchmark {
{/*.charged = */ FLAGS_charge_blob_cache
? CacheEntryRoleOptions::Decision::kEnabled
: CacheEntryRoleOptions::Decision::kDisabled}});
block_based_options.block_cache_compressed = compressed_cache_;
block_based_options.block_size = FLAGS_block_size;
block_based_options.block_restart_interval = FLAGS_block_restart_interval;
block_based_options.index_block_restart_interval =

@@ -41,7 +41,6 @@ class MemoryTest : public testing::Test {
const auto bbto = factory->GetOptions<BlockBasedTableOptions>();
if (bbto != nullptr) {
cache_set->insert(bbto->block_cache.get());
cache_set->insert(bbto->block_cache_compressed.get());
}
}
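Accounting now tracks a single cache pointer per table factory. A hedged sketch of the public counterpart to what this test does, with the DB and table options passed in rather than owned:

    #include <map>
    #include <unordered_set>
    #include <vector>
    #include "rocksdb/cache.h"
    #include "rocksdb/db.h"
    #include "rocksdb/table.h"
    #include "rocksdb/utilities/memory_util.h"

    using namespace ROCKSDB_NAMESPACE;

    // Illustrative: per-type memory usage with one block cache per factory.
    Status UsageByType(DB* db, const BlockBasedTableOptions& bbto,
                       std::map<MemoryUtil::UsageType, uint64_t>* usage) {
      std::unordered_set<const Cache*> cache_set;
      cache_set.insert(bbto.block_cache.get());  // no second cache to add
      return MemoryUtil::GetApproximateMemoryUsageByType({db}, cache_set,
                                                         usage);
    }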

@@ -296,7 +296,7 @@ PersistentCacheDBTest::PersistentCacheDBTest()
// test template
void PersistentCacheDBTest::RunTest(
const std::function<std::shared_ptr<PersistentCacheTier>(bool)>& new_pcache,
const size_t max_keys = 100 * 1024, const size_t max_usecase = 5) {
const size_t max_keys = 100 * 1024, const size_t max_usecase = 3) {
// number of insertion iterations
int num_iter = static_cast<int>(max_keys * kStressFactor);
@@ -320,43 +320,21 @@ void PersistentCacheDBTest::RunTest(
pcache = new_pcache(/*is_compressed=*/true);
table_options.persistent_cache = pcache;
table_options.block_cache = NewLRUCache(size_max);
table_options.block_cache_compressed = nullptr;
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
break;
case 1:
// page cache, block cache, compressed cache
pcache = new_pcache(/*is_compressed=*/true);
table_options.persistent_cache = pcache;
table_options.block_cache = NewLRUCache(size_max);
table_options.block_cache_compressed = NewLRUCache(size_max);
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
break;
case 2:
// page cache, block cache, compressed cache + kNoCompression
// both block cache and compressed cache, but DB is not compressed
// also, make block cache sizes bigger, to trigger block cache hits
pcache = new_pcache(/*is_compressed=*/true);
table_options.persistent_cache = pcache;
table_options.block_cache = NewLRUCache(size_max);
table_options.block_cache_compressed = NewLRUCache(size_max);
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
options.compression = kNoCompression;
break;
case 3:
// page cache, no block cache, no compressed cache
pcache = new_pcache(/*is_compressed=*/false);
table_options.persistent_cache = pcache;
table_options.block_cache = nullptr;
table_options.block_cache_compressed = nullptr;
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
break;
case 4:
case 2:
// page cache, no block cache, no compressed cache
// Page cache caches compressed blocks
pcache = new_pcache(/*is_compressed=*/true);
table_options.persistent_cache = pcache;
table_options.block_cache = nullptr;
table_options.block_cache_compressed = nullptr;
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
break;
default:
@@ -372,10 +350,6 @@ void PersistentCacheDBTest::RunTest(
Verify(num_iter, values);
auto block_miss = TestGetTickerCount(options, BLOCK_CACHE_MISS);
auto compressed_block_hit =
TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_HIT);
auto compressed_block_miss =
TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS);
auto page_hit = TestGetTickerCount(options, PERSISTENT_CACHE_HIT);
auto page_miss = TestGetTickerCount(options, PERSISTENT_CACHE_MISS);
@@ -386,31 +360,12 @@
ASSERT_GT(page_miss, 0);
ASSERT_GT(page_hit, 0);
ASSERT_GT(block_miss, 0);
ASSERT_EQ(compressed_block_miss, 0);
ASSERT_EQ(compressed_block_hit, 0);
break;
case 1:
// page cache, block cache, compressed cache
ASSERT_GT(page_miss, 0);
ASSERT_GT(block_miss, 0);
ASSERT_GT(compressed_block_miss, 0);
break;
case 2:
// page cache, block cache, compressed cache + kNoCompression
ASSERT_GT(page_miss, 0);
ASSERT_GT(page_hit, 0);
ASSERT_GT(block_miss, 0);
ASSERT_GT(compressed_block_miss, 0);
// remember kNoCompression
ASSERT_EQ(compressed_block_hit, 0);
break;
case 3:
case 4:
// page cache, no block cache, no compressed cache
ASSERT_GT(page_miss, 0);
ASSERT_GT(page_hit, 0);
ASSERT_EQ(compressed_block_hit, 0);
ASSERT_EQ(compressed_block_miss, 0);
break;
default:
FAIL();
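For reference, the three surviving configurations line up with the assertions above. A compact sketch in the test's own terms; new_pcache and size_max are the helpers from RunTest, and the case numbering is the new one:

    #include <memory>
    #include "rocksdb/cache.h"
    #include "rocksdb/persistent_cache.h"
    #include "rocksdb/table.h"

    using namespace ROCKSDB_NAMESPACE;

    // case 0: persistent cache + block cache, DB compression on
    // case 1: persistent cache + block cache, options.compression set to
    //         kNoCompression
    // case 2: persistent cache only; it caches compressed blocks itself
    BlockBasedTableOptions MakeCaseOptions(
        int use_case, const std::shared_ptr<PersistentCache>& pcache,
        size_t size_max) {
      BlockBasedTableOptions to;
      to.persistent_cache = pcache;  // present in all three cases
      if (use_case < 2) {
        to.block_cache = NewLRUCache(size_max);
      }
      return to;
    }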
