Fix MultiGet crash when no_block_cache is set (#5991)

Summary:
This PR fixes https://github.com/facebook/rocksdb/issues/5975. In ```BlockBasedTable::RetrieveMultipleBlocks()```, we were calling ```MaybeReadBlocksAndLoadToCache()```, which is a no-op if neither uncompressed nor compressed block cache are configured.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5991

Test Plan:
1. Add unit tests that fail with the old code and pass with the new
2. make check and asan_check

Cc spetrunia

Differential Revision: D18272744

Pulled By: anand1976

fbshipit-source-id: e62fa6090d1a6adf84fcd51dfd6859b03c6aebfe
main
anand76 5 years ago committed by Facebook Github Bot
parent 1da1f04231
commit 9836a1fa33
  1. 3
      HISTORY.md
  2. 30
      db/db_basic_test.cc
  3. 56
      table/block_based/block_based_table_reader.cc

@@ -12,6 +12,9 @@ file_creation_time of the oldest SST file in the DB.
### Performance Improvements ### Performance Improvements
* For 64-bit hashing, RocksDB is standardizing on a slightly modified preview version of XXH3. This function is now used for many non-persisted hashes, along with fastrange64() in place of the modulus operator, and some benchmarks show a slight improvement. * For 64-bit hashing, RocksDB is standardizing on a slightly modified preview version of XXH3. This function is now used for many non-persisted hashes, along with fastrange64() in place of the modulus operator, and some benchmarks show a slight improvement.
### Bug Fixes
* Fix an assertion failure in MultiGet() when BlockBasedTableOptions::no_block_cache is true and there is no compressed block cache
## 6.5.1 (10/16/2019) ## 6.5.1 (10/16/2019)
### Bug Fixes ### Bug Fixes
* Revert the feature "Merging iterator to avoid child iterator reseek for some cases (#5286)" since it might cause strange results when reseek happens with a different iterator upper bound. * Revert the feature "Merging iterator to avoid child iterator reseek for some cases (#5286)" since it might cause strange results when reseek happens with a different iterator upper bound.

@@ -1577,8 +1577,12 @@ class DBBasicTestWithParallelIO
Options options = CurrentOptions(); Options options = CurrentOptions();
Random rnd(301); Random rnd(301);
BlockBasedTableOptions table_options; BlockBasedTableOptions table_options;
table_options.pin_l0_filter_and_index_blocks_in_cache = true;
table_options.block_cache = uncompressed_cache_; table_options.block_cache = uncompressed_cache_;
if (table_options.block_cache == nullptr) {
table_options.no_block_cache = true;
} else {
table_options.pin_l0_filter_and_index_blocks_in_cache = true;
}
table_options.block_cache_compressed = compressed_cache_; table_options.block_cache_compressed = compressed_cache_;
table_options.flush_block_policy_factory.reset( table_options.flush_block_policy_factory.reset(
new MyFlushBlockPolicyFactory()); new MyFlushBlockPolicyFactory());
@@ -1614,6 +1618,9 @@ class DBBasicTestWithParallelIO
int num_inserts_compressed() { return compressed_cache_->num_inserts(); } int num_inserts_compressed() { return compressed_cache_->num_inserts(); }
bool fill_cache() { return fill_cache_; } bool fill_cache() { return fill_cache_; }
bool compression_enabled() { return compression_enabled_; }
bool has_compressed_cache() { return compressed_cache_ != nullptr; }
bool has_uncompressed_cache() { return uncompressed_cache_ != nullptr; }
static void SetUpTestCase() {} static void SetUpTestCase() {}
static void TearDownTestCase() {} static void TearDownTestCase() {}
@@ -1788,7 +1795,16 @@ TEST_P(DBBasicTestWithParallelIO, MultiGet) {
ASSERT_TRUE(CheckValue(1, values[0].ToString())); ASSERT_TRUE(CheckValue(1, values[0].ToString()));
ASSERT_TRUE(CheckValue(51, values[1].ToString())); ASSERT_TRUE(CheckValue(51, values[1].ToString()));
int expected_reads = random_reads + (fill_cache() ? 0 : 2); bool read_from_cache = false;
if (fill_cache()) {
if (has_uncompressed_cache()) {
read_from_cache = true;
} else if (has_compressed_cache() && compression_enabled()) {
read_from_cache = true;
}
}
int expected_reads = random_reads + (read_from_cache ? 0 : 2);
ASSERT_EQ(env_->random_read_counter_.Read(), expected_reads); ASSERT_EQ(env_->random_read_counter_.Read(), expected_reads);
keys.resize(10); keys.resize(10);
@@ -1806,7 +1822,7 @@ TEST_P(DBBasicTestWithParallelIO, MultiGet) {
ASSERT_OK(statuses[i]); ASSERT_OK(statuses[i]);
ASSERT_TRUE(CheckValue(key_ints[i], values[i].ToString())); ASSERT_TRUE(CheckValue(key_ints[i], values[i].ToString()));
} }
expected_reads += (fill_cache() ? 2 : 4); expected_reads += (read_from_cache ? 2 : 4);
ASSERT_EQ(env_->random_read_counter_.Read(), expected_reads); ASSERT_EQ(env_->random_read_counter_.Read(), expected_reads);
} }
@@ -1817,12 +1833,8 @@ INSTANTIATE_TEST_CASE_P(
// Param 1 - Uncompressed cache enabled // Param 1 - Uncompressed cache enabled
// Param 2 - Data compression enabled // Param 2 - Data compression enabled
// Param 3 - ReadOptions::fill_cache // Param 3 - ReadOptions::fill_cache
::testing::Values(std::make_tuple(false, true, true, true), ::testing::Combine(::testing::Bool(), ::testing::Bool(),
std::make_tuple(true, true, true, true), ::testing::Bool(), ::testing::Bool()));
std::make_tuple(false, true, false, true),
std::make_tuple(false, true, true, false),
std::make_tuple(true, true, true, false),
std::make_tuple(false, true, false, false)));
class DBBasicTestWithTimestampWithParam class DBBasicTestWithTimestampWithParam
: public DBTestBase, : public DBTestBase,

@@ -2398,34 +2398,42 @@ void BlockBasedTable::RetrieveMultipleBlocks(
nullptr, options, handle, uncompression_dict, block_entry, nullptr, options, handle, uncompression_dict, block_entry,
BlockType::kData, mget_iter->get_context, BlockType::kData, mget_iter->get_context,
&lookup_data_block_context, &raw_block_contents); &lookup_data_block_context, &raw_block_contents);
// block_entry value could be null if no block cache is present, i.e
// BlockBasedTableOptions::no_block_cache is true and no compressed
// block cache is configured. In that case, fall
// through and set up the block explicitly
if (block_entry->GetValue() != nullptr) {
continue;
}
}
CompressionType compression_type =
raw_block_contents.get_compression_type();
BlockContents contents;
if (compression_type != kNoCompression) {
UncompressionContext context(compression_type);
UncompressionInfo info(context, uncompression_dict, compression_type);
s = UncompressBlockContents(info, req.result.data(), handle.size(),
&contents, footer.version(),
rep_->ioptions, memory_allocator);
} else { } else {
CompressionType compression_type = if (scratch != nullptr) {
raw_block_contents.get_compression_type(); // If we used the scratch buffer, then the contents need to be
BlockContents contents; // copied to heap
if (compression_type != kNoCompression) { Slice raw = Slice(req.result.data(), handle.size());
UncompressionContext context(compression_type); contents = BlockContents(
UncompressionInfo info(context, uncompression_dict, compression_type); CopyBufferToHeap(GetMemoryAllocator(rep_->table_options), raw),
s = UncompressBlockContents(info, req.result.data(), handle.size(), handle.size());
&contents, footer.version(),
rep_->ioptions, memory_allocator);
} else { } else {
if (scratch != nullptr) { contents = std::move(raw_block_contents);
// If we used the scratch buffer, then the contents need to be
// copied to heap
Slice raw = Slice(req.result.data(), handle.size());
contents = BlockContents(
CopyBufferToHeap(GetMemoryAllocator(rep_->table_options), raw),
handle.size());
} else {
contents = std::move(raw_block_contents);
}
}
if (s.ok()) {
(*results)[idx_in_batch].SetOwnedValue(
new Block(std::move(contents), global_seqno,
read_amp_bytes_per_bit, ioptions.statistics));
} }
} }
if (s.ok()) {
(*results)[idx_in_batch].SetOwnedValue(
new Block(std::move(contents), global_seqno,
read_amp_bytes_per_bit, ioptions.statistics));
}
} }
(*statuses)[idx_in_batch] = s; (*statuses)[idx_in_batch] = s;
} }

Loading…
Cancel
Save