Refactor: Add BlockTypes to make them imply C++ type in block cache (#10098)

Summary:
We have three related concepts:
* BlockType: an internal enum conceptually indicating a type of SST file
block
* CacheEntryRole: a user-facing enum for categorizing block cache entries,
which is also involved in associating cache entries with an appropriate
deleter. It can include categories for non-block cache entries (e.g. memory
reservations).
* TBlocklike: a C++ type for the actual type behind a void* cache entry.

We had some existing code ugliness because BlockType did not imply
TBlocklike: there are several kinds of "filter" block, each backed by a
different C++ type. This refactoring fixes that by adding new BlockTypes.

More clean-up can come in later work.
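
To make the intended relationship concrete, here is a minimal standalone sketch (not code from this PR; the placeholder structs merely stand in for RocksDB's Block, ParsedFullFilterBlock, BlockContents, and UncompressionDict, and only a subset of the real enum is shown). It expresses "BlockType implies TBlocklike" as a compile-time mapping, the same invariant the new InsertBlockInCacheHelper switch in the diff below relies on.

// Standalone sketch only -- not RocksDB source. The placeholder structs stand
// in for the real TBlocklike types (Block, ParsedFullFilterBlock,
// BlockContents, UncompressionDict). The point: a BlockType value implies
// exactly one C++ type for the cached object.
#include <cstdint>
#include <iostream>
#include <type_traits>

enum class BlockType : uint8_t {
  kData,
  kFilter,                // full filters and filter partitions
  kFilterPartitionIndex,  // top-level index of filter partitions
  kDeprecatedFilter,      // old block-based filter
  kCompressionDictionary,
};

// Placeholder block-like types (assumptions for this sketch).
struct Block {};
struct ParsedFullFilterBlock {};
struct BlockContents {};
struct UncompressionDict {};

// Compile-time map: each BlockType names its implied TBlocklike.
template <BlockType kType>
struct BlocklikeFor;
template <>
struct BlocklikeFor<BlockType::kData> {
  using type = Block;
};
template <>
struct BlocklikeFor<BlockType::kFilterPartitionIndex> {
  using type = Block;
};
template <>
struct BlocklikeFor<BlockType::kFilter> {
  using type = ParsedFullFilterBlock;
};
template <>
struct BlocklikeFor<BlockType::kDeprecatedFilter> {
  using type = BlockContents;
};
template <>
struct BlocklikeFor<BlockType::kCompressionDictionary> {
  using type = UncompressionDict;
};

int main() {
  // The BlockType alone determines what C++ type a cache entry holds.
  static_assert(std::is_same_v<BlocklikeFor<BlockType::kFilter>::type,
                               ParsedFullFilterBlock>);
  static_assert(
      std::is_same_v<BlocklikeFor<BlockType::kDeprecatedFilter>::type,
                     BlockContents>);
  std::cout << "BlockType -> TBlocklike mapping is unambiguous\n";
  return 0;
}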

Pull Request resolved: https://github.com/facebook/rocksdb/pull/10098

Test Plan: existing tests

Reviewed By: akankshamahajan15

Differential Revision: D36897945

Pulled By: pdillinger

fbshipit-source-id: 3ae496b5caa81e0a0ed85e873eb5b525e2d9a295
Branch: main
Author: Peter Dillinger, committed by Facebook GitHub Bot
Parent: e36008d863
Commit: 4f78f9699b
Changed files (lines changed):
  1. include/rocksdb/table.h (4)
  2. include/rocksdb/trace_record.h (1)
  3. table/block_based/block_based_filter_block.cc (12)
  4. table/block_based/block_based_table_builder.cc (82)
  5. table/block_based/block_based_table_builder.h (6)
  6. table/block_based/block_based_table_reader.cc (39)
  7. table/block_based/block_based_table_reader.h (2)
  8. table/block_based/block_like_traits.h (4)
  9. table/block_based/block_type.h (7)
  10. table/block_based/filter_block_reader_common.cc (8)
  11. table/block_based/filter_block_reader_common.h (8)
  12. table/block_based/full_filter_block.cc (13)
  13. table/block_based/partitioned_filter_block.cc (33)
  14. table/block_fetcher.cc (2)
  15. utilities/cache_dump_load_impl.cc (5)

--- a/include/rocksdb/table.h
+++ b/include/rocksdb/table.h
@@ -147,8 +147,8 @@ struct BlockBasedTableOptions {
   // If cache_index_and_filter_blocks is enabled, cache index and filter
   // blocks with high priority. If set to true, depending on implementation of
-  // block cache, index and filter blocks may be less likely to be evicted
-  // than data blocks.
+  // block cache, index, filter, and other metadata blocks may be less likely
+  // to be evicted than data blocks.
   bool cache_index_and_filter_blocks_with_high_priority = true;
   // DEPRECATED: This option will be removed in a future version. For now, this

--- a/include/rocksdb/trace_record.h
+++ b/include/rocksdb/trace_record.h
@@ -30,6 +30,7 @@ enum TraceType : char {
   kTraceIteratorSeekForPrev = 6,
   // Block cache tracing related trace types.
   kBlockTraceIndexBlock = 7,
+  // TODO: split out kinds of filter blocks?
   kBlockTraceFilterBlock = 8,
   kBlockTraceDataBlock = 9,
   kBlockTraceUncompressionDictBlock = 10,

--- a/table/block_based/block_based_filter_block.cc
+++ b/table/block_based/block_based_filter_block.cc
@@ -187,9 +187,9 @@ std::unique_ptr<FilterBlockReader> BlockBasedFilterBlockReader::Create(
   CachableEntry<BlockContents> filter_block;
   if (prefetch || !use_cache) {
-    const Status s = ReadFilterBlock(table, prefetch_buffer, ro, use_cache,
-                                     nullptr /* get_context */, lookup_context,
-                                     &filter_block);
+    const Status s = ReadFilterBlock(
+        table, prefetch_buffer, ro, use_cache, nullptr /* get_context */,
+        lookup_context, &filter_block, BlockType::kDeprecatedFilter);
     if (!s.ok()) {
       IGNORE_STATUS_IF_ERROR(s);
       return std::unique_ptr<FilterBlockReader>();
@@ -257,7 +257,8 @@ bool BlockBasedFilterBlockReader::MayMatch(
   CachableEntry<BlockContents> filter_block;
   const Status s =
-      GetOrReadFilterBlock(no_io, get_context, lookup_context, &filter_block);
+      GetOrReadFilterBlock(no_io, get_context, lookup_context, &filter_block,
+                           BlockType::kDeprecatedFilter);
   if (!s.ok()) {
     IGNORE_STATUS_IF_ERROR(s);
     return true;
@@ -316,7 +317,8 @@ std::string BlockBasedFilterBlockReader::ToString() const {
   const Status s =
       GetOrReadFilterBlock(false /* no_io */, nullptr /* get_context */,
-                           nullptr /* lookup_context */, &filter_block);
+                           nullptr /* lookup_context */, &filter_block,
+                           BlockType::kDeprecatedFilter);
   if (!s.ok()) {
     IGNORE_STATUS_IF_ERROR(s);
     return std::string("Unable to retrieve filter block");

--- a/table/block_based/block_based_table_builder.cc
+++ b/table/block_based/block_based_table_builder.cc
@@ -1250,8 +1250,7 @@ void BlockBasedTableBuilder::WriteRawBlock(const Slice& block_contents,
                                            CompressionType type,
                                            BlockHandle* handle,
                                            BlockType block_type,
-                                           const Slice* raw_block_contents,
-                                           bool is_top_level_filter_block) {
+                                           const Slice* raw_block_contents) {
   Rep* r = rep_;
   bool is_data_block = block_type == BlockType::kData;
   StopWatch sw(r->ioptions.clock, r->ioptions.stats, WRITE_RAW_BLOCK_MICROS);
@@ -1311,11 +1310,9 @@ void BlockBasedTableBuilder::WriteRawBlock(const Slice& block_contents,
     }
     if (warm_cache) {
       if (type == kNoCompression) {
-        s = InsertBlockInCacheHelper(block_contents, handle, block_type,
-                                     is_top_level_filter_block);
+        s = InsertBlockInCacheHelper(block_contents, handle, block_type);
       } else if (raw_block_contents != nullptr) {
-        s = InsertBlockInCacheHelper(*raw_block_contents, handle, block_type,
-                                     is_top_level_filter_block);
+        s = InsertBlockInCacheHelper(*raw_block_contents, handle, block_type);
       }
       if (!s.ok()) {
         r->SetStatus(s);
@@ -1491,25 +1488,28 @@ Status BlockBasedTableBuilder::InsertBlockInCompressedCache(
 Status BlockBasedTableBuilder::InsertBlockInCacheHelper(
     const Slice& block_contents, const BlockHandle* handle,
-    BlockType block_type, bool is_top_level_filter_block) {
+    BlockType block_type) {
   Status s;
-  if (block_type == BlockType::kData || block_type == BlockType::kIndex) {
-    s = InsertBlockInCache<Block>(block_contents, handle, block_type);
-  } else if (block_type == BlockType::kFilter) {
-    if (rep_->filter_builder->IsBlockBased()) {
-      // for block-based filter which is deprecated.
-      s = InsertBlockInCache<BlockContents>(block_contents, handle, block_type);
-    } else if (is_top_level_filter_block) {
-      // for top level filter block in partitioned filter.
-      s = InsertBlockInCache<Block>(block_contents, handle, block_type);
-    } else {
-      // for second level partitioned filters and full filters.
-      s = InsertBlockInCache<ParsedFullFilterBlock>(block_contents, handle,
-                                                    block_type);
-    }
-  } else if (block_type == BlockType::kCompressionDictionary) {
-    s = InsertBlockInCache<UncompressionDict>(block_contents, handle,
-                                              block_type);
+  switch (block_type) {
+    case BlockType::kData:
+    case BlockType::kIndex:
+    case BlockType::kFilterPartitionIndex:
+      s = InsertBlockInCache<Block>(block_contents, handle, block_type);
+      break;
+    case BlockType::kDeprecatedFilter:
+      s = InsertBlockInCache<BlockContents>(block_contents, handle, block_type);
+      break;
+    case BlockType::kFilter:
+      s = InsertBlockInCache<ParsedFullFilterBlock>(block_contents, handle,
+                                                    block_type);
+      break;
+    case BlockType::kCompressionDictionary:
+      s = InsertBlockInCache<UncompressionDict>(block_contents, handle,
+                                                block_type);
+      break;
+    default:
+      // no-op / not cached
+      break;
   }
   return s;
 }
@@ -1563,10 +1563,14 @@ Status BlockBasedTableBuilder::InsertBlockInCache(const Slice& block_contents,
 void BlockBasedTableBuilder::WriteFilterBlock(
     MetaIndexBuilder* meta_index_builder) {
+  if (rep_->filter_builder == nullptr || rep_->filter_builder->IsEmpty()) {
+    // No filter block needed
+    return;
+  }
   BlockHandle filter_block_handle;
-  bool empty_filter_block =
-      (rep_->filter_builder == nullptr || rep_->filter_builder->IsEmpty());
-  if (ok() && !empty_filter_block) {
+  bool is_block_based_filter = rep_->filter_builder->IsBlockBased();
+  bool is_partitioned_filter = rep_->table_options.partition_filters;
+  if (ok()) {
     rep_->props.num_filter_entries +=
         rep_->filter_builder->EstimateEntriesAdded();
     Status s = Status::Incomplete();
@@ -1591,31 +1595,23 @@ void BlockBasedTableBuilder::WriteFilterBlock(
       rep_->props.filter_size += filter_content.size();
-      // TODO: Refactor code so that BlockType can determine both the C++ type
-      // of a block cache entry (TBlocklike) and the CacheEntryRole while
-      // inserting blocks in cache.
-      bool top_level_filter_block = false;
-      if (s.ok() && rep_->table_options.partition_filters &&
-          !rep_->filter_builder->IsBlockBased()) {
-        top_level_filter_block = true;
-      }
-      WriteRawBlock(filter_content, kNoCompression, &filter_block_handle,
-                    BlockType::kFilter, nullptr /*raw_contents*/,
-                    top_level_filter_block);
+      BlockType btype = is_block_based_filter ? BlockType::kDeprecatedFilter
+                        : is_partitioned_filter && /* last */ s.ok()
+                            ? BlockType::kFilterPartitionIndex
+                            : BlockType::kFilter;
+      WriteRawBlock(filter_content, kNoCompression, &filter_block_handle, btype,
+                    nullptr /*raw_contents*/);
     }
     rep_->filter_builder->ResetFilterBitsBuilder();
   }
-  if (ok() && !empty_filter_block) {
+  if (ok()) {
     // Add mapping from "<filter_block_prefix>.Name" to location
     // of filter data.
     std::string key;
-    if (rep_->filter_builder->IsBlockBased()) {
-      key = BlockBasedTable::kFilterBlockPrefix;
-    } else {
-      key = rep_->table_options.partition_filters
-                ? BlockBasedTable::kPartitionedFilterBlockPrefix
-                : BlockBasedTable::kFullFilterBlockPrefix;
-    }
+    key = is_block_based_filter ? BlockBasedTable::kFilterBlockPrefix
+          : is_partitioned_filter
+              ? BlockBasedTable::kPartitionedFilterBlockPrefix
+              : BlockBasedTable::kFullFilterBlockPrefix;
     key.append(rep_->table_options.filter_policy->CompatibilityName());
     meta_index_builder->Add(key, filter_block_handle);
   }

--- a/table/block_based/block_based_table_builder.h
+++ b/table/block_based/block_based_table_builder.h
@@ -119,8 +119,7 @@ class BlockBasedTableBuilder : public TableBuilder {
                   BlockType block_type);
   // Directly write data to the file.
   void WriteRawBlock(const Slice& data, CompressionType, BlockHandle* handle,
-                     BlockType block_type, const Slice* raw_data = nullptr,
-                     bool is_top_level_filter_block = false);
+                     BlockType block_type, const Slice* raw_data = nullptr);
   void SetupCacheKeyPrefix(const TableBuilderOptions& tbo);
@@ -130,8 +129,7 @@ class BlockBasedTableBuilder : public TableBuilder {
   Status InsertBlockInCacheHelper(const Slice& block_contents,
                                   const BlockHandle* handle,
-                                  BlockType block_type,
-                                  bool is_top_level_filter_block);
+                                  BlockType block_type);
   Status InsertBlockInCompressedCache(const Slice& block_contents,
                                       const CompressionType type,

--- a/table/block_based/block_based_table_reader.cc
+++ b/table/block_based/block_based_table_reader.cc
@@ -195,6 +195,8 @@ void BlockBasedTable::UpdateCacheHitMetrics(BlockType block_type,
   switch (block_type) {
     case BlockType::kFilter:
+    case BlockType::kFilterPartitionIndex:
+    case BlockType::kDeprecatedFilter:
       PERF_COUNTER_ADD(block_cache_filter_hit_count, 1);
       if (get_context) {
@@ -252,6 +254,8 @@ void BlockBasedTable::UpdateCacheMissMetrics(BlockType block_type,
   // TODO: introduce perf counters for misses per block type
   switch (block_type) {
     case BlockType::kFilter:
+    case BlockType::kFilterPartitionIndex:
+    case BlockType::kDeprecatedFilter:
       if (get_context) {
         ++get_context->get_context_stats_.num_cache_filter_miss;
       } else {
@@ -307,6 +311,8 @@ void BlockBasedTable::UpdateCacheInsertionMetrics(
   switch (block_type) {
     case BlockType::kFilter:
+    case BlockType::kFilterPartitionIndex:
+    case BlockType::kDeprecatedFilter:
       if (get_context) {
         ++get_context->get_context_stats_.num_cache_filter_add;
         if (redundant) {
@@ -1217,11 +1223,16 @@ Status BlockBasedTable::GetDataBlockFromCache(
           : 0;
   assert(block);
   assert(block->IsEmpty());
+  // Here we treat the legacy name "...index_and_filter_blocks..." to mean all
+  // metadata blocks that might go into block cache, EXCEPT only those needed
+  // for the read path (Get, etc.). TableProperties should not be needed on the
+  // read path (prefix extractor setting is an O(1) size special case that we
+  // are working not to require from TableProperties), so it is not given
+  // high-priority treatment if it should go into BlockCache.
   const Cache::Priority priority =
       rep_->table_options.cache_index_and_filter_blocks_with_high_priority &&
-              (block_type == BlockType::kFilter ||
-               block_type == BlockType::kCompressionDictionary ||
-               block_type == BlockType::kIndex)
+              block_type != BlockType::kData &&
+              block_type != BlockType::kProperties
           ? Cache::Priority::HIGH
           : Cache::Priority::LOW;
@@ -1348,9 +1359,7 @@ Status BlockBasedTable::PutDataBlockToCache(
           : 0;
   const Cache::Priority priority =
       rep_->table_options.cache_index_and_filter_blocks_with_high_priority &&
-              (block_type == BlockType::kFilter ||
-               block_type == BlockType::kCompressionDictionary ||
-               block_type == BlockType::kIndex)
+              block_type != BlockType::kData
           ? Cache::Priority::HIGH
           : Cache::Priority::LOW;
   assert(cached_block);
@@ -1603,6 +1612,8 @@ Status BlockBasedTable::MaybeReadBlockAndLoadToCache(
           ++get_context->get_context_stats_.num_index_read;
           break;
         case BlockType::kFilter:
+        case BlockType::kFilterPartitionIndex:
+        case BlockType::kDeprecatedFilter:
          ++get_context->get_context_stats_.num_filter_read;
          break;
        case BlockType::kData:
@@ -1645,6 +1656,8 @@ Status BlockBasedTable::MaybeReadBlockAndLoadToCache(
        trace_block_type = TraceType::kBlockTraceDataBlock;
        break;
      case BlockType::kFilter:
+      case BlockType::kFilterPartitionIndex:
+      case BlockType::kDeprecatedFilter:
        trace_block_type = TraceType::kBlockTraceFilterBlock;
        break;
      case BlockType::kCompressionDictionary:
@@ -1759,6 +1772,8 @@ Status BlockBasedTable::RetrieveBlock(
          ++(get_context->get_context_stats_.num_index_read);
          break;
        case BlockType::kFilter:
+        case BlockType::kFilterPartitionIndex:
+        case BlockType::kDeprecatedFilter:
          ++(get_context->get_context_stats_.num_filter_read);
          break;
        case BlockType::kData:
@@ -2421,12 +2436,18 @@ Status BlockBasedTable::VerifyChecksumInBlocks(
 BlockType BlockBasedTable::GetBlockTypeForMetaBlockByName(
     const Slice& meta_block_name) {
-  if (meta_block_name.starts_with(kFilterBlockPrefix) ||
-      meta_block_name.starts_with(kFullFilterBlockPrefix) ||
-      meta_block_name.starts_with(kPartitionedFilterBlockPrefix)) {
+  if (meta_block_name.starts_with(kFilterBlockPrefix)) {
+    return BlockType::kDeprecatedFilter;
+  }
+  if (meta_block_name.starts_with(kFullFilterBlockPrefix)) {
     return BlockType::kFilter;
   }
+  if (meta_block_name.starts_with(kPartitionedFilterBlockPrefix)) {
+    return BlockType::kFilterPartitionIndex;
+  }
   if (meta_block_name == kPropertiesBlockName) {
     return BlockType::kProperties;
   }

--- a/table/block_based/block_based_table_reader.h
+++ b/table/block_based/block_based_table_reader.h
@@ -641,7 +641,7 @@ struct BlockBasedTable::Rep {
       table_reader_cache_res_handle = nullptr;
   SequenceNumber get_global_seqno(BlockType block_type) const {
-    return (block_type == BlockType::kFilter ||
+    return (block_type == BlockType::kFilterPartitionIndex ||
             block_type == BlockType::kCompressionDictionary)
                ? kDisableGlobalSequenceNumber
                : global_seqno;

--- a/table/block_based/block_like_traits.h
+++ b/table/block_based/block_like_traits.h
@@ -73,7 +73,7 @@ class BlocklikeTraits<BlockContents> {
   }
   static Cache::CacheItemHelper* GetCacheItemHelper(BlockType block_type) {
-    if (block_type == BlockType::kFilter) {
+    if (block_type == BlockType::kDeprecatedFilter) {
       return GetCacheItemHelperForRole<
           BlockContents, CacheEntryRole::kDeprecatedFilterBlock>();
     } else {
@@ -160,7 +160,7 @@ class BlocklikeTraits<Block> {
         return GetCacheItemHelperForRole<Block, CacheEntryRole::kDataBlock>();
       case BlockType::kIndex:
         return GetCacheItemHelperForRole<Block, CacheEntryRole::kIndexBlock>();
-      case BlockType::kFilter:
+      case BlockType::kFilterPartitionIndex:
        return GetCacheItemHelperForRole<Block,
                                         CacheEntryRole::kFilterMetaBlock>();
      default:

--- a/table/block_based/block_type.h
+++ b/table/block_based/block_type.h
@@ -14,10 +14,13 @@ namespace ROCKSDB_NAMESPACE {
 // Represents the types of blocks used in the block based table format.
 // See https://github.com/facebook/rocksdb/wiki/Rocksdb-BlockBasedTable-Format
 // for details.
+// For code sanity, BlockType should imply a specific TBlocklike for
+// BlocklikeTraits.
 enum class BlockType : uint8_t {
   kData,
-  kFilter,
+  kFilter,  // for second level partitioned filters and full filters
+  kFilterPartitionIndex,  // for top-level index of filter partitions
+  kDeprecatedFilter,  // for old, deprecated block-based filter
   kProperties,
   kCompressionDictionary,
   kRangeDeletion,
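
As a usage illustration of the new enumerators (a sketch with assumed, illustrative names, not RocksDB source), a write-path caller chooses among the three filter BlockTypes roughly the way the updated WriteFilterBlock above does:

// Sketch only -- not RocksDB source; the function name and parameters are
// illustrative. It restates the write-path rule used by WriteFilterBlock in
// the block_based_table_builder.cc diff above.
#include <cassert>
#include <cstdint>

enum class BlockType : uint8_t {
  kFilter,                // full filter, or one partition of a partitioned filter
  kFilterPartitionIndex,  // top-level index over filter partitions
  kDeprecatedFilter,      // old, deprecated block-based filter
};

BlockType FilterBlockTypeFor(bool is_block_based_filter,
                             bool is_partitioned_filter,
                             bool writing_top_level_index) {
  if (is_block_based_filter) {
    // Deprecated block-based filter: cached as raw BlockContents.
    return BlockType::kDeprecatedFilter;
  }
  if (is_partitioned_filter && writing_top_level_index) {
    // The final piece of a partitioned filter is its top-level index,
    // cached as an index-style Block.
    return BlockType::kFilterPartitionIndex;
  }
  // Full filters and individual filter partitions: ParsedFullFilterBlock.
  return BlockType::kFilter;
}

int main() {
  assert(FilterBlockTypeFor(true, false, false) ==
         BlockType::kDeprecatedFilter);
  assert(FilterBlockTypeFor(false, true, true) ==
         BlockType::kFilterPartitionIndex);
  assert(FilterBlockTypeFor(false, true, false) == BlockType::kFilter);
  return 0;
}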

--- a/table/block_based/filter_block_reader_common.cc
+++ b/table/block_based/filter_block_reader_common.cc
@@ -16,7 +16,7 @@ Status FilterBlockReaderCommon<TBlocklike>::ReadFilterBlock(
     const BlockBasedTable* table, FilePrefetchBuffer* prefetch_buffer,
     const ReadOptions& read_options, bool use_cache, GetContext* get_context,
     BlockCacheLookupContext* lookup_context,
-    CachableEntry<TBlocklike>* filter_block) {
+    CachableEntry<TBlocklike>* filter_block, BlockType block_type) {
   PERF_TIMER_GUARD(read_filter_block_nanos);
   assert(table);
@@ -29,7 +29,7 @@ Status FilterBlockReaderCommon<TBlocklike>::ReadFilterBlock(
   const Status s =
       table->RetrieveBlock(prefetch_buffer, read_options, rep->filter_handle,
                            UncompressionDict::GetEmptyDict(), filter_block,
-                           BlockType::kFilter, get_context, lookup_context,
+                           block_type, get_context, lookup_context,
                            /* for_compaction */ false, use_cache,
                            /* wait_for_cache */ true, /* async_read */ false);
@@ -67,7 +67,7 @@ template <typename TBlocklike>
 Status FilterBlockReaderCommon<TBlocklike>::GetOrReadFilterBlock(
     bool no_io, GetContext* get_context,
     BlockCacheLookupContext* lookup_context,
-    CachableEntry<TBlocklike>* filter_block) const {
+    CachableEntry<TBlocklike>* filter_block, BlockType block_type) const {
   assert(filter_block);
   if (!filter_block_.IsEmpty()) {
@@ -82,7 +82,7 @@ Status FilterBlockReaderCommon<TBlocklike>::GetOrReadFilterBlock(
   return ReadFilterBlock(table_, nullptr /* prefetch_buffer */, read_options,
                          cache_filter_blocks(), get_context, lookup_context,
-                         filter_block);
+                         filter_block, block_type);
 }
 template <typename TBlocklike>

--- a/table/block_based/filter_block_reader_common.h
+++ b/table/block_based/filter_block_reader_common.h
@@ -7,6 +7,8 @@
 #pragma once
 #include <cassert>
+#include "block_type.h"
 #include "table/block_based/cachable_entry.h"
 #include "table/block_based/filter_block.h"
@@ -46,7 +48,8 @@ class FilterBlockReaderCommon : public FilterBlockReader {
                                const ReadOptions& read_options, bool use_cache,
                                GetContext* get_context,
                                BlockCacheLookupContext* lookup_context,
-                               CachableEntry<TBlocklike>* filter_block);
+                               CachableEntry<TBlocklike>* filter_block,
+                               BlockType block_type);
   const BlockBasedTable* table() const { return table_; }
   const SliceTransform* table_prefix_extractor() const;
@@ -55,7 +58,8 @@ class FilterBlockReaderCommon : public FilterBlockReader {
   Status GetOrReadFilterBlock(bool no_io, GetContext* get_context,
                               BlockCacheLookupContext* lookup_context,
-                              CachableEntry<TBlocklike>* filter_block) const;
+                              CachableEntry<TBlocklike>* filter_block,
+                              BlockType block_type) const;
   size_t ApproximateFilterBlockMemoryUsage() const;

--- a/table/block_based/full_filter_block.cc
+++ b/table/block_based/full_filter_block.cc
@@ -4,8 +4,10 @@
 // (found in the LICENSE.Apache file in the root directory).
 #include "table/block_based/full_filter_block.h"
 #include <array>
+#include "block_type.h"
 #include "monitoring/perf_context_imp.h"
 #include "port/malloc.h"
 #include "port/port.h"
@@ -149,7 +151,7 @@ std::unique_ptr<FilterBlockReader> FullFilterBlockReader::Create(
   if (prefetch || !use_cache) {
     const Status s = ReadFilterBlock(table, prefetch_buffer, ro, use_cache,
                                      nullptr /* get_context */, lookup_context,
-                                     &filter_block);
+                                     &filter_block, BlockType::kFilter);
     if (!s.ok()) {
       IGNORE_STATUS_IF_ERROR(s);
       return std::unique_ptr<FilterBlockReader>();
@@ -181,8 +183,8 @@ bool FullFilterBlockReader::MayMatch(
     BlockCacheLookupContext* lookup_context) const {
   CachableEntry<ParsedFullFilterBlock> filter_block;
-  const Status s =
-      GetOrReadFilterBlock(no_io, get_context, lookup_context, &filter_block);
+  const Status s = GetOrReadFilterBlock(no_io, get_context, lookup_context,
+                                        &filter_block, BlockType::kFilter);
   if (!s.ok()) {
     IGNORE_STATUS_IF_ERROR(s);
     return true;
@@ -237,8 +239,9 @@ void FullFilterBlockReader::MayMatch(
     BlockCacheLookupContext* lookup_context) const {
   CachableEntry<ParsedFullFilterBlock> filter_block;
-  const Status s = GetOrReadFilterBlock(no_io, range->begin()->get_context,
-                                        lookup_context, &filter_block);
+  const Status s =
+      GetOrReadFilterBlock(no_io, range->begin()->get_context, lookup_context,
+                           &filter_block, BlockType::kFilter);
   if (!s.ok()) {
     IGNORE_STATUS_IF_ERROR(s);
     return;

--- a/table/block_based/partitioned_filter_block.cc
+++ b/table/block_based/partitioned_filter_block.cc
@@ -7,6 +7,7 @@
 #include <utility>
+#include "block_type.h"
 #include "file/random_access_file_reader.h"
 #include "logging/logging.h"
 #include "monitoring/perf_context_imp.h"
@@ -197,9 +198,9 @@ std::unique_ptr<FilterBlockReader> PartitionedFilterBlockReader::Create(
   CachableEntry<Block> filter_block;
   if (prefetch || !use_cache) {
-    const Status s = ReadFilterBlock(table, prefetch_buffer, ro, use_cache,
-                                     nullptr /* get_context */, lookup_context,
-                                     &filter_block);
+    const Status s = ReadFilterBlock(
+        table, prefetch_buffer, ro, use_cache, nullptr /* get_context */,
+        lookup_context, &filter_block, BlockType::kFilterPartitionIndex);
     if (!s.ok()) {
       IGNORE_STATUS_IF_ERROR(s);
       return std::unique_ptr<FilterBlockReader>();
@@ -277,9 +278,10 @@ BlockHandle PartitionedFilterBlockReader::GetFilterPartitionHandle(
   Statistics* kNullStats = nullptr;
   filter_block.GetValue()->NewIndexIterator(
       comparator->user_comparator(),
-      table()->get_rep()->get_global_seqno(BlockType::kFilter), &iter,
-      kNullStats, true /* total_order_seek */, false /* have_first_key */,
-      index_key_includes_seq(), index_value_is_full());
+      table()->get_rep()->get_global_seqno(BlockType::kFilterPartitionIndex),
+      &iter, kNullStats, true /* total_order_seek */,
+      false /* have_first_key */, index_key_includes_seq(),
+      index_value_is_full());
   iter.Seek(entry);
   if (UNLIKELY(!iter.Valid())) {
     // entry is larger than all the keys. However its prefix might still be
@@ -335,7 +337,8 @@ bool PartitionedFilterBlockReader::MayMatch(
     FilterFunction filter_function) const {
   CachableEntry<Block> filter_block;
   Status s =
-      GetOrReadFilterBlock(no_io, get_context, lookup_context, &filter_block);
+      GetOrReadFilterBlock(no_io, get_context, lookup_context, &filter_block,
+                           BlockType::kFilterPartitionIndex);
   if (UNLIKELY(!s.ok())) {
     IGNORE_STATUS_IF_ERROR(s);
     return true;
@@ -371,8 +374,9 @@ void PartitionedFilterBlockReader::MayMatch(
     uint64_t block_offset, bool no_io, BlockCacheLookupContext* lookup_context,
     FilterManyFunction filter_function) const {
   CachableEntry<Block> filter_block;
-  Status s = GetOrReadFilterBlock(no_io, range->begin()->get_context,
-                                  lookup_context, &filter_block);
+  Status s =
+      GetOrReadFilterBlock(no_io, range->begin()->get_context, lookup_context,
+                           &filter_block, BlockType::kFilterPartitionIndex);
   if (UNLIKELY(!s.ok())) {
     IGNORE_STATUS_IF_ERROR(s);
     return;  // Any/all may match
@@ -463,7 +467,8 @@ Status PartitionedFilterBlockReader::CacheDependencies(const ReadOptions& ro,
   CachableEntry<Block> filter_block;
   Status s = GetOrReadFilterBlock(false /* no_io */, nullptr /* get_context */,
-                                  &lookup_context, &filter_block);
+                                  &lookup_context, &filter_block,
+                                  BlockType::kFilterPartitionIndex);
   if (!s.ok()) {
     ROCKS_LOG_ERROR(rep->ioptions.logger,
                     "Error retrieving top-level filter block while trying to "
@@ -479,10 +484,10 @@ Status PartitionedFilterBlockReader::CacheDependencies(const ReadOptions& ro,
   const InternalKeyComparator* const comparator = internal_comparator();
   Statistics* kNullStats = nullptr;
   filter_block.GetValue()->NewIndexIterator(
-      comparator->user_comparator(), rep->get_global_seqno(BlockType::kFilter),
-      &biter, kNullStats, true /* total_order_seek */,
-      false /* have_first_key */, index_key_includes_seq(),
-      index_value_is_full());
+      comparator->user_comparator(),
+      rep->get_global_seqno(BlockType::kFilterPartitionIndex), &biter,
+      kNullStats, true /* total_order_seek */, false /* have_first_key */,
+      index_key_includes_seq(), index_value_is_full());
   // Index partitions are assumed to be consecuitive. Prefetch them all.
   // Read the first block offset
   biter.SeekToFirst();

--- a/table/block_fetcher.cc
+++ b/table/block_fetcher.cc
@@ -284,6 +284,8 @@ IOStatus BlockFetcher::ReadBlockContents() {
   // TODO: introduce dedicated perf counter for range tombstones
   switch (block_type_) {
     case BlockType::kFilter:
+    case BlockType::kFilterPartitionIndex:
+    case BlockType::kDeprecatedFilter:
       PERF_COUNTER_ADD(filter_block_read_count, 1);
       break;

--- a/utilities/cache_dump_load_impl.cc
+++ b/utilities/cache_dump_load_impl.cc
@@ -324,7 +324,7 @@ IOStatus CacheDumpedLoaderImpl::RestoreCacheEntriesToSecondaryCache() {
     switch (dump_unit.type) {
       case CacheDumpUnitType::kDeprecatedFilterBlock: {
         helper = BlocklikeTraits<BlockContents>::GetCacheItemHelper(
-            BlockType::kFilter);
+            BlockType::kDeprecatedFilter);
         std::unique_ptr<BlockContents> block_holder;
         block_holder.reset(BlocklikeTraits<BlockContents>::Create(
             std::move(raw_block_contents), 0, statistics, false,
@@ -376,7 +376,8 @@ IOStatus CacheDumpedLoaderImpl::RestoreCacheEntriesToSecondaryCache() {
         break;
       }
       case CacheDumpUnitType::kFilterMetaBlock: {
-        helper = BlocklikeTraits<Block>::GetCacheItemHelper(BlockType::kFilter);
+        helper = BlocklikeTraits<Block>::GetCacheItemHelper(
+            BlockType::kFilterPartitionIndex);
         std::unique_ptr<Block> block_holder;
         block_holder.reset(BlocklikeTraits<Block>::Create(
             std::move(raw_block_contents), toptions_.read_amp_bytes_per_bit,
