Refactor: Add BlockTypes to make them imply C++ type in block cache (#10098)

Summary:
We have three related concepts:
* BlockType: an internal enum conceptually indicating a type of SST file
block
* CacheEntryRole: a user-facing enum for categorizing block cache entries,
which is also involved in associating cache entries with an appropriate
deleter. Can include categories for non-block cache entries (e.g. memory
reservations).
* TBlocklike: a C++ type for the actual type behind a void* cache entry.

We had some existing code ugliness because BlockType did not imply
TBlocklike, because of various kinds of "filter" block. This refactoring
fixes that with new BlockTypes.

More clean-up can come in later work.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/10098

Test Plan: existing tests

Reviewed By: akankshamahajan15

Differential Revision: D36897945

Pulled By: pdillinger

fbshipit-source-id: 3ae496b5caa81e0a0ed85e873eb5b525e2d9a295
main
Peter Dillinger 2 years ago committed by Facebook GitHub Bot
parent e36008d863
commit 4f78f9699b
  1. 4
      include/rocksdb/table.h
  2. 1
      include/rocksdb/trace_record.h
  3. 12
      table/block_based/block_based_filter_block.cc
  4. 82
      table/block_based/block_based_table_builder.cc
  5. 6
      table/block_based/block_based_table_builder.h
  6. 39
      table/block_based/block_based_table_reader.cc
  7. 2
      table/block_based/block_based_table_reader.h
  8. 4
      table/block_based/block_like_traits.h
  9. 7
      table/block_based/block_type.h
  10. 8
      table/block_based/filter_block_reader_common.cc
  11. 8
      table/block_based/filter_block_reader_common.h
  12. 13
      table/block_based/full_filter_block.cc
  13. 33
      table/block_based/partitioned_filter_block.cc
  14. 2
      table/block_fetcher.cc
  15. 5
      utilities/cache_dump_load_impl.cc

@ -147,8 +147,8 @@ struct BlockBasedTableOptions {
// If cache_index_and_filter_blocks is enabled, cache index and filter
// blocks with high priority. If set to true, depending on implementation of
// block cache, index and filter blocks may be less likely to be evicted
// than data blocks.
// block cache, index, filter, and other metadata blocks may be less likely
// to be evicted than data blocks.
bool cache_index_and_filter_blocks_with_high_priority = true;
// DEPRECATED: This option will be removed in a future version. For now, this

@ -30,6 +30,7 @@ enum TraceType : char {
kTraceIteratorSeekForPrev = 6,
// Block cache tracing related trace types.
kBlockTraceIndexBlock = 7,
// TODO: split out kinds of filter blocks?
kBlockTraceFilterBlock = 8,
kBlockTraceDataBlock = 9,
kBlockTraceUncompressionDictBlock = 10,

@ -187,9 +187,9 @@ std::unique_ptr<FilterBlockReader> BlockBasedFilterBlockReader::Create(
CachableEntry<BlockContents> filter_block;
if (prefetch || !use_cache) {
const Status s = ReadFilterBlock(table, prefetch_buffer, ro, use_cache,
nullptr /* get_context */, lookup_context,
&filter_block);
const Status s = ReadFilterBlock(
table, prefetch_buffer, ro, use_cache, nullptr /* get_context */,
lookup_context, &filter_block, BlockType::kDeprecatedFilter);
if (!s.ok()) {
IGNORE_STATUS_IF_ERROR(s);
return std::unique_ptr<FilterBlockReader>();
@ -257,7 +257,8 @@ bool BlockBasedFilterBlockReader::MayMatch(
CachableEntry<BlockContents> filter_block;
const Status s =
GetOrReadFilterBlock(no_io, get_context, lookup_context, &filter_block);
GetOrReadFilterBlock(no_io, get_context, lookup_context, &filter_block,
BlockType::kDeprecatedFilter);
if (!s.ok()) {
IGNORE_STATUS_IF_ERROR(s);
return true;
@ -316,7 +317,8 @@ std::string BlockBasedFilterBlockReader::ToString() const {
const Status s =
GetOrReadFilterBlock(false /* no_io */, nullptr /* get_context */,
nullptr /* lookup_context */, &filter_block);
nullptr /* lookup_context */, &filter_block,
BlockType::kDeprecatedFilter);
if (!s.ok()) {
IGNORE_STATUS_IF_ERROR(s);
return std::string("Unable to retrieve filter block");

@ -1250,8 +1250,7 @@ void BlockBasedTableBuilder::WriteRawBlock(const Slice& block_contents,
CompressionType type,
BlockHandle* handle,
BlockType block_type,
const Slice* raw_block_contents,
bool is_top_level_filter_block) {
const Slice* raw_block_contents) {
Rep* r = rep_;
bool is_data_block = block_type == BlockType::kData;
StopWatch sw(r->ioptions.clock, r->ioptions.stats, WRITE_RAW_BLOCK_MICROS);
@ -1311,11 +1310,9 @@ void BlockBasedTableBuilder::WriteRawBlock(const Slice& block_contents,
}
if (warm_cache) {
if (type == kNoCompression) {
s = InsertBlockInCacheHelper(block_contents, handle, block_type,
is_top_level_filter_block);
s = InsertBlockInCacheHelper(block_contents, handle, block_type);
} else if (raw_block_contents != nullptr) {
s = InsertBlockInCacheHelper(*raw_block_contents, handle, block_type,
is_top_level_filter_block);
s = InsertBlockInCacheHelper(*raw_block_contents, handle, block_type);
}
if (!s.ok()) {
r->SetStatus(s);
@ -1491,25 +1488,28 @@ Status BlockBasedTableBuilder::InsertBlockInCompressedCache(
Status BlockBasedTableBuilder::InsertBlockInCacheHelper(
const Slice& block_contents, const BlockHandle* handle,
BlockType block_type, bool is_top_level_filter_block) {
BlockType block_type) {
Status s;
if (block_type == BlockType::kData || block_type == BlockType::kIndex) {
s = InsertBlockInCache<Block>(block_contents, handle, block_type);
} else if (block_type == BlockType::kFilter) {
if (rep_->filter_builder->IsBlockBased()) {
// for block-based filter which is deprecated.
s = InsertBlockInCache<BlockContents>(block_contents, handle, block_type);
} else if (is_top_level_filter_block) {
// for top level filter block in partitioned filter.
switch (block_type) {
case BlockType::kData:
case BlockType::kIndex:
case BlockType::kFilterPartitionIndex:
s = InsertBlockInCache<Block>(block_contents, handle, block_type);
} else {
// for second level partitioned filters and full filters.
break;
case BlockType::kDeprecatedFilter:
s = InsertBlockInCache<BlockContents>(block_contents, handle, block_type);
break;
case BlockType::kFilter:
s = InsertBlockInCache<ParsedFullFilterBlock>(block_contents, handle,
block_type);
}
} else if (block_type == BlockType::kCompressionDictionary) {
s = InsertBlockInCache<UncompressionDict>(block_contents, handle,
block_type);
break;
case BlockType::kCompressionDictionary:
s = InsertBlockInCache<UncompressionDict>(block_contents, handle,
block_type);
break;
default:
// no-op / not cached
break;
}
return s;
}
@ -1563,10 +1563,14 @@ Status BlockBasedTableBuilder::InsertBlockInCache(const Slice& block_contents,
void BlockBasedTableBuilder::WriteFilterBlock(
MetaIndexBuilder* meta_index_builder) {
if (rep_->filter_builder == nullptr || rep_->filter_builder->IsEmpty()) {
// No filter block needed
return;
}
BlockHandle filter_block_handle;
bool empty_filter_block =
(rep_->filter_builder == nullptr || rep_->filter_builder->IsEmpty());
if (ok() && !empty_filter_block) {
bool is_block_based_filter = rep_->filter_builder->IsBlockBased();
bool is_partitioned_filter = rep_->table_options.partition_filters;
if (ok()) {
rep_->props.num_filter_entries +=
rep_->filter_builder->EstimateEntriesAdded();
Status s = Status::Incomplete();
@ -1591,31 +1595,23 @@ void BlockBasedTableBuilder::WriteFilterBlock(
rep_->props.filter_size += filter_content.size();
// TODO: Refactor code so that BlockType can determine both the C++ type
// of a block cache entry (TBlocklike) and the CacheEntryRole while
// inserting blocks in cache.
bool top_level_filter_block = false;
if (s.ok() && rep_->table_options.partition_filters &&
!rep_->filter_builder->IsBlockBased()) {
top_level_filter_block = true;
}
WriteRawBlock(filter_content, kNoCompression, &filter_block_handle,
BlockType::kFilter, nullptr /*raw_contents*/,
top_level_filter_block);
BlockType btype = is_block_based_filter ? BlockType::kDeprecatedFilter
: is_partitioned_filter && /* last */ s.ok()
? BlockType::kFilterPartitionIndex
: BlockType::kFilter;
WriteRawBlock(filter_content, kNoCompression, &filter_block_handle, btype,
nullptr /*raw_contents*/);
}
rep_->filter_builder->ResetFilterBitsBuilder();
}
if (ok() && !empty_filter_block) {
if (ok()) {
// Add mapping from "<filter_block_prefix>.Name" to location
// of filter data.
std::string key;
if (rep_->filter_builder->IsBlockBased()) {
key = BlockBasedTable::kFilterBlockPrefix;
} else {
key = rep_->table_options.partition_filters
? BlockBasedTable::kPartitionedFilterBlockPrefix
: BlockBasedTable::kFullFilterBlockPrefix;
}
key = is_block_based_filter ? BlockBasedTable::kFilterBlockPrefix
: is_partitioned_filter
? BlockBasedTable::kPartitionedFilterBlockPrefix
: BlockBasedTable::kFullFilterBlockPrefix;
key.append(rep_->table_options.filter_policy->CompatibilityName());
meta_index_builder->Add(key, filter_block_handle);
}

@ -119,8 +119,7 @@ class BlockBasedTableBuilder : public TableBuilder {
BlockType block_type);
// Directly write data to the file.
void WriteRawBlock(const Slice& data, CompressionType, BlockHandle* handle,
BlockType block_type, const Slice* raw_data = nullptr,
bool is_top_level_filter_block = false);
BlockType block_type, const Slice* raw_data = nullptr);
void SetupCacheKeyPrefix(const TableBuilderOptions& tbo);
@ -130,8 +129,7 @@ class BlockBasedTableBuilder : public TableBuilder {
Status InsertBlockInCacheHelper(const Slice& block_contents,
const BlockHandle* handle,
BlockType block_type,
bool is_top_level_filter_block);
BlockType block_type);
Status InsertBlockInCompressedCache(const Slice& block_contents,
const CompressionType type,

@ -195,6 +195,8 @@ void BlockBasedTable::UpdateCacheHitMetrics(BlockType block_type,
switch (block_type) {
case BlockType::kFilter:
case BlockType::kFilterPartitionIndex:
case BlockType::kDeprecatedFilter:
PERF_COUNTER_ADD(block_cache_filter_hit_count, 1);
if (get_context) {
@ -252,6 +254,8 @@ void BlockBasedTable::UpdateCacheMissMetrics(BlockType block_type,
// TODO: introduce perf counters for misses per block type
switch (block_type) {
case BlockType::kFilter:
case BlockType::kFilterPartitionIndex:
case BlockType::kDeprecatedFilter:
if (get_context) {
++get_context->get_context_stats_.num_cache_filter_miss;
} else {
@ -307,6 +311,8 @@ void BlockBasedTable::UpdateCacheInsertionMetrics(
switch (block_type) {
case BlockType::kFilter:
case BlockType::kFilterPartitionIndex:
case BlockType::kDeprecatedFilter:
if (get_context) {
++get_context->get_context_stats_.num_cache_filter_add;
if (redundant) {
@ -1217,11 +1223,16 @@ Status BlockBasedTable::GetDataBlockFromCache(
: 0;
assert(block);
assert(block->IsEmpty());
// Here we treat the legacy name "...index_and_filter_blocks..." to mean all
// metadata blocks that might go into block cache, EXCEPT only those needed
// for the read path (Get, etc.). TableProperties should not be needed on the
// read path (prefix extractor setting is an O(1) size special case that we
// are working not to require from TableProperties), so it is not given
// high-priority treatment if it should go into BlockCache.
const Cache::Priority priority =
rep_->table_options.cache_index_and_filter_blocks_with_high_priority &&
(block_type == BlockType::kFilter ||
block_type == BlockType::kCompressionDictionary ||
block_type == BlockType::kIndex)
block_type != BlockType::kData &&
block_type != BlockType::kProperties
? Cache::Priority::HIGH
: Cache::Priority::LOW;
@ -1348,9 +1359,7 @@ Status BlockBasedTable::PutDataBlockToCache(
: 0;
const Cache::Priority priority =
rep_->table_options.cache_index_and_filter_blocks_with_high_priority &&
(block_type == BlockType::kFilter ||
block_type == BlockType::kCompressionDictionary ||
block_type == BlockType::kIndex)
block_type != BlockType::kData
? Cache::Priority::HIGH
: Cache::Priority::LOW;
assert(cached_block);
@ -1603,6 +1612,8 @@ Status BlockBasedTable::MaybeReadBlockAndLoadToCache(
++get_context->get_context_stats_.num_index_read;
break;
case BlockType::kFilter:
case BlockType::kFilterPartitionIndex:
case BlockType::kDeprecatedFilter:
++get_context->get_context_stats_.num_filter_read;
break;
case BlockType::kData:
@ -1645,6 +1656,8 @@ Status BlockBasedTable::MaybeReadBlockAndLoadToCache(
trace_block_type = TraceType::kBlockTraceDataBlock;
break;
case BlockType::kFilter:
case BlockType::kFilterPartitionIndex:
case BlockType::kDeprecatedFilter:
trace_block_type = TraceType::kBlockTraceFilterBlock;
break;
case BlockType::kCompressionDictionary:
@ -1759,6 +1772,8 @@ Status BlockBasedTable::RetrieveBlock(
++(get_context->get_context_stats_.num_index_read);
break;
case BlockType::kFilter:
case BlockType::kFilterPartitionIndex:
case BlockType::kDeprecatedFilter:
++(get_context->get_context_stats_.num_filter_read);
break;
case BlockType::kData:
@ -2421,12 +2436,18 @@ Status BlockBasedTable::VerifyChecksumInBlocks(
BlockType BlockBasedTable::GetBlockTypeForMetaBlockByName(
const Slice& meta_block_name) {
if (meta_block_name.starts_with(kFilterBlockPrefix) ||
meta_block_name.starts_with(kFullFilterBlockPrefix) ||
meta_block_name.starts_with(kPartitionedFilterBlockPrefix)) {
if (meta_block_name.starts_with(kFilterBlockPrefix)) {
return BlockType::kDeprecatedFilter;
}
if (meta_block_name.starts_with(kFullFilterBlockPrefix)) {
return BlockType::kFilter;
}
if (meta_block_name.starts_with(kPartitionedFilterBlockPrefix)) {
return BlockType::kFilterPartitionIndex;
}
if (meta_block_name == kPropertiesBlockName) {
return BlockType::kProperties;
}

@ -641,7 +641,7 @@ struct BlockBasedTable::Rep {
table_reader_cache_res_handle = nullptr;
SequenceNumber get_global_seqno(BlockType block_type) const {
return (block_type == BlockType::kFilter ||
return (block_type == BlockType::kFilterPartitionIndex ||
block_type == BlockType::kCompressionDictionary)
? kDisableGlobalSequenceNumber
: global_seqno;

@ -73,7 +73,7 @@ class BlocklikeTraits<BlockContents> {
}
static Cache::CacheItemHelper* GetCacheItemHelper(BlockType block_type) {
if (block_type == BlockType::kFilter) {
if (block_type == BlockType::kDeprecatedFilter) {
return GetCacheItemHelperForRole<
BlockContents, CacheEntryRole::kDeprecatedFilterBlock>();
} else {
@ -160,7 +160,7 @@ class BlocklikeTraits<Block> {
return GetCacheItemHelperForRole<Block, CacheEntryRole::kDataBlock>();
case BlockType::kIndex:
return GetCacheItemHelperForRole<Block, CacheEntryRole::kIndexBlock>();
case BlockType::kFilter:
case BlockType::kFilterPartitionIndex:
return GetCacheItemHelperForRole<Block,
CacheEntryRole::kFilterMetaBlock>();
default:

@ -14,10 +14,13 @@ namespace ROCKSDB_NAMESPACE {
// Represents the types of blocks used in the block based table format.
// See https://github.com/facebook/rocksdb/wiki/Rocksdb-BlockBasedTable-Format
// for details.
// For code sanity, BlockType should imply a specific TBlocklike for
// BlocklikeTraits.
enum class BlockType : uint8_t {
kData,
kFilter,
kFilter, // for second level partitioned filters and full filters
kFilterPartitionIndex, // for top-level index of filter partitions
kDeprecatedFilter, // for old, deprecated block-based filter
kProperties,
kCompressionDictionary,
kRangeDeletion,

@ -16,7 +16,7 @@ Status FilterBlockReaderCommon<TBlocklike>::ReadFilterBlock(
const BlockBasedTable* table, FilePrefetchBuffer* prefetch_buffer,
const ReadOptions& read_options, bool use_cache, GetContext* get_context,
BlockCacheLookupContext* lookup_context,
CachableEntry<TBlocklike>* filter_block) {
CachableEntry<TBlocklike>* filter_block, BlockType block_type) {
PERF_TIMER_GUARD(read_filter_block_nanos);
assert(table);
@ -29,7 +29,7 @@ Status FilterBlockReaderCommon<TBlocklike>::ReadFilterBlock(
const Status s =
table->RetrieveBlock(prefetch_buffer, read_options, rep->filter_handle,
UncompressionDict::GetEmptyDict(), filter_block,
BlockType::kFilter, get_context, lookup_context,
block_type, get_context, lookup_context,
/* for_compaction */ false, use_cache,
/* wait_for_cache */ true, /* async_read */ false);
@ -67,7 +67,7 @@ template <typename TBlocklike>
Status FilterBlockReaderCommon<TBlocklike>::GetOrReadFilterBlock(
bool no_io, GetContext* get_context,
BlockCacheLookupContext* lookup_context,
CachableEntry<TBlocklike>* filter_block) const {
CachableEntry<TBlocklike>* filter_block, BlockType block_type) const {
assert(filter_block);
if (!filter_block_.IsEmpty()) {
@ -82,7 +82,7 @@ Status FilterBlockReaderCommon<TBlocklike>::GetOrReadFilterBlock(
return ReadFilterBlock(table_, nullptr /* prefetch_buffer */, read_options,
cache_filter_blocks(), get_context, lookup_context,
filter_block);
filter_block, block_type);
}
template <typename TBlocklike>

@ -7,6 +7,8 @@
#pragma once
#include <cassert>
#include "block_type.h"
#include "table/block_based/cachable_entry.h"
#include "table/block_based/filter_block.h"
@ -46,7 +48,8 @@ class FilterBlockReaderCommon : public FilterBlockReader {
const ReadOptions& read_options, bool use_cache,
GetContext* get_context,
BlockCacheLookupContext* lookup_context,
CachableEntry<TBlocklike>* filter_block);
CachableEntry<TBlocklike>* filter_block,
BlockType block_type);
const BlockBasedTable* table() const { return table_; }
const SliceTransform* table_prefix_extractor() const;
@ -55,7 +58,8 @@ class FilterBlockReaderCommon : public FilterBlockReader {
Status GetOrReadFilterBlock(bool no_io, GetContext* get_context,
BlockCacheLookupContext* lookup_context,
CachableEntry<TBlocklike>* filter_block) const;
CachableEntry<TBlocklike>* filter_block,
BlockType block_type) const;
size_t ApproximateFilterBlockMemoryUsage() const;

@ -4,8 +4,10 @@
// (found in the LICENSE.Apache file in the root directory).
#include "table/block_based/full_filter_block.h"
#include <array>
#include "block_type.h"
#include "monitoring/perf_context_imp.h"
#include "port/malloc.h"
#include "port/port.h"
@ -149,7 +151,7 @@ std::unique_ptr<FilterBlockReader> FullFilterBlockReader::Create(
if (prefetch || !use_cache) {
const Status s = ReadFilterBlock(table, prefetch_buffer, ro, use_cache,
nullptr /* get_context */, lookup_context,
&filter_block);
&filter_block, BlockType::kFilter);
if (!s.ok()) {
IGNORE_STATUS_IF_ERROR(s);
return std::unique_ptr<FilterBlockReader>();
@ -181,8 +183,8 @@ bool FullFilterBlockReader::MayMatch(
BlockCacheLookupContext* lookup_context) const {
CachableEntry<ParsedFullFilterBlock> filter_block;
const Status s =
GetOrReadFilterBlock(no_io, get_context, lookup_context, &filter_block);
const Status s = GetOrReadFilterBlock(no_io, get_context, lookup_context,
&filter_block, BlockType::kFilter);
if (!s.ok()) {
IGNORE_STATUS_IF_ERROR(s);
return true;
@ -237,8 +239,9 @@ void FullFilterBlockReader::MayMatch(
BlockCacheLookupContext* lookup_context) const {
CachableEntry<ParsedFullFilterBlock> filter_block;
const Status s = GetOrReadFilterBlock(no_io, range->begin()->get_context,
lookup_context, &filter_block);
const Status s =
GetOrReadFilterBlock(no_io, range->begin()->get_context, lookup_context,
&filter_block, BlockType::kFilter);
if (!s.ok()) {
IGNORE_STATUS_IF_ERROR(s);
return;

@ -7,6 +7,7 @@
#include <utility>
#include "block_type.h"
#include "file/random_access_file_reader.h"
#include "logging/logging.h"
#include "monitoring/perf_context_imp.h"
@ -197,9 +198,9 @@ std::unique_ptr<FilterBlockReader> PartitionedFilterBlockReader::Create(
CachableEntry<Block> filter_block;
if (prefetch || !use_cache) {
const Status s = ReadFilterBlock(table, prefetch_buffer, ro, use_cache,
nullptr /* get_context */, lookup_context,
&filter_block);
const Status s = ReadFilterBlock(
table, prefetch_buffer, ro, use_cache, nullptr /* get_context */,
lookup_context, &filter_block, BlockType::kFilterPartitionIndex);
if (!s.ok()) {
IGNORE_STATUS_IF_ERROR(s);
return std::unique_ptr<FilterBlockReader>();
@ -277,9 +278,10 @@ BlockHandle PartitionedFilterBlockReader::GetFilterPartitionHandle(
Statistics* kNullStats = nullptr;
filter_block.GetValue()->NewIndexIterator(
comparator->user_comparator(),
table()->get_rep()->get_global_seqno(BlockType::kFilter), &iter,
kNullStats, true /* total_order_seek */, false /* have_first_key */,
index_key_includes_seq(), index_value_is_full());
table()->get_rep()->get_global_seqno(BlockType::kFilterPartitionIndex),
&iter, kNullStats, true /* total_order_seek */,
false /* have_first_key */, index_key_includes_seq(),
index_value_is_full());
iter.Seek(entry);
if (UNLIKELY(!iter.Valid())) {
// entry is larger than all the keys. However its prefix might still be
@ -335,7 +337,8 @@ bool PartitionedFilterBlockReader::MayMatch(
FilterFunction filter_function) const {
CachableEntry<Block> filter_block;
Status s =
GetOrReadFilterBlock(no_io, get_context, lookup_context, &filter_block);
GetOrReadFilterBlock(no_io, get_context, lookup_context, &filter_block,
BlockType::kFilterPartitionIndex);
if (UNLIKELY(!s.ok())) {
IGNORE_STATUS_IF_ERROR(s);
return true;
@ -371,8 +374,9 @@ void PartitionedFilterBlockReader::MayMatch(
uint64_t block_offset, bool no_io, BlockCacheLookupContext* lookup_context,
FilterManyFunction filter_function) const {
CachableEntry<Block> filter_block;
Status s = GetOrReadFilterBlock(no_io, range->begin()->get_context,
lookup_context, &filter_block);
Status s =
GetOrReadFilterBlock(no_io, range->begin()->get_context, lookup_context,
&filter_block, BlockType::kFilterPartitionIndex);
if (UNLIKELY(!s.ok())) {
IGNORE_STATUS_IF_ERROR(s);
return; // Any/all may match
@ -463,7 +467,8 @@ Status PartitionedFilterBlockReader::CacheDependencies(const ReadOptions& ro,
CachableEntry<Block> filter_block;
Status s = GetOrReadFilterBlock(false /* no_io */, nullptr /* get_context */,
&lookup_context, &filter_block);
&lookup_context, &filter_block,
BlockType::kFilterPartitionIndex);
if (!s.ok()) {
ROCKS_LOG_ERROR(rep->ioptions.logger,
"Error retrieving top-level filter block while trying to "
@ -479,10 +484,10 @@ Status PartitionedFilterBlockReader::CacheDependencies(const ReadOptions& ro,
const InternalKeyComparator* const comparator = internal_comparator();
Statistics* kNullStats = nullptr;
filter_block.GetValue()->NewIndexIterator(
comparator->user_comparator(), rep->get_global_seqno(BlockType::kFilter),
&biter, kNullStats, true /* total_order_seek */,
false /* have_first_key */, index_key_includes_seq(),
index_value_is_full());
comparator->user_comparator(),
rep->get_global_seqno(BlockType::kFilterPartitionIndex), &biter,
kNullStats, true /* total_order_seek */, false /* have_first_key */,
index_key_includes_seq(), index_value_is_full());
// Index partitions are assumed to be consecutive. Prefetch them all.
// Read the first block offset
biter.SeekToFirst();

@ -284,6 +284,8 @@ IOStatus BlockFetcher::ReadBlockContents() {
// TODO: introduce dedicated perf counter for range tombstones
switch (block_type_) {
case BlockType::kFilter:
case BlockType::kFilterPartitionIndex:
case BlockType::kDeprecatedFilter:
PERF_COUNTER_ADD(filter_block_read_count, 1);
break;

@ -324,7 +324,7 @@ IOStatus CacheDumpedLoaderImpl::RestoreCacheEntriesToSecondaryCache() {
switch (dump_unit.type) {
case CacheDumpUnitType::kDeprecatedFilterBlock: {
helper = BlocklikeTraits<BlockContents>::GetCacheItemHelper(
BlockType::kFilter);
BlockType::kDeprecatedFilter);
std::unique_ptr<BlockContents> block_holder;
block_holder.reset(BlocklikeTraits<BlockContents>::Create(
std::move(raw_block_contents), 0, statistics, false,
@ -376,7 +376,8 @@ IOStatus CacheDumpedLoaderImpl::RestoreCacheEntriesToSecondaryCache() {
break;
}
case CacheDumpUnitType::kFilterMetaBlock: {
helper = BlocklikeTraits<Block>::GetCacheItemHelper(BlockType::kFilter);
helper = BlocklikeTraits<Block>::GetCacheItemHelper(
BlockType::kFilterPartitionIndex);
std::unique_ptr<Block> block_holder;
block_holder.reset(BlocklikeTraits<Block>::Create(
std::move(raw_block_contents), toptions_.read_amp_bytes_per_bit,

Loading…
Cancel
Save