Remove deprecated block-based filter (#10184)
Summary: In https://github.com/facebook/rocksdb/issues/9535, release 7.0, we hid the old block-based filter from being created using the public API, because of its inefficiency. Although we normally maintain read compatibility on old DBs forever, filters are not required for reading a DB, only for optimizing read performance. Thus, it should be acceptable to remove this code and the substantial maintenance burden it carries as useful features are developed and validated (such as user timestamp). This change completely removes the code for reading and writing the old block-based filters, net removing about 1370 lines of code no longer needed. Options removed from testing / benchmarking tools. The prior existence is only evident in a couple of places: * `CacheEntryRole::kDeprecatedFilterBlock` - We can update this public API enum in a major release to minimize source code incompatibilities. * A warning is logged when an old table file is opened that used the old block-based filter. This is provided as a courtesy, and would be a pain to unit test, so manual testing should suffice. Unfortunately, sst_dump does not tell you whether a file uses block-based filter, and the structure of the code makes it very difficult to fix. * To detect that case, `kObsoleteFilterBlockPrefix` (renamed from `kFilterBlockPrefix`) for metaindex is maintained (for now). Other notes: * In some cases where numbers are associated with filter configurations, we have had to update the assigned numbers so that they all correspond to something that exists. * Fixed potential stat counting bug by assuming `filter_checked = false` for cases like `filter == nullptr` rather than assuming `filter_checked = true` * Removed obsolete `block_offset` and `prefix_extractor` parameters from several functions. 
* Removed some unnecessary checks `if (!table_prefix_extractor() && !prefix_extractor)` because the caller guarantees the prefix extractor exists and is compatible. Pull Request resolved: https://github.com/facebook/rocksdb/pull/10184 Test Plan: tests updated; manually tested the new warning in LOG using a base-version-generated DB. Reviewed By: riversand963 Differential Revision: D37212647 Pulled By: pdillinger fbshipit-source-id: 06ee020d8de3b81260ffc36ad0c1202cbf463a80 (branch: main)
parent
a6691d0f65
commit
126c223714
@ -1,358 +0,0 @@ |
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
//
|
||||
// Copyright (c) 2012 The LevelDB Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
||||
|
||||
#include "table/block_based/block_based_filter_block.h" |
||||
|
||||
#include <algorithm> |
||||
|
||||
#include "db/dbformat.h" |
||||
#include "monitoring/perf_context_imp.h" |
||||
#include "rocksdb/filter_policy.h" |
||||
#include "table/block_based/block_based_table_reader.h" |
||||
#include "util/cast_util.h" |
||||
#include "util/coding.h" |
||||
#include "util/string_util.h" |
||||
|
||||
namespace ROCKSDB_NAMESPACE { |
||||
|
||||
namespace { |
||||
|
||||
// Append one "key: value" line to *props, right-aligning the key in a
// 14-character label field and wrapping the value every 64 bytes;
// continuation lines are indented by 16 spaces to line up under the value.
void AppendItem(std::string* props, const std::string& key,
                const std::string& value) {
  constexpr size_t kChunk = 64;       // max value bytes per output line
  constexpr size_t kTab = 2;          // width of the ": " separator
  constexpr size_t kLabelWidth = 16;  // label field incl. separator

  // Wrap the value into kChunk-sized segments.
  std::string wrapped;
  wrapped.append(value, 0, std::min(kChunk, value.size()));
  size_t pos = kChunk;
  while (pos < value.size()) {
    wrapped.push_back('\n');
    wrapped.append(kLabelWidth, ' ');
    wrapped.append(value, pos, std::min(kChunk, value.size() - pos));
    pos += kChunk;
  }

  // Right-align the key in a (kLabelWidth - kTab)-wide field; keys that
  // are at least that wide get no padding.
  std::string label;
  if (key.size() < kLabelWidth - kTab) {
    label.assign(kLabelWidth - kTab - key.size(), ' ');
  }
  label += key;

  *props += label + ": " + wrapped + "\n";
}
||||
|
||||
// Numeric-key convenience wrapper: stringify the key and delegate to the
// std::string overload above.
template <class TKey>
void AppendItem(std::string* props, const TKey& key, const std::string& value) {
  AppendItem(props, std::to_string(key), value);
}
||||
} // namespace
|
||||
|
||||
// See doc/table_format.txt for an explanation of the filter block format.
|
||||
|
||||
// Generate new filter every 2KB of data
|
||||
// Each filter covers a 2^kFilterBaseLg = 2048-byte range of data-block
// offsets (legacy LevelDB scheme; see StartBlock / MayMatch).
static const size_t kFilterBaseLg = 11;
static const size_t kFilterBase = 1 << kFilterBaseLg;
||||
|
||||
// Builder for the deprecated block-based (per-2KB-range) filter format.
// prefix_extractor may be null (then only whole keys are added, subject to
// table_opt.whole_key_filtering). bits_per_key configures the bloom filters.
BlockBasedFilterBlockBuilder::BlockBasedFilterBlockBuilder(
    const SliceTransform* prefix_extractor,
    const BlockBasedTableOptions& table_opt, int bits_per_key)
    : prefix_extractor_(prefix_extractor),
      whole_key_filtering_(table_opt.whole_key_filtering),
      bits_per_key_(bits_per_key),
      prev_prefix_start_(0),
      prev_prefix_size_(0),
      total_added_in_built_(0) {}
||||
|
||||
void BlockBasedFilterBlockBuilder::StartBlock(uint64_t block_offset) { |
||||
uint64_t filter_index = (block_offset / kFilterBase); |
||||
assert(filter_index >= filter_offsets_.size()); |
||||
while (filter_index > filter_offsets_.size()) { |
||||
GenerateFilter(); |
||||
} |
||||
} |
||||
|
||||
size_t BlockBasedFilterBlockBuilder::EstimateEntriesAdded() { |
||||
return total_added_in_built_ + start_.size(); |
||||
} |
||||
|
||||
void BlockBasedFilterBlockBuilder::Add(const Slice& key_without_ts) { |
||||
if (prefix_extractor_ && prefix_extractor_->InDomain(key_without_ts)) { |
||||
AddPrefix(key_without_ts); |
||||
} |
||||
|
||||
if (whole_key_filtering_) { |
||||
AddKey(key_without_ts); |
||||
} |
||||
} |
||||
|
||||
// Add key to filter if needed
|
||||
// Append one entry to the flattened pending-entry storage: start_ records
// where the entry begins inside entries_.
inline void BlockBasedFilterBlockBuilder::AddKey(const Slice& key) {
  start_.push_back(entries_.size());
  entries_.append(key.data(), key.size());
}
||||
|
||||
// Add prefix to filter if needed
|
||||
// Add the key's prefix to the pending filter, skipping consecutive
// duplicates (keys arrive sorted, so equal prefixes are adjacent).
inline void BlockBasedFilterBlockBuilder::AddPrefix(const Slice& key) {
  // get slice for most recently added entry
  Slice prev;
  if (prev_prefix_size_ > 0) {
    // NOTE: prev points into entries_; valid because entries_ is not
    // mutated between here and the comparison below.
    prev = Slice(entries_.data() + prev_prefix_start_, prev_prefix_size_);
  }

  Slice prefix = prefix_extractor_->Transform(key);
  // insert prefix only when it's different from the previous prefix.
  if (prev.size() == 0 || prefix != prev) {
    prev_prefix_start_ = entries_.size();
    prev_prefix_size_ = prefix.size();
    AddKey(prefix);
  }
}
||||
|
||||
// Serialize the filter block. Resulting layout (legacy LevelDB format):
//   [filter 0][filter 1]...[offset array: fixed32 per filter]
//   [array_offset: fixed32][kFilterBaseLg: 1 byte]
// The returned Slice points into result_, which must outlive the caller's
// use of it.
Slice BlockBasedFilterBlockBuilder::Finish(
    const BlockHandle& /*tmp*/, Status* status,
    std::unique_ptr<const char[]>* /* filter_data */) {
  // In this impl we ignore BlockHandle and filter_data
  *status = Status::OK();

  // Flush any entries still pending into a final filter.
  if (!start_.empty()) {
    GenerateFilter();
  }

  // Append array of per-filter offsets
  const uint32_t array_offset = static_cast<uint32_t>(result_.size());
  for (size_t i = 0; i < filter_offsets_.size(); i++) {
    PutFixed32(&result_, filter_offsets_[i]);
  }

  // array_offset doubles as the limit of the last filter when the reader
  // indexes offset[i + 1].
  PutFixed32(&result_, array_offset);
  result_.push_back(kFilterBaseLg);  // Save encoding parameter in result
  return Slice(result_);
}
||||
|
||||
// Build one bloom filter from all pending entries, append it to result_,
// and record its start offset. With no pending entries this records an
// empty filter (start == next filter's start) for the covered range.
void BlockBasedFilterBlockBuilder::GenerateFilter() {
  const size_t num_entries = start_.size();
  if (num_entries == 0) {
    // Fast path if there are no keys for this filter
    filter_offsets_.push_back(static_cast<uint32_t>(result_.size()));
    return;
  }
  total_added_in_built_ += num_entries;

  // Make list of keys from flattened key structure
  start_.push_back(entries_.size());  // Simplify length computation
  tmp_entries_.resize(num_entries);
  for (size_t i = 0; i < num_entries; i++) {
    const char* base = entries_.data() + start_[i];
    size_t length = start_[i + 1] - start_[i];
    tmp_entries_[i] = Slice(base, length);
  }

  // Generate filter for current set of keys and append to result_.
  filter_offsets_.push_back(static_cast<uint32_t>(result_.size()));
  DeprecatedBlockBasedBloomFilterPolicy::CreateFilter(
      tmp_entries_.data(), static_cast<int>(num_entries), bits_per_key_,
      &result_);

  // Reset all pending state for the next filter range.
  tmp_entries_.clear();
  entries_.clear();
  start_.clear();
  prev_prefix_start_ = 0;
  prev_prefix_size_ = 0;
}
||||
|
||||
// Wrap an (optionally cached) raw filter-block contents entry for reading.
// The table must outlive this reader.
BlockBasedFilterBlockReader::BlockBasedFilterBlockReader(
    const BlockBasedTable* t, CachableEntry<BlockContents>&& filter_block)
    : FilterBlockReaderCommon(t, std::move(filter_block)) {
  assert(table());
  assert(table()->get_rep());
  assert(table()->get_rep()->filter_policy);
}
||||
|
||||
// Factory: optionally prefetch the filter block and construct a reader.
// - prefetch || !use_cache: the block is read eagerly here.
// - use_cache && !pin: the eagerly-read block is released again so it
//   lives only in the block cache, not pinned in the reader.
// Returns nullptr on read failure (filters are optional; errors ignored).
std::unique_ptr<FilterBlockReader> BlockBasedFilterBlockReader::Create(
    const BlockBasedTable* table, const ReadOptions& ro,
    FilePrefetchBuffer* prefetch_buffer, bool use_cache, bool prefetch,
    bool pin, BlockCacheLookupContext* lookup_context) {
  assert(table);
  assert(table->get_rep());
  assert(!pin || prefetch);  // pinning requires having read the block

  CachableEntry<BlockContents> filter_block;
  if (prefetch || !use_cache) {
    const Status s = ReadFilterBlock(
        table, prefetch_buffer, ro, use_cache, nullptr /* get_context */,
        lookup_context, &filter_block, BlockType::kDeprecatedFilter);
    if (!s.ok()) {
      IGNORE_STATUS_IF_ERROR(s);
      return std::unique_ptr<FilterBlockReader>();
    }

    if (use_cache && !pin) {
      filter_block.Reset();
    }
  }

  return std::unique_ptr<FilterBlockReader>(
      new BlockBasedFilterBlockReader(table, std::move(filter_block)));
}
||||
|
||||
bool BlockBasedFilterBlockReader::KeyMayMatch( |
||||
const Slice& key, const SliceTransform* /* prefix_extractor */, |
||||
uint64_t block_offset, const bool no_io, |
||||
const Slice* const /*const_ikey_ptr*/, GetContext* get_context, |
||||
BlockCacheLookupContext* lookup_context) { |
||||
assert(block_offset != kNotValid); |
||||
if (!whole_key_filtering()) { |
||||
return true; |
||||
} |
||||
return MayMatch(key, block_offset, no_io, get_context, lookup_context); |
||||
} |
||||
|
||||
// Prefix filter probe: false only when the filter proves no key with this
// prefix exists in the data block at block_offset. The caller guarantees
// the prefix was produced by a compatible extractor, so the parameter is
// unused here.
bool BlockBasedFilterBlockReader::PrefixMayMatch(
    const Slice& prefix, const SliceTransform* /* prefix_extractor */,
    uint64_t block_offset, const bool no_io,
    const Slice* const /*const_ikey_ptr*/, GetContext* get_context,
    BlockCacheLookupContext* lookup_context) {
  assert(block_offset != kNotValid);
  return MayMatch(prefix, block_offset, no_io, get_context, lookup_context);
}
||||
|
||||
// Decode the trailer of a serialized filter block (see Finish for layout).
// On success sets:
//   *data    - start of the block (filter bytes),
//   *offset  - start of the per-filter offset array,
//   *num     - number of per-filter offsets,
//   *base_lg - log2 of the offset range covered per filter.
// Returns false if the block is too short or the trailer is inconsistent.
bool BlockBasedFilterBlockReader::ParseFieldsFromBlock(
    const BlockContents& contents, const char** data, const char** offset,
    size_t* num, size_t* base_lg) {
  assert(data);
  assert(offset);
  assert(num);
  assert(base_lg);

  const size_t n = contents.data.size();
  if (n < 5) {  // 1 byte for base_lg and 4 for start of offset array
    return false;
  }

  // Second-to-last word holds the offset-array start; it must lie inside
  // the block.
  const uint32_t last_word = DecodeFixed32(contents.data.data() + n - 5);
  if (last_word > n - 5) {
    return false;
  }

  *data = contents.data.data();
  *offset = (*data) + last_word;
  *num = (n - 5 - last_word) / 4;  // 4 bytes per fixed32 offset entry
  *base_lg = contents.data[n - 1];

  return true;
}
||||
|
||||
// Core probe shared by KeyMayMatch/PrefixMayMatch: look up the filter
// covering block_offset and test `entry` against it. Any error (failed
// read, unparsable block, out-of-range offsets) is treated as a potential
// match, i.e. returns true, since filters are only an optimization.
bool BlockBasedFilterBlockReader::MayMatch(
    const Slice& entry, uint64_t block_offset, bool no_io,
    GetContext* get_context, BlockCacheLookupContext* lookup_context) const {
  CachableEntry<BlockContents> filter_block;

  const Status s =
      GetOrReadFilterBlock(no_io, get_context, lookup_context, &filter_block,
                           BlockType::kDeprecatedFilter);
  if (!s.ok()) {
    IGNORE_STATUS_IF_ERROR(s);
    return true;
  }

  assert(filter_block.GetValue());

  const char* data = nullptr;
  const char* offset = nullptr;
  size_t num = 0;
  size_t base_lg = 0;
  if (!ParseFieldsFromBlock(*filter_block.GetValue(), &data, &offset, &num,
                            &base_lg)) {
    return true;  // Errors are treated as potential matches
  }

  // Select the filter covering this block offset.
  const uint64_t index = block_offset >> base_lg;
  if (index < num) {
    const uint32_t start = DecodeFixed32(offset + index * 4);
    // offset[index + 1] is valid even for the last filter: Finish appends
    // array_offset right after the per-filter offsets.
    const uint32_t limit = DecodeFixed32(offset + index * 4 + 4);
    if (start <= limit && limit <= (uint32_t)(offset - data)) {
      const Slice filter = Slice(data + start, limit - start);

      assert(table());
      assert(table()->get_rep());

      const bool may_match =
          DeprecatedBlockBasedBloomFilterPolicy::KeyMayMatch(entry, filter);
      if (may_match) {
        PERF_COUNTER_ADD(bloom_sst_hit_count, 1);
        return true;
      } else {
        PERF_COUNTER_ADD(bloom_sst_miss_count, 1);
        return false;
      }
    } else if (start == limit) {
      // Empty filters do not match any entries
      return false;
    }
  }
  return true;  // Errors are treated as potential matches
}
||||
|
||||
// Heap footprint estimate: the (possibly pinned) filter block plus this
// object itself (actual allocation size when malloc introspection is
// available, otherwise sizeof).
size_t BlockBasedFilterBlockReader::ApproximateMemoryUsage() const {
  size_t usage = ApproximateFilterBlockMemoryUsage();
#ifdef ROCKSDB_MALLOC_USABLE_SIZE
  usage += malloc_usable_size(const_cast<BlockBasedFilterBlockReader*>(this));
#else
  usage += sizeof(*this);
#endif  // ROCKSDB_MALLOC_USABLE_SIZE
  return usage;
}
||||
|
||||
// Human-readable dump of the filter block: the filter count followed by a
// hex dump of every non-empty per-range filter. Reads via the block cache;
// read or parse failures yield an explanatory string instead.
std::string BlockBasedFilterBlockReader::ToString() const {
  CachableEntry<BlockContents> filter_block;

  const Status s =
      GetOrReadFilterBlock(false /* no_io */, nullptr /* get_context */,
                           nullptr /* lookup_context */, &filter_block,
                           BlockType::kDeprecatedFilter);
  if (!s.ok()) {
    IGNORE_STATUS_IF_ERROR(s);
    return std::string("Unable to retrieve filter block");
  }

  assert(filter_block.GetValue());

  const char* data = nullptr;
  const char* offset = nullptr;
  size_t num = 0;
  size_t base_lg = 0;
  if (!ParseFieldsFromBlock(*filter_block.GetValue(), &data, &offset, &num,
                            &base_lg)) {
    return std::string("Error parsing filter block");
  }

  std::string result;
  result.reserve(1024);

  std::string s_bo("Block offset"), s_hd("Hex dump"), s_fb("# filter blocks");
  AppendItem(&result, s_fb, std::to_string(num));
  AppendItem(&result, s_bo, s_hd);

  for (size_t index = 0; index < num; index++) {
    uint32_t start = DecodeFixed32(offset + index * 4);
    uint32_t limit = DecodeFixed32(offset + index * 4 + 4);

    // Skip empty filters (ranges that received no entries).
    if (start != limit) {
      result.append(" filter block # " + std::to_string(index + 1) + "\n");
      Slice filter = Slice(data + start, limit - start);
      AppendItem(&result, start, filter.ToString(true));
    }
  }
  return result;
}
||||
|
||||
} // namespace ROCKSDB_NAMESPACE
|
@ -1,127 +0,0 @@ |
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
//
|
||||
// Copyright (c) 2012 The LevelDB Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
||||
//
|
||||
// A filter block is stored near the end of a Table file. It contains
|
||||
// filters (e.g., bloom filters) for all data blocks in the table combined
|
||||
// into a single filter block.
|
||||
|
||||
#pragma once |
||||
|
||||
#include <stddef.h> |
||||
#include <stdint.h> |
||||
|
||||
#include <memory> |
||||
#include <string> |
||||
#include <vector> |
||||
|
||||
#include "rocksdb/options.h" |
||||
#include "rocksdb/slice.h" |
||||
#include "rocksdb/slice_transform.h" |
||||
#include "table/block_based/filter_block_reader_common.h" |
||||
#include "table/block_based/filter_policy_internal.h" |
||||
#include "table/format.h" |
||||
#include "util/hash.h" |
||||
|
||||
namespace ROCKSDB_NAMESPACE { |
||||
|
||||
// A BlockBasedFilterBlockBuilder is used to construct all of the filters for a
|
||||
// particular Table. It generates a single string which is stored as
|
||||
// a special block in the Table.
|
||||
//
|
||||
// The sequence of calls to BlockBasedFilterBlockBuilder must match the regexp:
|
||||
// (StartBlock Add*)* Finish
|
||||
class BlockBasedFilterBlockBuilder : public FilterBlockBuilder {
 public:
  // prefix_extractor may be null; bits_per_key configures the bloom filter.
  BlockBasedFilterBlockBuilder(const SliceTransform* prefix_extractor,
                               const BlockBasedTableOptions& table_opt,
                               int bits_per_key);
  // No copying allowed
  BlockBasedFilterBlockBuilder(const BlockBasedFilterBlockBuilder&) = delete;
  void operator=(const BlockBasedFilterBlockBuilder&) = delete;

  virtual bool IsBlockBased() override { return true; }
  // Begin a new data block at block_offset; flushes a filter per 2KB range.
  virtual void StartBlock(uint64_t block_offset) override;
  // Add a user key (timestamp already stripped) to the pending filter.
  virtual void Add(const Slice& key_without_ts) override;
  virtual bool IsEmpty() const override {
    return start_.empty() && filter_offsets_.empty();
  }
  virtual size_t EstimateEntriesAdded() override;
  // Serialize the filter block; BlockHandle and filter_data are unused in
  // this implementation.
  virtual Slice Finish(
      const BlockHandle& tmp, Status* status,
      std::unique_ptr<const char[]>* filter_data = nullptr) override;
  using FilterBlockBuilder::Finish;

 private:
  void AddKey(const Slice& key);
  void AddPrefix(const Slice& key);
  void GenerateFilter();

  // important: all of these might point to invalid addresses
  // at the time of destruction of this filter block. destructor
  // should NOT dereference them.
  const SliceTransform* prefix_extractor_;
  bool whole_key_filtering_;
  int bits_per_key_;

  size_t prev_prefix_start_;  // the position of the last appended prefix
                              // to "entries_".
  size_t prev_prefix_size_;   // the length of the last appended prefix to
                              // "entries_".
  std::string entries_;       // Flattened entry contents
  std::vector<size_t> start_;  // Starting index in entries_ of each entry
  std::string result_;         // Filter data computed so far
  std::vector<Slice> tmp_entries_;  // policy_->CreateFilter() argument
  std::vector<uint32_t> filter_offsets_;
  uint64_t total_added_in_built_;  // Total keys added to filters built so far
};
||||
|
||||
// A FilterBlockReader is used to parse filter from SST table.
|
||||
// KeyMayMatch and PrefixMayMatch would trigger filter checking
|
||||
class BlockBasedFilterBlockReader
    : public FilterBlockReaderCommon<BlockContents> {
 public:
  // The table must outlive this reader.
  BlockBasedFilterBlockReader(const BlockBasedTable* t,
                              CachableEntry<BlockContents>&& filter_block);
  // No copying allowed
  BlockBasedFilterBlockReader(const BlockBasedFilterBlockReader&) = delete;
  void operator=(const BlockBasedFilterBlockReader&) = delete;

  // Factory; may prefetch/pin the filter block per the flags. Returns
  // nullptr on read failure (filters are optional).
  static std::unique_ptr<FilterBlockReader> Create(
      const BlockBasedTable* table, const ReadOptions& ro,
      FilePrefetchBuffer* prefetch_buffer, bool use_cache, bool prefetch,
      bool pin, BlockCacheLookupContext* lookup_context);

  bool IsBlockBased() override { return true; }

  // For both probes: false proves absence; true means "may exist".
  bool KeyMayMatch(const Slice& key, const SliceTransform* prefix_extractor,
                   uint64_t block_offset, const bool no_io,
                   const Slice* const const_ikey_ptr, GetContext* get_context,
                   BlockCacheLookupContext* lookup_context) override;
  bool PrefixMayMatch(const Slice& prefix,
                      const SliceTransform* prefix_extractor,
                      uint64_t block_offset, const bool no_io,
                      const Slice* const const_ikey_ptr,
                      GetContext* get_context,
                      BlockCacheLookupContext* lookup_context) override;
  size_t ApproximateMemoryUsage() const override;

  // convert this object to a human readable form
  std::string ToString() const override;

 private:
  // Decode the block trailer into its constituent fields; see the .cc.
  static bool ParseFieldsFromBlock(const BlockContents& contents,
                                   const char** data, const char** offset,
                                   size_t* num, size_t* base_lg);

  bool MayMatch(const Slice& entry, uint64_t block_offset, bool no_io,
                GetContext* get_context,
                BlockCacheLookupContext* lookup_context) const;
};
||||
|
||||
} // namespace ROCKSDB_NAMESPACE
|
@ -1,219 +0,0 @@ |
||||
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
//
|
||||
// Copyright (c) 2012 The LevelDB Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
||||
|
||||
#include "table/block_based/block_based_filter_block.h" |
||||
#include "rocksdb/filter_policy.h" |
||||
#include "table/block_based/block_based_table_reader.h" |
||||
#include "table/block_based/mock_block_based_table.h" |
||||
#include "test_util/testharness.h" |
||||
#include "test_util/testutil.h" |
||||
#include "util/coding.h" |
||||
#include "util/hash.h" |
||||
#include "util/string_util.h" |
||||
|
||||
namespace ROCKSDB_NAMESPACE { |
||||
|
||||
// Test for block based filter block
|
||||
// use new interface in FilterPolicy to create filter builder/reader
|
||||
// Fixture providing a mock BlockBasedTable (table_, table_options_) with a
// 10-bits-per-key bloom policy for exercising the block-based filter.
class BlockBasedFilterBlockTest : public mock::MockBlockBasedTableTester,
                                  public testing::Test {
 public:
  BlockBasedFilterBlockTest()
      : mock::MockBlockBasedTableTester(NewBloomFilterPolicy(10, true)) {}
};
||||
|
||||
// An untouched builder must produce the minimal 5-byte block (empty offset
// array + base_lg), and a reader over it must treat every key as a
// potential match.
TEST_F(BlockBasedFilterBlockTest, BlockBasedEmptyBuilder) {
  FilterBlockBuilder* builder =
      new BlockBasedFilterBlockBuilder(nullptr, table_options_, 10);
  Slice slice(builder->Finish());
  ASSERT_EQ("\\x00\\x00\\x00\\x00\\x0b", EscapeString(slice));

  CachableEntry<BlockContents> block(
      new BlockContents(slice), nullptr /* cache */, nullptr /* cache_handle */,
      true /* own_value */);

  FilterBlockReader* reader =
      new BlockBasedFilterBlockReader(table_.get(), std::move(block));
  ASSERT_TRUE(reader->KeyMayMatch(
      "foo", /*prefix_extractor=*/nullptr, /*block_offset=*/uint64_t{0},
      /*no_io=*/false, /*const_ikey_ptr=*/nullptr, /*get_context=*/nullptr,
      /*lookup_context=*/nullptr));
  ASSERT_TRUE(reader->KeyMayMatch(
      "foo", /*prefix_extractor=*/nullptr, /*block_offset=*/10000,
      /*no_io=*/false, /*const_ikey_ptr=*/nullptr, /*get_context=*/nullptr,
      /*lookup_context=*/nullptr));

  delete builder;
  delete reader;
}
||||
|
||||
// Blocks at offsets 100/200/300 all fall in the first 2KB range, so all
// keys land in one filter: added keys match at any of those offsets,
// absent keys do not.
TEST_F(BlockBasedFilterBlockTest, BlockBasedSingleChunk) {
  FilterBlockBuilder* builder =
      new BlockBasedFilterBlockBuilder(nullptr, table_options_, 10);
  builder->StartBlock(100);
  builder->Add("foo");
  builder->Add("bar");
  builder->Add("box");
  builder->StartBlock(200);
  builder->Add("box");
  builder->StartBlock(300);
  builder->Add("hello");
  Slice slice(builder->Finish());

  CachableEntry<BlockContents> block(
      new BlockContents(slice), nullptr /* cache */, nullptr /* cache_handle */,
      true /* own_value */);

  FilterBlockReader* reader =
      new BlockBasedFilterBlockReader(table_.get(), std::move(block));
  ASSERT_TRUE(reader->KeyMayMatch(
      "foo", /*prefix_extractor=*/nullptr, /*block_offset=*/100,
      /*no_io=*/false, /*const_ikey_ptr=*/nullptr, /*get_context=*/nullptr,
      /*lookup_context=*/nullptr));
  ASSERT_TRUE(reader->KeyMayMatch(
      "bar", /*prefix_extractor=*/nullptr, /*block_offset=*/100,
      /*no_io=*/false, /*const_ikey_ptr=*/nullptr, /*get_context=*/nullptr,
      /*lookup_context=*/nullptr));
  ASSERT_TRUE(reader->KeyMayMatch(
      "box", /*prefix_extractor=*/nullptr, /*block_offset=*/100,
      /*no_io=*/false, /*const_ikey_ptr=*/nullptr, /*get_context=*/nullptr,
      /*lookup_context=*/nullptr));
  ASSERT_TRUE(reader->KeyMayMatch(
      "hello", /*prefix_extractor=*/nullptr, /*block_offset=*/100,
      /*no_io=*/false, /*const_ikey_ptr=*/nullptr, /*get_context=*/nullptr,
      /*lookup_context=*/nullptr));
  ASSERT_TRUE(reader->KeyMayMatch(
      "foo", /*prefix_extractor=*/nullptr, /*block_offset=*/100,
      /*no_io=*/false, /*const_ikey_ptr=*/nullptr, /*get_context=*/nullptr,
      /*lookup_context=*/nullptr));
  ASSERT_TRUE(!reader->KeyMayMatch(
      "missing", /*prefix_extractor=*/nullptr, /*block_offset=*/100,
      /*no_io=*/false, /*const_ikey_ptr=*/nullptr, /*get_context=*/nullptr,
      /*lookup_context=*/nullptr));
  ASSERT_TRUE(!reader->KeyMayMatch(
      "other", /*prefix_extractor=*/nullptr, /*block_offset=*/100,
      /*no_io=*/false, /*const_ikey_ptr=*/nullptr, /*get_context=*/nullptr,
      /*lookup_context=*/nullptr));

  delete builder;
  delete reader;
}
||||
|
||||
// Offsets spanning several 2KB ranges produce multiple filters, including
// an empty one for the untouched 4KB-8KB range; each filter must match
// only its own keys and the empty filter must match nothing.
TEST_F(BlockBasedFilterBlockTest, BlockBasedMultiChunk) {
  FilterBlockBuilder* builder =
      new BlockBasedFilterBlockBuilder(nullptr, table_options_, 10);

  // First filter
  builder->StartBlock(0);
  builder->Add("foo");
  builder->StartBlock(2000);
  builder->Add("bar");

  // Second filter
  builder->StartBlock(3100);
  builder->Add("box");

  // Third filter is empty

  // Last filter
  builder->StartBlock(9000);
  builder->Add("box");
  builder->Add("hello");

  Slice slice(builder->Finish());

  CachableEntry<BlockContents> block(
      new BlockContents(slice), nullptr /* cache */, nullptr /* cache_handle */,
      true /* own_value */);

  FilterBlockReader* reader =
      new BlockBasedFilterBlockReader(table_.get(), std::move(block));

  // Check first filter
  ASSERT_TRUE(reader->KeyMayMatch(
      "foo", /*prefix_extractor=*/nullptr, /*block_offset=*/uint64_t{0},
      /*no_io=*/false, /*const_ikey_ptr=*/nullptr, /*get_context=*/nullptr,
      /*lookup_context=*/nullptr));
  ASSERT_TRUE(reader->KeyMayMatch(
      "bar", /*prefix_extractor=*/nullptr, /*block_offset=*/2000,
      /*no_io=*/false, /*const_ikey_ptr=*/nullptr, /*get_context=*/nullptr,
      /*lookup_context=*/nullptr));
  ASSERT_TRUE(!reader->KeyMayMatch(
      "box", /*prefix_extractor=*/nullptr, /*block_offset=*/uint64_t{0},
      /*no_io=*/false, /*const_ikey_ptr=*/nullptr, /*get_context=*/nullptr,
      /*lookup_context=*/nullptr));
  ASSERT_TRUE(!reader->KeyMayMatch(
      "hello", /*prefix_extractor=*/nullptr, /*block_offset=*/uint64_t{0},
      /*no_io=*/false, /*const_ikey_ptr=*/nullptr, /*get_context=*/nullptr,
      /*lookup_context=*/nullptr));

  // Check second filter
  ASSERT_TRUE(reader->KeyMayMatch(
      "box", /*prefix_extractor=*/nullptr, /*block_offset=*/3100,
      /*no_io=*/false, /*const_ikey_ptr=*/nullptr, /*get_context=*/nullptr,
      /*lookup_context=*/nullptr));
  ASSERT_TRUE(!reader->KeyMayMatch(
      "foo", /*prefix_extractor=*/nullptr, /*block_offset=*/3100,
      /*no_io=*/false, /*const_ikey_ptr=*/nullptr, /*get_context=*/nullptr,
      /*lookup_context=*/nullptr));
  ASSERT_TRUE(!reader->KeyMayMatch(
      "bar", /*prefix_extractor=*/nullptr, /*block_offset=*/3100,
      /*no_io=*/false, /*const_ikey_ptr=*/nullptr, /*get_context=*/nullptr,
      /*lookup_context=*/nullptr));
  ASSERT_TRUE(!reader->KeyMayMatch(
      "hello", /*prefix_extractor=*/nullptr, /*block_offset=*/3100,
      /*no_io=*/false, /*const_ikey_ptr=*/nullptr, /*get_context=*/nullptr,
      /*lookup_context=*/nullptr));

  // Check third filter (empty)
  ASSERT_TRUE(!reader->KeyMayMatch(
      "foo", /*prefix_extractor=*/nullptr, /*block_offset=*/4100,
      /*no_io=*/false, /*const_ikey_ptr=*/nullptr, /*get_context=*/nullptr,
      /*lookup_context=*/nullptr));
  ASSERT_TRUE(!reader->KeyMayMatch(
      "bar", /*prefix_extractor=*/nullptr, /*block_offset=*/4100,
      /*no_io=*/false, /*const_ikey_ptr=*/nullptr, /*get_context=*/nullptr,
      /*lookup_context=*/nullptr));
  ASSERT_TRUE(!reader->KeyMayMatch(
      "box", /*prefix_extractor=*/nullptr, /*block_offset=*/4100,
      /*no_io=*/false, /*const_ikey_ptr=*/nullptr, /*get_context=*/nullptr,
      /*lookup_context=*/nullptr));
  ASSERT_TRUE(!reader->KeyMayMatch(
      "hello", /*prefix_extractor=*/nullptr, /*block_offset=*/4100,
      /*no_io=*/false, /*const_ikey_ptr=*/nullptr, /*get_context=*/nullptr,
      /*lookup_context=*/nullptr));

  // Check last filter
  ASSERT_TRUE(reader->KeyMayMatch(
      "box", /*prefix_extractor=*/nullptr, /*block_offset=*/9000,
      /*no_io=*/false, /*const_ikey_ptr=*/nullptr, /*get_context=*/nullptr,
      /*lookup_context=*/nullptr));
  ASSERT_TRUE(reader->KeyMayMatch(
      "hello", /*prefix_extractor=*/nullptr, /*block_offset=*/9000,
      /*no_io=*/false, /*const_ikey_ptr=*/nullptr, /*get_context=*/nullptr,
      /*lookup_context=*/nullptr));
  ASSERT_TRUE(!reader->KeyMayMatch(
      "foo", /*prefix_extractor=*/nullptr, /*block_offset=*/9000,
      /*no_io=*/false, /*const_ikey_ptr=*/nullptr, /*get_context=*/nullptr,
      /*lookup_context=*/nullptr));
  ASSERT_TRUE(!reader->KeyMayMatch(
      "bar", /*prefix_extractor=*/nullptr, /*block_offset=*/9000,
      /*no_io=*/false, /*const_ikey_ptr=*/nullptr, /*get_context=*/nullptr,
      /*lookup_context=*/nullptr));

  delete builder;
  delete reader;
}
||||
|
||||
} // namespace ROCKSDB_NAMESPACE
|
||||
|
||||
// Standard gtest entry point: run every TEST_F in this file.
int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
Loading…
Reference in new issue