|
|
|
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
|
|
|
// This source code is licensed under both the GPLv2 (found in the
|
|
|
|
// COPYING file in the root directory) and Apache 2.0 License
|
|
|
|
// (found in the LICENSE.Apache file in the root directory).
|
|
|
|
|
|
|
|
#include <map>
|
|
|
|
|
|
|
|
#include "rocksdb/filter_policy.h"
|
|
|
|
|
// (stray git-blame commit annotation removed; not part of the source)
|
|
|
#include "table/block_based/block_based_table_reader.h"
|
|
|
|
#include "table/block_based/partitioned_filter_block.h"
|
|
|
|
#include "table/block_based/filter_policy_internal.h"
|
|
|
|
|
|
|
|
#include "index_builder.h"
|
|
|
|
#include "logging/logging.h"
|
|
|
|
#include "test_util/testharness.h"
|
|
|
|
#include "test_util/testutil.h"
|
|
|
|
#include "util/coding.h"
|
|
|
|
#include "util/hash.h"
|
|
|
|
|
|
|
|
namespace rocksdb {
|
|
|
|
|
|
|
|
// Maps each filter partition's block offset to the raw filter ("bloom")
// contents written for it. Shared between the Write() helper below, which
// records partitions as they are "written", and MyPartitionedFilterBlockReader,
// which rebuilds its filter map from these entries instead of doing real I/O.
std::map<uint64_t, std::string> blooms;
|
class MockedBlockBasedTable : public BlockBasedTable {
|
|
|
|
public:
|
Move the filter readers out of the block cache (#5504)
Summary:
Currently, when the block cache is used for the filter block, it is not
really the block itself that is stored in the cache but a FilterBlockReader
object. Since this object is not pure data (it has, for instance, pointers that
might dangle, including in one case a back pointer to the TableReader), it's not
really sharable. To avoid the issues around this, the current code erases the
cache entries when the TableReader is closed (which, BTW, is not sufficient
since a concurrent TableReader might have picked up the object in the meantime).
Instead of doing this, the patch moves the FilterBlockReader out of the cache
altogether, and decouples the filter reader object from the filter block.
In particular, instead of the TableReader owning, or caching/pinning the
FilterBlockReader (based on the customer's settings), with the change the
TableReader unconditionally owns the FilterBlockReader, which in turn
owns/caches/pins the filter block. This change also enables us to reuse the code
paths historically used for data blocks for filters as well.
Note:
Eviction statistics for filter blocks are temporarily broken. We plan to fix this in a
separate phase.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5504
Test Plan: make asan_check
Differential Revision: D16036974
Pulled By: ltamasi
fbshipit-source-id: 770f543c5fb4ed126fd1e04bfd3809cf4ff9c091
5 years ago
|
|
|
MockedBlockBasedTable(Rep* rep, PartitionedIndexBuilder* pib)
|
|
|
|
: BlockBasedTable(rep, /*block_cache_tracer=*/nullptr) {
|
|
|
|
// Initialize what Open normally does as much as necessary for the test
|
Move the filter readers out of the block cache (#5504)
Summary:
Currently, when the block cache is used for the filter block, it is not
really the block itself that is stored in the cache but a FilterBlockReader
object. Since this object is not pure data (it has, for instance, pointers that
might dangle, including in one case a back pointer to the TableReader), it's not
really sharable. To avoid the issues around this, the current code erases the
cache entries when the TableReader is closed (which, BTW, is not sufficient
since a concurrent TableReader might have picked up the object in the meantime).
Instead of doing this, the patch moves the FilterBlockReader out of the cache
altogether, and decouples the filter reader object from the filter block.
In particular, instead of the TableReader owning, or caching/pinning the
FilterBlockReader (based on the customer's settings), with the change the
TableReader unconditionally owns the FilterBlockReader, which in turn
owns/caches/pins the filter block. This change also enables us to reuse the code
paths historically used for data blocks for filters as well.
Note:
Eviction statistics for filter blocks are temporarily broken. We plan to fix this in a
separate phase.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5504
Test Plan: make asan_check
Differential Revision: D16036974
Pulled By: ltamasi
fbshipit-source-id: 770f543c5fb4ed126fd1e04bfd3809cf4ff9c091
5 years ago
|
|
|
rep->index_key_includes_seq = pib->seperator_is_key_plus_seq();
|
|
|
|
rep->index_value_is_full = !pib->get_use_value_delta_encoding();
|
|
|
|
}
|
Move the filter readers out of the block cache (#5504)
Summary:
Currently, when the block cache is used for the filter block, it is not
really the block itself that is stored in the cache but a FilterBlockReader
object. Since this object is not pure data (it has, for instance, pointers that
might dangle, including in one case a back pointer to the TableReader), it's not
really sharable. To avoid the issues around this, the current code erases the
cache entries when the TableReader is closed (which, BTW, is not sufficient
since a concurrent TableReader might have picked up the object in the meantime).
Instead of doing this, the patch moves the FilterBlockReader out of the cache
altogether, and decouples the filter reader object from the filter block.
In particular, instead of the TableReader owning, or caching/pinning the
FilterBlockReader (based on the customer's settings), with the change the
TableReader unconditionally owns the FilterBlockReader, which in turn
owns/caches/pins the filter block. This change also enables us to reuse the code
paths historically used for data blocks for filters as well.
Note:
Eviction statistics for filter blocks are temporarily broken. We plan to fix this in a
separate phase.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5504
Test Plan: make asan_check
Differential Revision: D16036974
Pulled By: ltamasi
fbshipit-source-id: 770f543c5fb4ed126fd1e04bfd3809cf4ff9c091
5 years ago
|
|
|
};
|
|
|
|
|
Move the filter readers out of the block cache (#5504)
Summary:
Currently, when the block cache is used for the filter block, it is not
really the block itself that is stored in the cache but a FilterBlockReader
object. Since this object is not pure data (it has, for instance, pointers that
might dangle, including in one case a back pointer to the TableReader), it's not
really sharable. To avoid the issues around this, the current code erases the
cache entries when the TableReader is closed (which, BTW, is not sufficient
since a concurrent TableReader might have picked up the object in the meantime).
Instead of doing this, the patch moves the FilterBlockReader out of the cache
altogether, and decouples the filter reader object from the filter block.
In particular, instead of the TableReader owning, or caching/pinning the
FilterBlockReader (based on the customer's settings), with the change the
TableReader unconditionally owns the FilterBlockReader, which in turn
owns/caches/pins the filter block. This change also enables us to reuse the code
paths historically used for data blocks for filters as well.
Note:
Eviction statistics for filter blocks are temporarily broken. We plan to fix this in a
separate phase.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5504
Test Plan: make asan_check
Differential Revision: D16036974
Pulled By: ltamasi
fbshipit-source-id: 770f543c5fb4ed126fd1e04bfd3809cf4ff9c091
5 years ago
|
|
|
class MyPartitionedFilterBlockReader : public PartitionedFilterBlockReader {
|
|
|
|
public:
|
|
|
|
MyPartitionedFilterBlockReader(BlockBasedTable* t,
|
|
|
|
CachableEntry<Block>&& filter_block)
|
|
|
|
: PartitionedFilterBlockReader(t, std::move(filter_block)) {
|
|
|
|
for (const auto& pair : blooms) {
|
Move the filter readers out of the block cache (#5504)
Summary:
Currently, when the block cache is used for the filter block, it is not
really the block itself that is stored in the cache but a FilterBlockReader
object. Since this object is not pure data (it has, for instance, pointers that
might dangle, including in one case a back pointer to the TableReader), it's not
really sharable. To avoid the issues around this, the current code erases the
cache entries when the TableReader is closed (which, BTW, is not sufficient
since a concurrent TableReader might have picked up the object in the meantime).
Instead of doing this, the patch moves the FilterBlockReader out of the cache
altogether, and decouples the filter reader object from the filter block.
In particular, instead of the TableReader owning, or caching/pinning the
FilterBlockReader (based on the customer's settings), with the change the
TableReader unconditionally owns the FilterBlockReader, which in turn
owns/caches/pins the filter block. This change also enables us to reuse the code
paths historically used for data blocks for filters as well.
Note:
Eviction statistics for filter blocks are temporarily broken. We plan to fix this in a
separate phase.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5504
Test Plan: make asan_check
Differential Revision: D16036974
Pulled By: ltamasi
fbshipit-source-id: 770f543c5fb4ed126fd1e04bfd3809cf4ff9c091
5 years ago
|
|
|
const uint64_t offset = pair.first;
|
|
|
|
const std::string& bloom = pair.second;
|
|
|
|
|
|
|
|
assert(t);
|
|
|
|
assert(t->get_rep());
|
|
|
|
CachableEntry<ParsedFullFilterBlock> block(
|
|
|
|
new ParsedFullFilterBlock(
|
|
|
|
t->get_rep()->table_options.filter_policy.get(),
|
|
|
|
BlockContents(Slice(bloom))),
|
|
|
|
nullptr /* cache */, nullptr /* cache_handle */,
|
|
|
|
true /* own_value */);
|
Move the filter readers out of the block cache (#5504)
Summary:
Currently, when the block cache is used for the filter block, it is not
really the block itself that is stored in the cache but a FilterBlockReader
object. Since this object is not pure data (it has, for instance, pointers that
might dangle, including in one case a back pointer to the TableReader), it's not
really sharable. To avoid the issues around this, the current code erases the
cache entries when the TableReader is closed (which, BTW, is not sufficient
since a concurrent TableReader might have picked up the object in the meantime).
Instead of doing this, the patch moves the FilterBlockReader out of the cache
altogether, and decouples the filter reader object from the filter block.
In particular, instead of the TableReader owning, or caching/pinning the
FilterBlockReader (based on the customer's settings), with the change the
TableReader unconditionally owns the FilterBlockReader, which in turn
owns/caches/pins the filter block. This change also enables us to reuse the code
paths historically used for data blocks for filters as well.
Note:
Eviction statistics for filter blocks are temporarily broken. We plan to fix this in a
separate phase.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5504
Test Plan: make asan_check
Differential Revision: D16036974
Pulled By: ltamasi
fbshipit-source-id: 770f543c5fb4ed126fd1e04bfd3809cf4ff9c091
5 years ago
|
|
|
filter_map_[offset] = std::move(block);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
class PartitionedFilterBlockTest
|
|
|
|
: public testing::Test,
|
|
|
|
virtual public ::testing::WithParamInterface<uint32_t> {
|
|
|
|
public:
|
Move the filter readers out of the block cache (#5504)
Summary:
Currently, when the block cache is used for the filter block, it is not
really the block itself that is stored in the cache but a FilterBlockReader
object. Since this object is not pure data (it has, for instance, pointers that
might dangle, including in one case a back pointer to the TableReader), it's not
really sharable. To avoid the issues around this, the current code erases the
cache entries when the TableReader is closed (which, BTW, is not sufficient
since a concurrent TableReader might have picked up the object in the meantime).
Instead of doing this, the patch moves the FilterBlockReader out of the cache
altogether, and decouples the filter reader object from the filter block.
In particular, instead of the TableReader owning, or caching/pinning the
FilterBlockReader (based on the customer's settings), with the change the
TableReader unconditionally owns the FilterBlockReader, which in turn
owns/caches/pins the filter block. This change also enables us to reuse the code
paths historically used for data blocks for filters as well.
Note:
Eviction statistics for filter blocks are temporarily broken. We plan to fix this in a
separate phase.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5504
Test Plan: make asan_check
Differential Revision: D16036974
Pulled By: ltamasi
fbshipit-source-id: 770f543c5fb4ed126fd1e04bfd3809cf4ff9c091
5 years ago
|
|
|
Options options_;
|
|
|
|
ImmutableCFOptions ioptions_;
|
|
|
|
EnvOptions env_options_;
|
|
|
|
BlockBasedTableOptions table_options_;
|
Move the filter readers out of the block cache (#5504)
Summary:
Currently, when the block cache is used for the filter block, it is not
really the block itself that is stored in the cache but a FilterBlockReader
object. Since this object is not pure data (it has, for instance, pointers that
might dangle, including in one case a back pointer to the TableReader), it's not
really sharable. To avoid the issues around this, the current code erases the
cache entries when the TableReader is closed (which, BTW, is not sufficient
since a concurrent TableReader might have picked up the object in the meantime).
Instead of doing this, the patch moves the FilterBlockReader out of the cache
altogether, and decouples the filter reader object from the filter block.
In particular, instead of the TableReader owning, or caching/pinning the
FilterBlockReader (based on the customer's settings), with the change the
TableReader unconditionally owns the FilterBlockReader, which in turn
owns/caches/pins the filter block. This change also enables us to reuse the code
paths historically used for data blocks for filters as well.
Note:
Eviction statistics for filter blocks are temporarily broken. We plan to fix this in a
separate phase.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5504
Test Plan: make asan_check
Differential Revision: D16036974
Pulled By: ltamasi
fbshipit-source-id: 770f543c5fb4ed126fd1e04bfd3809cf4ff9c091
5 years ago
|
|
|
InternalKeyComparator icomp_;
|
|
|
|
std::unique_ptr<BlockBasedTable> table_;
|
|
|
|
std::shared_ptr<Cache> cache_;
|
|
|
|
int bits_per_key_;
|
|
|
|
|
Move the filter readers out of the block cache (#5504)
Summary:
Currently, when the block cache is used for the filter block, it is not
really the block itself that is stored in the cache but a FilterBlockReader
object. Since this object is not pure data (it has, for instance, pointers that
might dangle, including in one case a back pointer to the TableReader), it's not
really sharable. To avoid the issues around this, the current code erases the
cache entries when the TableReader is closed (which, BTW, is not sufficient
since a concurrent TableReader might have picked up the object in the meantime).
Instead of doing this, the patch moves the FilterBlockReader out of the cache
altogether, and decouples the filter reader object from the filter block.
In particular, instead of the TableReader owning, or caching/pinning the
FilterBlockReader (based on the customer's settings), with the change the
TableReader unconditionally owns the FilterBlockReader, which in turn
owns/caches/pins the filter block. This change also enables us to reuse the code
paths historically used for data blocks for filters as well.
Note:
Eviction statistics for filter blocks are temporarily broken. We plan to fix this in a
separate phase.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5504
Test Plan: make asan_check
Differential Revision: D16036974
Pulled By: ltamasi
fbshipit-source-id: 770f543c5fb4ed126fd1e04bfd3809cf4ff9c091
5 years ago
|
|
|
PartitionedFilterBlockTest()
|
|
|
|
: ioptions_(options_),
|
|
|
|
env_options_(options_),
|
|
|
|
icomp_(options_.comparator),
|
|
|
|
bits_per_key_(10) {
|
|
|
|
table_options_.filter_policy.reset(
|
|
|
|
NewBloomFilterPolicy(bits_per_key_, false));
|
|
|
|
table_options_.format_version = GetParam();
|
|
|
|
table_options_.index_block_restart_interval = 3;
|
|
|
|
}
|
|
|
|
|
|
|
|
~PartitionedFilterBlockTest() override {}

// Keys added to the filters, and keys that are expected not to match.
const std::string keys[4] = {"afoo", "bar", "box", "hello"};
const std::string missing_keys[2] = {"missing", "other"};
|
|
|
|
|
|
|
|
uint64_t MaxIndexSize() {
|
|
|
|
int num_keys = sizeof(keys) / sizeof(*keys);
|
|
|
|
uint64_t max_key_size = 0;
|
|
|
|
for (int i = 1; i < num_keys; i++) {
|
|
|
|
max_key_size = std::max(max_key_size, static_cast<uint64_t>(keys[i].size()));
|
|
|
|
}
|
|
|
|
uint64_t max_index_size = num_keys * (max_key_size + 8 /*handle*/);
|
|
|
|
return max_index_size;
|
|
|
|
}
|
|
|
|
|
|
|
|
uint64_t MaxFilterSize() {
|
|
|
|
int num_keys = sizeof(keys) / sizeof(*keys);
|
|
|
|
// General, rough over-approximation
|
|
|
|
return num_keys * bits_per_key_ + (CACHE_LINE_SIZE * 8 + /*metadata*/ 5);
|
|
|
|
}
|
|
|
|
|
|
|
|
uint64_t last_offset = 10;
|
|
|
|
BlockHandle Write(const Slice& slice) {
|
|
|
|
BlockHandle bh(last_offset + 1, slice.size());
|
|
|
|
blooms[bh.offset()] = slice.ToString();
|
|
|
|
last_offset += bh.size();
|
|
|
|
return bh;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Creates a partitioned index builder with value delta encoding disabled
// (the second argument is use_value_delta_encoding).
PartitionedIndexBuilder* NewIndexBuilder() {
  const bool kValueDeltaEncoded = true;
  return PartitionedIndexBuilder::CreateIndexBuilder(
      &icomp_, !kValueDeltaEncoded, table_options_);
}
|
|
|
|
|
|
|
|
// Creates a PartitionedFilterBlockBuilder. The partition size is derived
// from metadata_block_size scaled down by block_size_deviation (rounded up,
// clamped to at least 1).
PartitionedFilterBlockBuilder* NewBuilder(
    PartitionedIndexBuilder* const p_index_builder,
    const SliceTransform* prefix_extractor = nullptr) {
  assert(table_options_.block_size_deviation <= 100);
  auto partition_size = static_cast<uint32_t>(
      ((table_options_.metadata_block_size *
        (100 - table_options_.block_size_deviation)) +
       99) /
      100);
  partition_size = std::max(partition_size, static_cast<uint32_t>(1));
  const bool kValueDeltaEncoded = true;
  return new PartitionedFilterBlockBuilder(
      prefix_extractor, table_options_.whole_key_filtering,
      // The filter bits builder is chosen based on table_options_ (e.g.
      // format_version selects the Bloom filter implementation).
      FilterBuildingContext(table_options_).GetBuilder(),
      table_options_.index_block_restart_interval, !kValueDeltaEncoded,
      p_index_builder, partition_size);
}
|
|
|
|
|
|
|
|
// Finishes the builder, "writing" each filter partition via Write() until
// Finish() stops reporting Incomplete, then constructs a mocked table (kept
// in table_) and returns a reader whose filter map is seeded from the
// recorded partitions. Caller owns the returned reader.
PartitionedFilterBlockReader* NewReader(
    PartitionedFilterBlockBuilder* builder, PartitionedIndexBuilder* pib) {
  BlockHandle bh;
  Status status;
  Slice slice;
  do {
    slice = builder->Finish(bh, &status);
    bh = Write(slice);
  } while (status.IsIncomplete());

  constexpr bool skip_filters = false;
  constexpr int level = 0;
  constexpr bool immortal_table = false;
  table_.reset(new MockedBlockBasedTable(
      new BlockBasedTable::Rep(ioptions_, env_options_, table_options_,
                               icomp_, skip_filters, level, immortal_table),
      pib));
  // The last slice produced by Finish() is the top-level filter index block.
  BlockContents contents(slice);
  CachableEntry<Block> block(
      new Block(std::move(contents), kDisableGlobalSequenceNumber,
                0 /* read_amp_bytes_per_bit */, nullptr),
      nullptr /* cache */, nullptr /* cache_handle */, true /* own_value */);
  auto reader =
      new MyPartitionedFilterBlockReader(table_.get(), std::move(block));
  return reader;
}
|
|
|
|
|
|
|
|
// Constructs a PartitionedFilterBlockReader from the given filter/index
// builders and checks its answers: every key that was added must be
// reported as a possible match, and every missing key must be filtered
// out -- unless `empty` is set, in which case an empty filter is expected
// to answer "may match" for everything.
void VerifyReader(PartitionedFilterBlockBuilder* builder,
                  PartitionedIndexBuilder* pib, bool empty = false,
                  const SliceTransform* prefix_extractor = nullptr) {
  std::unique_ptr<PartitionedFilterBlockReader> reader(
      NewReader(builder, pib));

  // Every key that went into the builder must be a possible match.
  for (const auto& user_key : keys) {
    InternalKey internal_key(user_key, 0, ValueType::kTypeValue);
    const Slice internal_key_slice(*internal_key.rep());
    ASSERT_TRUE(reader->KeyMayMatch(user_key, prefix_extractor, kNotValid,
                                    /*no_io=*/false, &internal_key_slice,
                                    /*get_context=*/nullptr,
                                    /*lookup_context=*/nullptr));
  }

  {
    // Querying the same key a second time must still match (the reader
    // must be stateless with respect to repeated lookups).
    InternalKey internal_key(keys[0], 0, ValueType::kTypeValue);
    const Slice internal_key_slice(*internal_key.rep());
    ASSERT_TRUE(reader->KeyMayMatch(keys[0], prefix_extractor, kNotValid,
                                    /*no_io=*/false, &internal_key_slice,
                                    /*get_context=*/nullptr,
                                    /*lookup_context=*/nullptr));
  }

  // Keys that were never added: an empty filter conservatively matches
  // everything, while a populated one is expected to reject them
  // (assuming a good hash function, i.e., no false positives on these).
  for (const auto& user_key : missing_keys) {
    InternalKey internal_key(user_key, 0, ValueType::kTypeValue);
    const Slice internal_key_slice(*internal_key.rep());
    const bool may_match = reader->KeyMayMatch(
        user_key, prefix_extractor, kNotValid, /*no_io=*/false,
        &internal_key_slice, /*get_context=*/nullptr,
        /*lookup_context=*/nullptr);
    if (empty) {
      ASSERT_TRUE(may_match);
    } else {
      ASSERT_FALSE(may_match);
    }
  }
}
|
|
|
|
|
|
|
|
int TestBlockPerKey() {
|
|
|
|
std::unique_ptr<PartitionedIndexBuilder> pib(NewIndexBuilder());
|
|
|
|
std::unique_ptr<PartitionedFilterBlockBuilder> builder(
|
|
|
|
NewBuilder(pib.get()));
|
|
|
|
int i = 0;
|
|
|
|
builder->Add(keys[i]);
|
|
|
|
CutABlock(pib.get(), keys[i], keys[i + 1]);
|
|
|
|
i++;
|
|
|
|
builder->Add(keys[i]);
|
|
|
|
CutABlock(pib.get(), keys[i], keys[i + 1]);
|
|
|
|
i++;
|
|
|
|
builder->Add(keys[i]);
|
|
|
|
builder->Add(keys[i]);
|
|
|
|
CutABlock(pib.get(), keys[i], keys[i + 1]);
|
|
|
|
i++;
|
|
|
|
builder->Add(keys[i]);
|
|
|
|
CutABlock(pib.get(), keys[i]);
|
|
|
|
|
|
|
|
VerifyReader(builder.get(), pib.get());
|
|
|
|
return CountNumOfIndexPartitions(pib.get());
|
|
|
|
}
|
|
|
|
|
|
|
|
void TestBlockPerTwoKeys(const SliceTransform* prefix_extractor = nullptr) {
|
|
|
|
std::unique_ptr<PartitionedIndexBuilder> pib(NewIndexBuilder());
|
|
|
|
std::unique_ptr<PartitionedFilterBlockBuilder> builder(
|
|
|
|
NewBuilder(pib.get(), prefix_extractor));
|
|
|
|
int i = 0;
|
|
|
|
builder->Add(keys[i]);
|
|
|
|
i++;
|
|
|
|
builder->Add(keys[i]);
|
|
|
|
CutABlock(pib.get(), keys[i], keys[i + 1]);
|
|
|
|
i++;
|
|
|
|
builder->Add(keys[i]);
|
|
|
|
builder->Add(keys[i]);
|
|
|
|
i++;
|
|
|
|
builder->Add(keys[i]);
|
|
|
|
CutABlock(pib.get(), keys[i]);
|
|
|
|
|
|
|
|
VerifyReader(builder.get(), pib.get(), prefix_extractor);
|
|
|
|
}
|
|
|
|
|
|
|
|
void TestBlockPerAllKeys() {
|
|
|
|
std::unique_ptr<PartitionedIndexBuilder> pib(NewIndexBuilder());
|
|
|
|
std::unique_ptr<PartitionedFilterBlockBuilder> builder(
|
|
|
|
NewBuilder(pib.get()));
|
|
|
|
int i = 0;
|
|
|
|
builder->Add(keys[i]);
|
|
|
|
i++;
|
|
|
|
builder->Add(keys[i]);
|
|
|
|
i++;
|
|
|
|
builder->Add(keys[i]);
|
|
|
|
builder->Add(keys[i]);
|
|
|
|
i++;
|
|
|
|
builder->Add(keys[i]);
|
|
|
|
CutABlock(pib.get(), keys[i]);
|
|
|
|
|
|
|
|
VerifyReader(builder.get(), pib.get());
|
|
|
|
}
|
|
|
|
|
|
|
|
// Simulates cutting the final data block: adds an index entry for
// `user_key` with no successor key (so the separator cannot be
// shortened) and a placeholder block handle.
void CutABlock(PartitionedIndexBuilder* builder,
               const std::string& user_key) {
  InternalKey ikey(user_key, 0, ValueType::kTypeValue);
  std::string key(*ikey.rep());
  BlockHandle dont_care_block_handle(1, 1);
  builder->AddIndexEntry(&key, nullptr, dont_care_block_handle);
}
|
|
|
|
|
|
|
|
// Simulates cutting a data block between `user_key` and `next_user_key`:
// adds an index entry for `user_key`, passing the next key so the
// builder can compute a shortened separator, plus a placeholder handle.
void CutABlock(PartitionedIndexBuilder* builder, const std::string& user_key,
               const std::string& next_user_key) {
  InternalKey ikey(user_key, 0, ValueType::kTypeValue);
  InternalKey next_ikey(next_user_key, 0, ValueType::kTypeValue);
  std::string key(*ikey.rep());
  std::string next_key(*next_ikey.rep());
  BlockHandle dont_care_block_handle(1, 1);
  Slice next_key_slice(next_key.data(), next_key.size());
  builder->AddIndexEntry(&key, &next_key_slice, dont_care_block_handle);
}
|
|
|
|
|
|
|
|
// Drains the index builder by calling Finish() until it stops reporting
// Incomplete, counting the partitions emitted. The final Finish() call
// produces the top-level (2nd level) index, which is excluded from the
// returned count.
int CountNumOfIndexPartitions(PartitionedIndexBuilder* builder) {
  IndexBuilder::IndexBlocks dont_care_ib;
  BlockHandle dont_care_bh(10, 10);
  int num_partitions = 0;
  while (true) {
    const Status s = builder->Finish(&dont_care_ib, dont_care_bh);
    ++num_partitions;
    if (!s.IsIncomplete()) {
      break;
    }
  }
  return num_partitions - 1;  // the last one is the 2nd level index
}
|
|
|
|
};
|
|
|
|
|
|
|
|
// Run the whole suite under both the default and the latest on-disk
// format version (the parameter selects format_version for the table).
INSTANTIATE_TEST_CASE_P(FormatDef, PartitionedFilterBlockTest,
                        testing::Values(test::kDefaultFormatVersion));

INSTANTIATE_TEST_CASE_P(FormatLatest, PartitionedFilterBlockTest,
                        testing::Values(test::kLatestFormatVersion));
|
|
|
|
|
|
|
|
// A reader over a builder with no keys must conservatively answer
// "may match" for every query.
TEST_P(PartitionedFilterBlockTest, EmptyBuilder) {
  std::unique_ptr<PartitionedIndexBuilder> pib(NewIndexBuilder());
  std::unique_ptr<PartitionedFilterBlockBuilder> builder(NewBuilder(pib.get()));
  VerifyReader(builder.get(), pib.get(), /*empty=*/true);
}
|
|
|
|
|
|
|
|
// Sweep metadata_block_size from 1 up to the maximum index size; every
// configuration must still work when all keys land in a single block.
TEST_P(PartitionedFilterBlockTest, OneBlock) {
  const uint64_t max_index_size = MaxIndexSize();
  for (uint64_t block_size = 1; block_size <= max_index_size; block_size++) {
    table_options_.metadata_block_size = block_size;
    TestBlockPerAllKeys();
  }
}
|
|
|
|
|
|
|
|
// Sweep metadata_block_size from 1 up to the maximum index size with a
// two-keys-per-partition layout; all configurations must verify.
TEST_P(PartitionedFilterBlockTest, TwoBlocksPerKey) {
  const uint64_t max_index_size = MaxIndexSize();
  for (uint64_t block_size = 1; block_size <= max_index_size; block_size++) {
    table_options_.metadata_block_size = block_size;
    TestBlockPerTwoKeys();
  }
}
|
|
|
|
|
|
|
|
// This reproduces the bug that a prefix is the same among multiple consecutive
|
|
|
|
// blocks but the bug would add it only to the first block.
|
|
|
|
TEST_P(PartitionedFilterBlockTest, SamePrefixInMultipleBlocks) {
|
|
|
|
// some small number to cause partition cuts
|
|
|
|
table_options_.metadata_block_size = 1;
|
|
|
|
std::unique_ptr<const SliceTransform> prefix_extractor
|
|
|
|
(rocksdb::NewFixedPrefixTransform(1));
|
|
|
|
std::unique_ptr<PartitionedIndexBuilder> pib(NewIndexBuilder());
|
|
|
|
std::unique_ptr<PartitionedFilterBlockBuilder> builder(
|
|
|
|
NewBuilder(pib.get(), prefix_extractor.get()));
|
|
|
|
const std::string pkeys[3] = {"p-key10", "p-key20", "p-key30"};
|
|
|
|
builder->Add(pkeys[0]);
|
|
|
|
CutABlock(pib.get(), pkeys[0], pkeys[1]);
|
|
|
|
builder->Add(pkeys[1]);
|
|
|
|
CutABlock(pib.get(), pkeys[1], pkeys[2]);
|
|
|
|
builder->Add(pkeys[2]);
|
|
|
|
CutABlock(pib.get(), pkeys[2]);
|
|
|
|
std::unique_ptr<PartitionedFilterBlockReader> reader(
|
Move the filter readers out of the block cache (#5504)
Summary:
Currently, when the block cache is used for the filter block, it is not
really the block itself that is stored in the cache but a FilterBlockReader
object. Since this object is not pure data (it has, for instance, pointers that
might dangle, including in one case a back pointer to the TableReader), it's not
really sharable. To avoid the issues around this, the current code erases the
cache entries when the TableReader is closed (which, BTW, is not sufficient
since a concurrent TableReader might have picked up the object in the meantime).
Instead of doing this, the patch moves the FilterBlockReader out of the cache
altogether, and decouples the filter reader object from the filter block.
In particular, instead of the TableReader owning, or caching/pinning the
FilterBlockReader (based on the customer's settings), with the change the
TableReader unconditionally owns the FilterBlockReader, which in turn
owns/caches/pins the filter block. This change also enables us to reuse the code
paths historically used for data blocks for filters as well.
Note:
Eviction statistics for filter blocks are temporarily broken. We plan to fix this in a
separate phase.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5504
Test Plan: make asan_check
Differential Revision: D16036974
Pulled By: ltamasi
fbshipit-source-id: 770f543c5fb4ed126fd1e04bfd3809cf4ff9c091
5 years ago
|
|
|
NewReader(builder.get(), pib.get()));
|
|
|
|
for (auto key : pkeys) {
|
|
|
|
auto ikey = InternalKey(key, 0, ValueType::kTypeValue);
|
|
|
|
const Slice ikey_slice = Slice(*ikey.rep());
|
|
|
|
ASSERT_TRUE(reader->PrefixMayMatch(
|
|
|
|
prefix_extractor->Transform(key), prefix_extractor.get(), kNotValid,
|
Move the filter readers out of the block cache (#5504)
Summary:
Currently, when the block cache is used for the filter block, it is not
really the block itself that is stored in the cache but a FilterBlockReader
object. Since this object is not pure data (it has, for instance, pointers that
might dangle, including in one case a back pointer to the TableReader), it's not
really sharable. To avoid the issues around this, the current code erases the
cache entries when the TableReader is closed (which, BTW, is not sufficient
since a concurrent TableReader might have picked up the object in the meantime).
Instead of doing this, the patch moves the FilterBlockReader out of the cache
altogether, and decouples the filter reader object from the filter block.
In particular, instead of the TableReader owning, or caching/pinning the
FilterBlockReader (based on the customer's settings), with the change the
TableReader unconditionally owns the FilterBlockReader, which in turn
owns/caches/pins the filter block. This change also enables us to reuse the code
paths historically used for data blocks for filters as well.
Note:
Eviction statistics for filter blocks are temporarily broken. We plan to fix this in a
separate phase.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5504
Test Plan: make asan_check
Differential Revision: D16036974
Pulled By: ltamasi
fbshipit-source-id: 770f543c5fb4ed126fd1e04bfd3809cf4ff9c091
5 years ago
|
|
|
/*no_io=*/false, &ikey_slice, /*get_context=*/nullptr,
|
|
|
|
/*lookup_context=*/nullptr));
|
|
|
|
}
|
|
|
|
// Non-existent keys but with the same prefix
|
|
|
|
const std::string pnonkeys[4] = {"p-key9", "p-key11", "p-key21", "p-key31"};
|
|
|
|
for (auto key : pnonkeys) {
|
|
|
|
auto ikey = InternalKey(key, 0, ValueType::kTypeValue);
|
|
|
|
const Slice ikey_slice = Slice(*ikey.rep());
|
|
|
|
ASSERT_TRUE(reader->PrefixMayMatch(
|
|
|
|
prefix_extractor->Transform(key), prefix_extractor.get(), kNotValid,
|
|
|
|
/*no_io=*/false, &ikey_slice, /*get_context=*/nullptr,
|
|
|
|
/*lookup_context=*/nullptr));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// This reproduces the bug in format_version=3 that the seeking the prefix will
|
|
|
|
// lead us to the partition before the one that has filter for the prefix.
|
|
|
|
TEST_P(PartitionedFilterBlockTest, PrefixInWrongPartitionBug) {
|
|
|
|
// some small number to cause partition cuts
|
|
|
|
table_options_.metadata_block_size = 1;
|
|
|
|
std::unique_ptr<const SliceTransform> prefix_extractor(
|
|
|
|
rocksdb::NewFixedPrefixTransform(2));
|
|
|
|
std::unique_ptr<PartitionedIndexBuilder> pib(NewIndexBuilder());
|
|
|
|
std::unique_ptr<PartitionedFilterBlockBuilder> builder(
|
|
|
|
NewBuilder(pib.get(), prefix_extractor.get()));
|
|
|
|
// In the bug, searching for prefix "p3" on an index with format version 3,
|
|
|
|
// will give the key "p3" and the partition of the keys that are <= p3, i.e.,
|
|
|
|
// p2-keys, where the filter for prefix "p3" does not exist.
|
|
|
|
const std::string pkeys[] = {"p1-key1", "p2-key2", "p3-key3", "p4-key3",
|
|
|
|
"p5-key3"};
|
|
|
|
builder->Add(pkeys[0]);
|
|
|
|
CutABlock(pib.get(), pkeys[0], pkeys[1]);
|
|
|
|
builder->Add(pkeys[1]);
|
|
|
|
CutABlock(pib.get(), pkeys[1], pkeys[2]);
|
|
|
|
builder->Add(pkeys[2]);
|
|
|
|
CutABlock(pib.get(), pkeys[2], pkeys[3]);
|
|
|
|
builder->Add(pkeys[3]);
|
|
|
|
CutABlock(pib.get(), pkeys[3], pkeys[4]);
|
|
|
|
builder->Add(pkeys[4]);
|
|
|
|
CutABlock(pib.get(), pkeys[4]);
|
|
|
|
std::unique_ptr<PartitionedFilterBlockReader> reader(
|
|
|
|
NewReader(builder.get(), pib.get()));
|
|
|
|
for (auto key : pkeys) {
|
|
|
|
auto prefix = prefix_extractor->Transform(key);
|
|
|
|
auto ikey = InternalKey(prefix, 0, ValueType::kTypeValue);
|
|
|
|
const Slice ikey_slice = Slice(*ikey.rep());
|
|
|
|
ASSERT_TRUE(reader->PrefixMayMatch(
|
|
|
|
prefix, prefix_extractor.get(), kNotValid,
|
|
|
|
/*no_io=*/false, &ikey_slice, /*get_context=*/nullptr,
|
|
|
|
/*lookup_context=*/nullptr));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Sweep metadata_block_size from 1 up to the maximum index size with a
// block-per-key layout; all configurations must verify.
TEST_P(PartitionedFilterBlockTest, OneBlockPerKey) {
  const uint64_t max_index_size = MaxIndexSize();
  for (uint64_t block_size = 1; block_size <= max_index_size; block_size++) {
    table_options_.metadata_block_size = block_size;
    TestBlockPerKey();
  }
}
|
|
|
|
|
|
|
|
// Checks the number of partitions produced at the two extremes of
// metadata_block_size.
TEST_P(PartitionedFilterBlockTest, PartitionCount) {
  const int num_keys = sizeof(keys) / sizeof(*keys);
  // A block size large enough for the whole index/filter yields exactly
  // one partition.
  table_options_.metadata_block_size =
      std::max(MaxIndexSize(), MaxFilterSize());
  int partitions = TestBlockPerKey();
  ASSERT_EQ(partitions, 1);
  // A low number ensures cutting a block after each key.
  table_options_.metadata_block_size = 1;
  partitions = TestBlockPerKey();
  ASSERT_EQ(partitions, num_keys - 1 /* last two keys make one flush */);
}
|
|
|
|
|
|
|
|
} // namespace rocksdb
|
|
|
|
|
|
|
|
// Standard gtest entry point: parse gtest command-line flags, then run
// every registered test and return the aggregate result.
int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
|