// Copyright (c) 2018-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#include "db/range_del_aggregator.h"

#include <memory>
#include <string>
#include <vector>

#include "db/db_test_util.h"
#include "db/dbformat.h"
#include "db/range_tombstone_fragmenter.h"
#include "test_util/testutil.h"
#include "util/vector_iterator.h"

namespace ROCKSDB_NAMESPACE {

class RangeDelAggregatorTest : public testing::Test {};

namespace {

static auto bytewise_icmp = InternalKeyComparator(BytewiseComparator());
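
// Serializes the given range tombstones into internal key/value pairs and
// wraps them in a VectorIterator for use as tombstone-fragmenter input.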
std::unique_ptr<InternalIterator> MakeRangeDelIter(
    const std::vector<RangeTombstone>& range_dels) {
  std::vector<std::string> keys, values;
  for (const auto& range_del : range_dels) {
    auto key_and_value = range_del.Serialize();
    keys.push_back(key_and_value.first.Encode().ToString());
    values.push_back(key_and_value.second.ToString());
  }
  return std::unique_ptr<VectorIterator>(
      new VectorIterator(keys, values, &bytewise_icmp));
}
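
// Builds one FragmentedRangeTombstoneList per input vector of range
// tombstones.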
std::vector<std::unique_ptr<FragmentedRangeTombstoneList>>
MakeFragmentedTombstoneLists(
    const std::vector<std::vector<RangeTombstone>>& range_dels_list) {
  std::vector<std::unique_ptr<FragmentedRangeTombstoneList>> fragment_lists;
  for (const auto& range_dels : range_dels_list) {
    auto range_del_iter = MakeRangeDelIter(range_dels);
    fragment_lists.emplace_back(new FragmentedRangeTombstoneList(
        std::move(range_del_iter), bytewise_icmp));
  }
  return fragment_lists;
}
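
// One tombstone (start, end, seqno) expected while scanning a
// TruncatedRangeDelIterator.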
struct TruncatedIterScanTestCase {
  ParsedInternalKey start;
  ParsedInternalKey end;
  SequenceNumber seq;
};
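
// A Seek()/SeekForPrev() target and the tombstone the iterator is expected
// to land on, or `invalid` if it should become invalid.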
struct TruncatedIterSeekTestCase {
  Slice target;
  ParsedInternalKey start;
  ParsedInternalKey end;
  SequenceNumber seq;
  bool invalid;
};
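
// Expected ShouldDelete() result for a point lookup key.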
struct ShouldDeleteTestCase {
  ParsedInternalKey lookup_key;
  bool result;
};
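
// Expected IsRangeOverlapped() result for the user-key range [start, end].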
struct IsRangeOverlappedTestCase {
  Slice start;
  Slice end;
  bool result;
};
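
// A tombstone endpoint that has not been truncated at a file boundary.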
ParsedInternalKey UncutEndpoint(const Slice& s) {
  return ParsedInternalKey(s, kMaxSequenceNumber, kTypeRangeDeletion);
}
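
// A point key (kTypeValue by default) with the given user key and seqno.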
ParsedInternalKey InternalValue(const Slice& key, SequenceNumber seq,
                                ValueType type = kTypeValue) {
  return ParsedInternalKey(key, seq, type);
}
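
// Scans `iter` forward and then backward, checking that the tombstones it
// yields match `expected_range_dels` exactly.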
void VerifyIterator(
    TruncatedRangeDelIterator* iter, const InternalKeyComparator& icmp,
    const std::vector<TruncatedIterScanTestCase>& expected_range_dels) {
  // Test forward iteration.
  iter->SeekToFirst();
  for (size_t i = 0; i < expected_range_dels.size(); i++, iter->Next()) {
    ASSERT_TRUE(iter->Valid());
    EXPECT_EQ(0, icmp.Compare(iter->start_key(), expected_range_dels[i].start));
    EXPECT_EQ(0, icmp.Compare(iter->end_key(), expected_range_dels[i].end));
    EXPECT_EQ(expected_range_dels[i].seq, iter->seq());
  }
  EXPECT_FALSE(iter->Valid());

  // Test reverse iteration.
  iter->SeekToLast();
  std::vector<TruncatedIterScanTestCase> reverse_expected_range_dels(
      expected_range_dels.rbegin(), expected_range_dels.rend());
  for (size_t i = 0; i < reverse_expected_range_dels.size();
       i++, iter->Prev()) {
    ASSERT_TRUE(iter->Valid());
    EXPECT_EQ(0, icmp.Compare(iter->start_key(),
                              reverse_expected_range_dels[i].start));
    EXPECT_EQ(
        0, icmp.Compare(iter->end_key(), reverse_expected_range_dels[i].end));
    EXPECT_EQ(reverse_expected_range_dels[i].seq, iter->seq());
  }
  EXPECT_FALSE(iter->Valid());
}
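
// Seeks `iter` to each target and checks that it lands on the expected
// tombstone, or becomes invalid when expected.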
void VerifySeek(TruncatedRangeDelIterator* iter,
                const InternalKeyComparator& icmp,
                const std::vector<TruncatedIterSeekTestCase>& test_cases) {
  for (const auto& test_case : test_cases) {
    iter->Seek(test_case.target);
    if (test_case.invalid) {
      ASSERT_FALSE(iter->Valid());
    } else {
      ASSERT_TRUE(iter->Valid());
      EXPECT_EQ(0, icmp.Compare(iter->start_key(), test_case.start));
      EXPECT_EQ(0, icmp.Compare(iter->end_key(), test_case.end));
      EXPECT_EQ(test_case.seq, iter->seq());
    }
  }
}
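
// Same as VerifySeek(), but using SeekForPrev().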
void VerifySeekForPrev(
    TruncatedRangeDelIterator* iter, const InternalKeyComparator& icmp,
    const std::vector<TruncatedIterSeekTestCase>& test_cases) {
  for (const auto& test_case : test_cases) {
    iter->SeekForPrev(test_case.target);
    if (test_case.invalid) {
      ASSERT_FALSE(iter->Valid());
    } else {
      ASSERT_TRUE(iter->Valid());
      EXPECT_EQ(0, icmp.Compare(iter->start_key(), test_case.start));
      EXPECT_EQ(0, icmp.Compare(iter->end_key(), test_case.end));
      EXPECT_EQ(test_case.seq, iter->seq());
    }
  }
}
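
// Runs the cases in order in forward-traversal mode, then in reverse order
// in backward-traversal mode.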
void VerifyShouldDelete(RangeDelAggregator* range_del_agg,
                        const std::vector<ShouldDeleteTestCase>& test_cases) {
  for (const auto& test_case : test_cases) {
    EXPECT_EQ(
        test_case.result,
        range_del_agg->ShouldDelete(
            test_case.lookup_key, RangeDelPositioningMode::kForwardTraversal));
  }
  for (auto it = test_cases.rbegin(); it != test_cases.rend(); ++it) {
    const auto& test_case = *it;
    EXPECT_EQ(
        test_case.result,
        range_del_agg->ShouldDelete(
            test_case.lookup_key, RangeDelPositioningMode::kBackwardTraversal));
  }
}
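
// Checks IsRangeOverlapped() against the expected result for each range.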
void VerifyIsRangeOverlapped(
    ReadRangeDelAggregator* range_del_agg,
    const std::vector<IsRangeOverlappedTestCase>& test_cases) {
  for (const auto& test_case : test_cases) {
    EXPECT_EQ(test_case.result,
              range_del_agg->IsRangeOverlapped(test_case.start, test_case.end));
  }
}
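
// Checks the iterator's position against `tombstone` through both the
// InternalIterator and FragmentedRangeTombstoneIterator interfaces.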
void CheckIterPosition(const RangeTombstone& tombstone,
                       const FragmentedRangeTombstoneIterator* iter) {
  // Test InternalIterator interface.
  EXPECT_EQ(tombstone.start_key_, ExtractUserKey(iter->key()));
  EXPECT_EQ(tombstone.end_key_, iter->value());
  EXPECT_EQ(tombstone.seq_, iter->seq());

  // Test FragmentedRangeTombstoneIterator interface.
  EXPECT_EQ(tombstone.start_key_, iter->start_key());
  EXPECT_EQ(tombstone.end_key_, iter->end_key());
  EXPECT_EQ(tombstone.seq_, GetInternalKeySeqno(iter->key()));
}
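
// Scans `iter` from the front and checks that it yields exactly
// `expected_tombstones`, in order.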
void VerifyFragmentedRangeDels(
    FragmentedRangeTombstoneIterator* iter,
    const std::vector<RangeTombstone>& expected_tombstones) {
  iter->SeekToFirst();
  for (size_t i = 0; i < expected_tombstones.size(); i++, iter->Next()) {
    ASSERT_TRUE(iter->Valid());
    CheckIterPosition(expected_tombstones[i], iter);
  }
  EXPECT_FALSE(iter->Valid());
}

}  // namespace

TEST_F(RangeDelAggregatorTest, EmptyTruncatedIter) {
  auto range_del_iter = MakeRangeDelIter({});
  FragmentedRangeTombstoneList fragment_list(std::move(range_del_iter),
                                             bytewise_icmp);
  std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
      new FragmentedRangeTombstoneIterator(&fragment_list, bytewise_icmp,
                                           kMaxSequenceNumber));

  TruncatedRangeDelIterator iter(std::move(input_iter), &bytewise_icmp, nullptr,
                                 nullptr);

  iter.SeekToFirst();
  ASSERT_FALSE(iter.Valid());

  iter.SeekToLast();
  ASSERT_FALSE(iter.Valid());
}

TEST_F(RangeDelAggregatorTest, UntruncatedIter) {
  auto range_del_iter =
      MakeRangeDelIter({{"a", "e", 10}, {"e", "g", 8}, {"j", "n", 4}});
  FragmentedRangeTombstoneList fragment_list(std::move(range_del_iter),
                                             bytewise_icmp);
  std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
      new FragmentedRangeTombstoneIterator(&fragment_list, bytewise_icmp,
                                           kMaxSequenceNumber));

  TruncatedRangeDelIterator iter(std::move(input_iter), &bytewise_icmp, nullptr,
                                 nullptr);

  VerifyIterator(&iter, bytewise_icmp,
                 {{UncutEndpoint("a"), UncutEndpoint("e"), 10},
                  {UncutEndpoint("e"), UncutEndpoint("g"), 8},
                  {UncutEndpoint("j"), UncutEndpoint("n"), 4}});

  VerifySeek(
      &iter, bytewise_icmp,
      {{"d", UncutEndpoint("a"), UncutEndpoint("e"), 10},
       {"e", UncutEndpoint("e"), UncutEndpoint("g"), 8},
       {"ia", UncutEndpoint("j"), UncutEndpoint("n"), 4},
       {"n", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */},
       {"", UncutEndpoint("a"), UncutEndpoint("e"), 10}});

  VerifySeekForPrev(
      &iter, bytewise_icmp,
      {{"d", UncutEndpoint("a"), UncutEndpoint("e"), 10},
       {"e", UncutEndpoint("e"), UncutEndpoint("g"), 8},
       {"ia", UncutEndpoint("e"), UncutEndpoint("g"), 8},
       {"n", UncutEndpoint("j"), UncutEndpoint("n"), 4},
       {"", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */}});
}
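
// Same tombstones as above, but visible through a snapshot at seqno 9, which
// hides the [a, e) tombstone at seqno 10.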
TEST_F(RangeDelAggregatorTest, UntruncatedIterWithSnapshot) {
  auto range_del_iter =
      MakeRangeDelIter({{"a", "e", 10}, {"e", "g", 8}, {"j", "n", 4}});
  FragmentedRangeTombstoneList fragment_list(std::move(range_del_iter),
                                             bytewise_icmp);
  std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
      new FragmentedRangeTombstoneIterator(&fragment_list, bytewise_icmp,
                                           9 /* snapshot */));

  TruncatedRangeDelIterator iter(std::move(input_iter), &bytewise_icmp, nullptr,
                                 nullptr);

  VerifyIterator(&iter, bytewise_icmp,
                 {{UncutEndpoint("e"), UncutEndpoint("g"), 8},
                  {UncutEndpoint("j"), UncutEndpoint("n"), 4}});

  VerifySeek(
      &iter, bytewise_icmp,
      {{"d", UncutEndpoint("e"), UncutEndpoint("g"), 8},
       {"e", UncutEndpoint("e"), UncutEndpoint("g"), 8},
       {"ia", UncutEndpoint("j"), UncutEndpoint("n"), 4},
       {"n", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */},
       {"", UncutEndpoint("e"), UncutEndpoint("g"), 8}});

  VerifySeekForPrev(
      &iter, bytewise_icmp,
      {{"d", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */},
       {"e", UncutEndpoint("e"), UncutEndpoint("g"), 8},
       {"ia", UncutEndpoint("e"), UncutEndpoint("g"), 8},
       {"n", UncutEndpoint("j"), UncutEndpoint("n"), 4},
       {"", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */}});
}
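
// The file boundary keys d@7 and m@9 fall inside the first and last
// tombstones, so those tombstones' endpoints are truncated to the boundaries.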
TEST_F(RangeDelAggregatorTest, TruncatedIterPartiallyCutTombstones) {
  auto range_del_iter =
      MakeRangeDelIter({{"a", "e", 10}, {"e", "g", 8}, {"j", "n", 4}});
  FragmentedRangeTombstoneList fragment_list(std::move(range_del_iter),
                                             bytewise_icmp);
  std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
      new FragmentedRangeTombstoneIterator(&fragment_list, bytewise_icmp,
                                           kMaxSequenceNumber));

  InternalKey smallest("d", 7, kTypeValue);
  InternalKey largest("m", 9, kTypeValue);
  TruncatedRangeDelIterator iter(std::move(input_iter), &bytewise_icmp,
                                 &smallest, &largest);

  VerifyIterator(
      &iter, bytewise_icmp,
      {{InternalValue("d", 7), UncutEndpoint("e"), 10},
       {UncutEndpoint("e"), UncutEndpoint("g"), 8},
       {UncutEndpoint("j"), InternalValue("m", 8, kValueTypeForSeek), 4}});

  VerifySeek(
      &iter, bytewise_icmp,
      {{"d", InternalValue("d", 7), UncutEndpoint("e"), 10},
       {"e", UncutEndpoint("e"), UncutEndpoint("g"), 8},
{"ia", UncutEndpoint("j"), InternalValue("m", 8, kValueTypeForSeek), 4,
|
|
|
|
false /* invalid */},
|
|
|
|
{"n", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */},
|
|
|
|
{"", InternalValue("d", 7), UncutEndpoint("e"), 10}});

  VerifySeekForPrev(
      &iter, bytewise_icmp,
      {{"d", InternalValue("d", 7), UncutEndpoint("e"), 10},
       {"e", UncutEndpoint("e"), UncutEndpoint("g"), 8},
       {"ia", UncutEndpoint("e"), UncutEndpoint("g"), 8},
{"n", UncutEndpoint("j"), InternalValue("m", 8, kValueTypeForSeek), 4,
|
|
|
|
false /* invalid */},
|
|
|
|
{"", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */}});
|
|
|
|
}
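
// The file boundaries [f@7, i@9] lie strictly within [e, g) @ 8, so only the
// truncated [f, g) piece of that tombstone is visible.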
TEST_F(RangeDelAggregatorTest, TruncatedIterFullyCutTombstones) {
  auto range_del_iter =
      MakeRangeDelIter({{"a", "e", 10}, {"e", "g", 8}, {"j", "n", 4}});
  FragmentedRangeTombstoneList fragment_list(std::move(range_del_iter),
                                             bytewise_icmp);
  std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
      new FragmentedRangeTombstoneIterator(&fragment_list, bytewise_icmp,
                                           kMaxSequenceNumber));

  InternalKey smallest("f", 7, kTypeValue);
  InternalKey largest("i", 9, kTypeValue);
  TruncatedRangeDelIterator iter(std::move(input_iter), &bytewise_icmp,
                                 &smallest, &largest);

  VerifyIterator(&iter, bytewise_icmp,
                 {{InternalValue("f", 7), UncutEndpoint("g"), 8}});

  VerifySeek(
      &iter, bytewise_icmp,
      {{"d", InternalValue("f", 7), UncutEndpoint("g"), 8},
       {"f", InternalValue("f", 7), UncutEndpoint("g"), 8},
       {"j", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */}});

  VerifySeekForPrev(
      &iter, bytewise_icmp,
      {{"d", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */},
       {"f", InternalValue("f", 7), UncutEndpoint("g"), 8},
       {"j", InternalValue("f", 7), UncutEndpoint("g"), 8}});
}

TEST_F(RangeDelAggregatorTest, SingleIterInAggregator) {
  auto range_del_iter = MakeRangeDelIter({{"a", "e", 10}, {"c", "g", 8}});
  FragmentedRangeTombstoneList fragment_list(std::move(range_del_iter),
                                             bytewise_icmp);
  std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
      new FragmentedRangeTombstoneIterator(&fragment_list, bytewise_icmp,
                                           kMaxSequenceNumber));

  ReadRangeDelAggregator range_del_agg(&bytewise_icmp, kMaxSequenceNumber);
  range_del_agg.AddTombstones(std::move(input_iter));

  VerifyShouldDelete(&range_del_agg, {{InternalValue("a", 19), false},
                                      {InternalValue("b", 9), true},
                                      {InternalValue("d", 9), true},
                                      {InternalValue("e", 7), true},
                                      {InternalValue("g", 7), false}});

  VerifyIsRangeOverlapped(&range_del_agg, {{"", "_", false},
                                           {"_", "a", true},
                                           {"a", "c", true},
                                           {"d", "f", true},
                                           {"g", "l", false}});
}

TEST_F(RangeDelAggregatorTest, MultipleItersInAggregator) {
  auto fragment_lists = MakeFragmentedTombstoneLists(
      {{{"a", "e", 10}, {"c", "g", 8}},
       {{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}});

  ReadRangeDelAggregator range_del_agg(&bytewise_icmp, kMaxSequenceNumber);
  for (const auto& fragment_list : fragment_lists) {
    std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
        new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp,
                                             kMaxSequenceNumber));
    range_del_agg.AddTombstones(std::move(input_iter));
  }

  VerifyShouldDelete(&range_del_agg, {{InternalValue("a", 19), true},
                                      {InternalValue("b", 19), false},
                                      {InternalValue("b", 9), true},
                                      {InternalValue("d", 9), true},
                                      {InternalValue("e", 7), true},
                                      {InternalValue("g", 7), false},
                                      {InternalValue("h", 24), true},
                                      {InternalValue("i", 24), false},
                                      {InternalValue("ii", 14), true},
                                      {InternalValue("j", 14), false}});

  VerifyIsRangeOverlapped(&range_del_agg, {{"", "_", false},
                                           {"_", "a", true},
                                           {"a", "c", true},
                                           {"d", "f", true},
                                           {"g", "l", true},
                                           {"x", "y", false}});
}
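
// An upper bound (snapshot) of 19 hides tombstones sequenced above it, so the
// seqnum-20 and seqnum-25 tombstones no longer delete anything here.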
TEST_F(RangeDelAggregatorTest, MultipleItersInAggregatorWithUpperBound) {
  auto fragment_lists = MakeFragmentedTombstoneLists(
      {{{"a", "e", 10}, {"c", "g", 8}},
       {{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}});

  ReadRangeDelAggregator range_del_agg(&bytewise_icmp, 19);
  for (const auto& fragment_list : fragment_lists) {
    std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
        new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp,
                                             19 /* snapshot */));
    range_del_agg.AddTombstones(std::move(input_iter));
  }

  VerifyShouldDelete(&range_del_agg, {{InternalValue("a", 19), false},
                                      {InternalValue("a", 9), true},
                                      {InternalValue("b", 9), true},
                                      {InternalValue("d", 9), true},
                                      {InternalValue("e", 7), true},
                                      {InternalValue("g", 7), false},
                                      {InternalValue("h", 24), false},
                                      {InternalValue("i", 24), false},
                                      {InternalValue("ii", 14), true},
                                      {InternalValue("j", 14), false}});

  VerifyIsRangeOverlapped(&range_del_agg, {{"", "_", false},
                                           {"_", "a", true},
                                           {"a", "c", true},
                                           {"d", "f", true},
                                           {"g", "l", true},
                                           {"x", "y", false}});
}
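
// Each iterator is truncated to its file's [smallest, largest] bounds, so the
// shared [a, z)@10 tombstone only applies within each file's key range.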
TEST_F(RangeDelAggregatorTest, MultipleTruncatedItersInAggregator) {
  auto fragment_lists = MakeFragmentedTombstoneLists(
      {{{"a", "z", 10}}, {{"a", "z", 10}}, {{"a", "z", 10}}});
  std::vector<std::pair<InternalKey, InternalKey>> iter_bounds = {
      {InternalKey("a", 4, kTypeValue),
       InternalKey("m", kMaxSequenceNumber, kTypeRangeDeletion)},
      {InternalKey("m", 20, kTypeValue),
       InternalKey("x", kMaxSequenceNumber, kTypeRangeDeletion)},
      {InternalKey("x", 5, kTypeValue), InternalKey("zz", 30, kTypeValue)}};

  ReadRangeDelAggregator range_del_agg(&bytewise_icmp, 19);
  for (size_t i = 0; i < fragment_lists.size(); i++) {
    const auto& fragment_list = fragment_lists[i];
    const auto& bounds = iter_bounds[i];
    std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
        new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp,
                                             19 /* snapshot */));
    range_del_agg.AddTombstones(std::move(input_iter), &bounds.first,
                                &bounds.second);
  }

  VerifyShouldDelete(&range_del_agg, {{InternalValue("a", 10), false},
                                      {InternalValue("a", 9), false},
                                      {InternalValue("a", 4), true},
                                      {InternalValue("m", 10), false},
                                      {InternalValue("m", 9), true},
                                      {InternalValue("x", 10), false},
                                      {InternalValue("x", 9), false},
                                      {InternalValue("x", 5), true},
                                      {InternalValue("z", 9), false}});

  VerifyIsRangeOverlapped(&range_del_agg, {{"", "_", false},
                                           {"_", "a", true},
                                           {"a", "n", true},
                                           {"l", "x", true},
                                           {"w", "z", true},
                                           {"zzz", "zz", false},
                                           {"zz", "zzz", false}});
}
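
// As above, but the truncated iterators are added one at a time to verify
// that each file's bounds take effect as soon as its tombstones are added.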
TEST_F(RangeDelAggregatorTest, MultipleTruncatedItersInAggregatorSameLevel) {
  auto fragment_lists = MakeFragmentedTombstoneLists(
      {{{"a", "z", 10}}, {{"a", "z", 10}}, {{"a", "z", 10}}});
  std::vector<std::pair<InternalKey, InternalKey>> iter_bounds = {
      {InternalKey("a", 4, kTypeValue),
       InternalKey("m", kMaxSequenceNumber, kTypeRangeDeletion)},
      {InternalKey("m", 20, kTypeValue),
       InternalKey("x", kMaxSequenceNumber, kTypeRangeDeletion)},
      {InternalKey("x", 5, kTypeValue), InternalKey("zz", 30, kTypeValue)}};

  ReadRangeDelAggregator range_del_agg(&bytewise_icmp, 19);

  auto add_iter_to_agg = [&](size_t i) {
    std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
        new FragmentedRangeTombstoneIterator(fragment_lists[i].get(),
                                             bytewise_icmp, 19 /* snapshot */));
    range_del_agg.AddTombstones(std::move(input_iter), &iter_bounds[i].first,
                                &iter_bounds[i].second);
  };

  add_iter_to_agg(0);
  VerifyShouldDelete(&range_del_agg, {{InternalValue("a", 10), false},
                                      {InternalValue("a", 9), false},
                                      {InternalValue("a", 4), true}});

  add_iter_to_agg(1);
  VerifyShouldDelete(&range_del_agg, {{InternalValue("m", 10), false},
                                      {InternalValue("m", 9), true}});

  add_iter_to_agg(2);
  VerifyShouldDelete(&range_del_agg, {{InternalValue("x", 10), false},
                                      {InternalValue("x", 9), false},
                                      {InternalValue("x", 5), true},
                                      {InternalValue("z", 9), false}});

  VerifyIsRangeOverlapped(&range_del_agg, {{"", "_", false},
                                           {"_", "a", true},
                                           {"a", "n", true},
                                           {"l", "x", true},
                                           {"w", "z", true},
                                           {"zzz", "zz", false},
                                           {"zz", "zzz", false}});
}
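
// With no snapshots, only the newest tombstone over each key range survives
// in the compaction aggregator's fragmented output.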
TEST_F(RangeDelAggregatorTest, CompactionAggregatorNoSnapshots) {
  auto fragment_lists = MakeFragmentedTombstoneLists(
      {{{"a", "e", 10}, {"c", "g", 8}},
       {{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}});

  std::vector<SequenceNumber> snapshots;
  CompactionRangeDelAggregator range_del_agg(&bytewise_icmp, snapshots);
  for (const auto& fragment_list : fragment_lists) {
    std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
        new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp,
                                             kMaxSequenceNumber));
    range_del_agg.AddTombstones(std::move(input_iter));
  }

  VerifyShouldDelete(&range_del_agg, {{InternalValue("a", 19), true},
                                      {InternalValue("b", 19), false},
                                      {InternalValue("b", 9), true},
                                      {InternalValue("d", 9), true},
                                      {InternalValue("e", 7), true},
                                      {InternalValue("g", 7), false},
                                      {InternalValue("h", 24), true},
                                      {InternalValue("i", 24), false},
                                      {InternalValue("ii", 14), true},
                                      {InternalValue("j", 14), false}});

  auto range_del_compaction_iter = range_del_agg.NewIterator();
  VerifyFragmentedRangeDels(range_del_compaction_iter.get(), {{"a", "b", 20},
                                                              {"b", "c", 10},
                                                              {"c", "e", 10},
                                                              {"e", "g", 8},
                                                              {"h", "i", 25},
                                                              {"ii", "j", 15}});
}
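
// Snapshots at seqnums 9 and 19 split tombstones into stripes: a tombstone
// only covers keys in its own stripe, and the output keeps one fragment per
// stripe that contains a tombstone (e.g. [a, b) appears at seqnums 20 and 10).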
TEST_F(RangeDelAggregatorTest, CompactionAggregatorWithSnapshots) {
  auto fragment_lists = MakeFragmentedTombstoneLists(
      {{{"a", "e", 10}, {"c", "g", 8}},
       {{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}});

  std::vector<SequenceNumber> snapshots{9, 19};
  CompactionRangeDelAggregator range_del_agg(&bytewise_icmp, snapshots);
  for (const auto& fragment_list : fragment_lists) {
    std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
        new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp,
                                             kMaxSequenceNumber));
    range_del_agg.AddTombstones(std::move(input_iter));
  }

  VerifyShouldDelete(
      &range_del_agg,
      {
          {InternalValue("a", 19), false},  // [10, 19]
          {InternalValue("a", 9), false},   // [0, 9]
          {InternalValue("b", 9), false},   // [0, 9]
          {InternalValue("d", 9), false},   // [0, 9]
          {InternalValue("d", 7), true},    // [0, 9]
          {InternalValue("e", 7), true},    // [0, 9]
          {InternalValue("g", 7), false},   // [0, 9]
          {InternalValue("h", 24), true},   // [20, kMaxSequenceNumber]
          {InternalValue("i", 24), false},  // [20, kMaxSequenceNumber]
          {InternalValue("ii", 14), true},  // [10, 19]
          {InternalValue("j", 14), false}   // [10, 19]
      });

  auto range_del_compaction_iter = range_del_agg.NewIterator();
  VerifyFragmentedRangeDels(range_del_compaction_iter.get(), {{"a", "b", 20},
                                                              {"a", "b", 10},
                                                              {"b", "c", 10},
                                                              {"c", "e", 10},
                                                              {"c", "e", 8},
                                                              {"e", "g", 8},
                                                              {"h", "i", 25},
                                                              {"ii", "j", 15}});
}
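
// Query bounds entirely to the left of every tombstone should yield an empty
// compaction iterator (the mirror image of the EmptyIteratorRight case below).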
TEST_F(RangeDelAggregatorTest, CompactionAggregatorEmptyIteratorLeft) {
  auto fragment_lists = MakeFragmentedTombstoneLists(
      {{{"a", "e", 10}, {"c", "g", 8}},
       {{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}});

  std::vector<SequenceNumber> snapshots{9, 19};
  CompactionRangeDelAggregator range_del_agg(&bytewise_icmp, snapshots);
  for (const auto& fragment_list : fragment_lists) {
    std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
        new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp,
                                             kMaxSequenceNumber));
    range_del_agg.AddTombstones(std::move(input_iter));
  }

  // ["_", "__") lies entirely before the earliest tombstone start key ("a"),
  // so both the exclusive- and inclusive-end iterators should be empty.
  Slice start("_");
  Slice end("__");
  auto range_del_compaction_iter1 =
      range_del_agg.NewIterator(&start, &end, false /* end_key_inclusive */);
  VerifyFragmentedRangeDels(range_del_compaction_iter1.get(), {});

  auto range_del_compaction_iter2 =
      range_del_agg.NewIterator(&start, &end, true /* end_key_inclusive */);
  VerifyFragmentedRangeDels(range_del_compaction_iter2.get(), {});
}
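
// Query bounds entirely to the right of every tombstone (["p", "q") vs. a
// maximum tombstone end key of "j") likewise yield an empty iterator.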
TEST_F(RangeDelAggregatorTest, CompactionAggregatorEmptyIteratorRight) {
  auto fragment_lists = MakeFragmentedTombstoneLists(
      {{{"a", "e", 10}, {"c", "g", 8}},
       {{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}});

  std::vector<SequenceNumber> snapshots{9, 19};
  CompactionRangeDelAggregator range_del_agg(&bytewise_icmp, snapshots);
  for (const auto& fragment_list : fragment_lists) {
    std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
        new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp,
                                             kMaxSequenceNumber));
    range_del_agg.AddTombstones(std::move(input_iter));
  }

  Slice start("p");
  Slice end("q");
  auto range_del_compaction_iter1 =
      range_del_agg.NewIterator(&start, &end, false /* end_key_inclusive */);
  VerifyFragmentedRangeDels(range_del_compaction_iter1.get(), {});

  auto range_del_compaction_iter2 =
      range_del_agg.NewIterator(&start, &end, true /* end_key_inclusive */);
  VerifyFragmentedRangeDels(range_del_compaction_iter2.get(), {});
}
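
// Bounds ["bb", "e") select only the overlapping fragments; with an inclusive
// end key, the fragment starting exactly at "e" is picked up as well.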
TEST_F(RangeDelAggregatorTest, CompactionAggregatorBoundedIterator) {
  auto fragment_lists = MakeFragmentedTombstoneLists(
      {{{"a", "e", 10}, {"c", "g", 8}},
       {{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}});

  std::vector<SequenceNumber> snapshots{9, 19};
  CompactionRangeDelAggregator range_del_agg(&bytewise_icmp, snapshots);
  for (const auto& fragment_list : fragment_lists) {
    std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
        new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp,
                                             kMaxSequenceNumber));
    range_del_agg.AddTombstones(std::move(input_iter));
  }

  Slice start("bb");
  Slice end("e");
  auto range_del_compaction_iter1 =
      range_del_agg.NewIterator(&start, &end, false /* end_key_inclusive */);
  VerifyFragmentedRangeDels(range_del_compaction_iter1.get(),
                            {{"a", "c", 10}, {"c", "e", 10}, {"c", "e", 8}});

  auto range_del_compaction_iter2 =
      range_del_agg.NewIterator(&start, &end, true /* end_key_inclusive */);
  VerifyFragmentedRangeDels(
      range_del_compaction_iter2.get(),
      {{"a", "c", 10}, {"c", "e", 10}, {"c", "e", 8}, {"e", "g", 8}});
}
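
// Here the bounds fall inside unfragmented tombstones, so the output contains
// extra fragments (splits at "b", "c", and "d") and extends past "e"; the
// inclusive and exclusive end-key variants produce identical results.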
TEST_F(RangeDelAggregatorTest,
       CompactionAggregatorBoundedIteratorExtraFragments) {
  auto fragment_lists = MakeFragmentedTombstoneLists(
      {{{"a", "d", 10}, {"c", "g", 8}},
       {{"b", "c", 20}, {"d", "f", 30}, {"h", "i", 25}, {"ii", "j", 15}}});

  std::vector<SequenceNumber> snapshots{9, 19};
  CompactionRangeDelAggregator range_del_agg(&bytewise_icmp, snapshots);
  for (const auto& fragment_list : fragment_lists) {
    std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
        new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp,
                                             kMaxSequenceNumber));
    range_del_agg.AddTombstones(std::move(input_iter));
  }

  Slice start("bb");
  Slice end("e");
  auto range_del_compaction_iter1 =
      range_del_agg.NewIterator(&start, &end, false /* end_key_inclusive */);
  VerifyFragmentedRangeDels(range_del_compaction_iter1.get(), {{"a", "b", 10},
                                                               {"b", "c", 20},
                                                               {"b", "c", 10},
                                                               {"c", "d", 10},
                                                               {"c", "d", 8},
                                                               {"d", "f", 30},
                                                               {"d", "f", 8},
                                                               {"f", "g", 8}});

  auto range_del_compaction_iter2 =
      range_del_agg.NewIterator(&start, &end, true /* end_key_inclusive */);
  VerifyFragmentedRangeDels(range_del_compaction_iter2.get(), {{"a", "b", 10},
                                                               {"b", "c", 20},
                                                               {"b", "c", 10},
                                                               {"c", "d", 10},
                                                               {"c", "d", 8},
                                                               {"d", "f", 30},
                                                               {"d", "f", 8},
                                                               {"f", "g", 8}});
}
} // namespace ROCKSDB_NAMESPACE
int main(int argc, char** argv) {
  ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}