Remove v1 RangeDelAggregator (#4778)

Summary:
Now that v2 is fully functional, the v1 aggregator is removed.
The v2 aggregator has been renamed.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/4778

Differential Revision: D13495930

Pulled By: abhimadan

fbshipit-source-id: 9d69500a60a283e79b6c4fa938fc68a8aa4d40d6
main
Abhishek Madan 6 years ago committed by Facebook Github Bot
parent 311cd8cf2f
commit 81b6b09f6b
  1. 2
      CMakeLists.txt
  2. 6
      Makefile
  3. 6
      TARGETS
  4. 6
      db/builder.cc
  5. 4
      db/column_family.cc
  6. 4
      db/compaction_iterator.cc
  7. 8
      db/compaction_iterator.h
  8. 5
      db/compaction_iterator_test.cc
  9. 6
      db/compaction_job.cc
  10. 4
      db/compaction_job.h
  11. 10
      db/db_compaction_filter_test.cc
  12. 13
      db/db_impl.cc
  13. 15
      db/db_impl.h
  14. 1
      db/db_impl_readonly.cc
  15. 6
      db/db_iter.cc
  16. 4
      db/db_iter.h
  17. 4
      db/db_memtable_test.cc
  18. 6
      db/db_test_util.cc
  19. 10
      db/forward_iterator.cc
  20. 2
      db/memtable_list.cc
  21. 4
      db/memtable_list.h
  22. 1
      db/memtable_list_test.cc
  23. 6
      db/merge_context.h
  24. 2
      db/merge_helper.cc
  25. 4
      db/merge_helper.h
  26. 999
      db/range_del_aggregator.cc
  27. 554
      db/range_del_aggregator.h
  28. 36
      db/range_del_aggregator_bench.cc
  29. 1073
      db/range_del_aggregator_test.cc
  30. 492
      db/range_del_aggregator_v2.cc
  31. 436
      db/range_del_aggregator_v2.h
  32. 709
      db/range_del_aggregator_v2_test.cc
  33. 2
      db/table_cache.cc
  34. 4
      db/table_cache.h
  35. 12
      db/version_set.cc
  36. 8
      db/version_set.h
  37. 2
      src.mk
  38. 2
      utilities/debug.cc

@ -504,7 +504,6 @@ set(SOURCES
db/merge_helper.cc db/merge_helper.cc
db/merge_operator.cc db/merge_operator.cc
db/range_del_aggregator.cc db/range_del_aggregator.cc
db/range_del_aggregator_v2.cc
db/range_tombstone_fragmenter.cc db/range_tombstone_fragmenter.cc
db/repair.cc db/repair.cc
db/snapshot_impl.cc db/snapshot_impl.cc
@ -908,7 +907,6 @@ if(WITH_TESTS)
db/plain_table_db_test.cc db/plain_table_db_test.cc
db/prefix_test.cc db/prefix_test.cc
db/range_del_aggregator_test.cc db/range_del_aggregator_test.cc
db/range_del_aggregator_v2_test.cc
db/range_tombstone_fragmenter_test.cc db/range_tombstone_fragmenter_test.cc
db/repair_test.cc db/repair_test.cc
db/table_properties_collector_test.cc db/table_properties_collector_test.cc

@ -543,7 +543,6 @@ TESTS = \
persistent_cache_test \ persistent_cache_test \
statistics_test \ statistics_test \
lua_test \ lua_test \
range_del_aggregator_test \
lru_cache_test \ lru_cache_test \
object_registry_test \ object_registry_test \
repair_test \ repair_test \
@ -554,7 +553,7 @@ TESTS = \
trace_analyzer_test \ trace_analyzer_test \
repeatable_thread_test \ repeatable_thread_test \
range_tombstone_fragmenter_test \ range_tombstone_fragmenter_test \
range_del_aggregator_v2_test \ range_del_aggregator_test \
sst_file_reader_test \ sst_file_reader_test \
PARALLEL_TEST = \ PARALLEL_TEST = \
@ -1588,9 +1587,6 @@ repeatable_thread_test: util/repeatable_thread_test.o $(LIBOBJECTS) $(TESTHARNES
range_tombstone_fragmenter_test: db/range_tombstone_fragmenter_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS) range_tombstone_fragmenter_test: db/range_tombstone_fragmenter_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
$(AM_LINK) $(AM_LINK)
range_del_aggregator_v2_test: db/range_del_aggregator_v2_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
$(AM_LINK)
sst_file_reader_test: table/sst_file_reader_test.o $(LIBOBJECTS) $(TESTHARNESS) sst_file_reader_test: table/sst_file_reader_test.o $(LIBOBJECTS) $(TESTHARNESS)
$(AM_LINK) $(AM_LINK)

@ -124,7 +124,6 @@ cpp_library(
"db/merge_helper.cc", "db/merge_helper.cc",
"db/merge_operator.cc", "db/merge_operator.cc",
"db/range_del_aggregator.cc", "db/range_del_aggregator.cc",
"db/range_del_aggregator_v2.cc",
"db/range_tombstone_fragmenter.cc", "db/range_tombstone_fragmenter.cc",
"db/repair.cc", "db/repair.cc",
"db/snapshot_impl.cc", "db/snapshot_impl.cc",
@ -936,11 +935,6 @@ ROCKS_TESTS = [
"db/range_del_aggregator_test.cc", "db/range_del_aggregator_test.cc",
"serial", "serial",
], ],
[
"range_del_aggregator_v2_test",
"db/range_del_aggregator_v2_test.cc",
"serial",
],
[ [
"range_tombstone_fragmenter_test", "range_tombstone_fragmenter_test",
"db/range_tombstone_fragmenter_test.cc", "db/range_tombstone_fragmenter_test.cc",

@ -18,7 +18,7 @@
#include "db/event_helpers.h" #include "db/event_helpers.h"
#include "db/internal_stats.h" #include "db/internal_stats.h"
#include "db/merge_helper.h" #include "db/merge_helper.h"
#include "db/range_del_aggregator_v2.h" #include "db/range_del_aggregator.h"
#include "db/table_cache.h" #include "db/table_cache.h"
#include "db/version_edit.h" #include "db/version_edit.h"
#include "monitoring/iostats_context_imp.h" #include "monitoring/iostats_context_imp.h"
@ -88,8 +88,8 @@ Status BuildTable(
Status s; Status s;
meta->fd.file_size = 0; meta->fd.file_size = 0;
iter->SeekToFirst(); iter->SeekToFirst();
std::unique_ptr<CompactionRangeDelAggregatorV2> range_del_agg( std::unique_ptr<CompactionRangeDelAggregator> range_del_agg(
new CompactionRangeDelAggregatorV2(&internal_comparator, snapshots)); new CompactionRangeDelAggregator(&internal_comparator, snapshots));
for (auto& range_del_iter : range_del_iters) { for (auto& range_del_iter : range_del_iters) {
range_del_agg->AddTombstones(std::move(range_del_iter)); range_del_agg->AddTombstones(std::move(range_del_iter));
} }

@ -25,7 +25,7 @@
#include "db/db_impl.h" #include "db/db_impl.h"
#include "db/internal_stats.h" #include "db/internal_stats.h"
#include "db/job_context.h" #include "db/job_context.h"
#include "db/range_del_aggregator_v2.h" #include "db/range_del_aggregator.h"
#include "db/table_properties_collector.h" #include "db/table_properties_collector.h"
#include "db/version_set.h" #include "db/version_set.h"
#include "db/write_controller.h" #include "db/write_controller.h"
@ -945,7 +945,7 @@ Status ColumnFamilyData::RangesOverlapWithMemtables(
ScopedArenaIterator memtable_iter(merge_iter_builder.Finish()); ScopedArenaIterator memtable_iter(merge_iter_builder.Finish());
auto read_seq = super_version->current->version_set()->LastSequence(); auto read_seq = super_version->current->version_set()->LastSequence();
ReadRangeDelAggregatorV2 range_del_agg(&internal_comparator_, read_seq); ReadRangeDelAggregator range_del_agg(&internal_comparator_, read_seq);
auto* active_range_del_iter = auto* active_range_del_iter =
super_version->mem->NewRangeTombstoneIterator(read_opts, read_seq); super_version->mem->NewRangeTombstoneIterator(read_opts, read_seq);
range_del_agg.AddTombstones( range_del_agg.AddTombstones(

@ -18,7 +18,7 @@ CompactionIterator::CompactionIterator(
SequenceNumber earliest_write_conflict_snapshot, SequenceNumber earliest_write_conflict_snapshot,
const SnapshotChecker* snapshot_checker, Env* env, const SnapshotChecker* snapshot_checker, Env* env,
bool report_detailed_time, bool expect_valid_internal_key, bool report_detailed_time, bool expect_valid_internal_key,
CompactionRangeDelAggregatorV2* range_del_agg, const Compaction* compaction, CompactionRangeDelAggregator* range_del_agg, const Compaction* compaction,
const CompactionFilter* compaction_filter, const CompactionFilter* compaction_filter,
const std::atomic<bool>* shutting_down, const std::atomic<bool>* shutting_down,
const SequenceNumber preserve_deletes_seqnum) const SequenceNumber preserve_deletes_seqnum)
@ -36,7 +36,7 @@ CompactionIterator::CompactionIterator(
SequenceNumber earliest_write_conflict_snapshot, SequenceNumber earliest_write_conflict_snapshot,
const SnapshotChecker* snapshot_checker, Env* env, const SnapshotChecker* snapshot_checker, Env* env,
bool report_detailed_time, bool expect_valid_internal_key, bool report_detailed_time, bool expect_valid_internal_key,
CompactionRangeDelAggregatorV2* range_del_agg, CompactionRangeDelAggregator* range_del_agg,
std::unique_ptr<CompactionProxy> compaction, std::unique_ptr<CompactionProxy> compaction,
const CompactionFilter* compaction_filter, const CompactionFilter* compaction_filter,
const std::atomic<bool>* shutting_down, const std::atomic<bool>* shutting_down,

@ -13,7 +13,7 @@
#include "db/compaction_iteration_stats.h" #include "db/compaction_iteration_stats.h"
#include "db/merge_helper.h" #include "db/merge_helper.h"
#include "db/pinned_iterators_manager.h" #include "db/pinned_iterators_manager.h"
#include "db/range_del_aggregator_v2.h" #include "db/range_del_aggregator.h"
#include "db/snapshot_checker.h" #include "db/snapshot_checker.h"
#include "options/cf_options.h" #include "options/cf_options.h"
#include "rocksdb/compaction_filter.h" #include "rocksdb/compaction_filter.h"
@ -64,7 +64,7 @@ class CompactionIterator {
SequenceNumber earliest_write_conflict_snapshot, SequenceNumber earliest_write_conflict_snapshot,
const SnapshotChecker* snapshot_checker, Env* env, const SnapshotChecker* snapshot_checker, Env* env,
bool report_detailed_time, bool expect_valid_internal_key, bool report_detailed_time, bool expect_valid_internal_key,
CompactionRangeDelAggregatorV2* range_del_agg, CompactionRangeDelAggregator* range_del_agg,
const Compaction* compaction = nullptr, const Compaction* compaction = nullptr,
const CompactionFilter* compaction_filter = nullptr, const CompactionFilter* compaction_filter = nullptr,
const std::atomic<bool>* shutting_down = nullptr, const std::atomic<bool>* shutting_down = nullptr,
@ -77,7 +77,7 @@ class CompactionIterator {
SequenceNumber earliest_write_conflict_snapshot, SequenceNumber earliest_write_conflict_snapshot,
const SnapshotChecker* snapshot_checker, Env* env, const SnapshotChecker* snapshot_checker, Env* env,
bool report_detailed_time, bool expect_valid_internal_key, bool report_detailed_time, bool expect_valid_internal_key,
CompactionRangeDelAggregatorV2* range_del_agg, CompactionRangeDelAggregator* range_del_agg,
std::unique_ptr<CompactionProxy> compaction, std::unique_ptr<CompactionProxy> compaction,
const CompactionFilter* compaction_filter = nullptr, const CompactionFilter* compaction_filter = nullptr,
const std::atomic<bool>* shutting_down = nullptr, const std::atomic<bool>* shutting_down = nullptr,
@ -141,7 +141,7 @@ class CompactionIterator {
Env* env_; Env* env_;
bool report_detailed_time_; bool report_detailed_time_;
bool expect_valid_internal_key_; bool expect_valid_internal_key_;
CompactionRangeDelAggregatorV2* range_del_agg_; CompactionRangeDelAggregator* range_del_agg_;
std::unique_ptr<CompactionProxy> compaction_; std::unique_ptr<CompactionProxy> compaction_;
const CompactionFilter* compaction_filter_; const CompactionFilter* compaction_filter_;
const std::atomic<bool>* shutting_down_; const std::atomic<bool>* shutting_down_;

@ -228,8 +228,7 @@ class CompactionIteratorTest : public testing::TestWithParam<bool> {
std::unique_ptr<FragmentedRangeTombstoneIterator> range_del_iter( std::unique_ptr<FragmentedRangeTombstoneIterator> range_del_iter(
new FragmentedRangeTombstoneIterator(tombstone_list, icmp_, new FragmentedRangeTombstoneIterator(tombstone_list, icmp_,
kMaxSequenceNumber)); kMaxSequenceNumber));
range_del_agg_.reset( range_del_agg_.reset(new CompactionRangeDelAggregator(&icmp_, snapshots_));
new CompactionRangeDelAggregatorV2(&icmp_, snapshots_));
range_del_agg_->AddTombstones(std::move(range_del_iter)); range_del_agg_->AddTombstones(std::move(range_del_iter));
std::unique_ptr<CompactionIterator::CompactionProxy> compaction; std::unique_ptr<CompactionIterator::CompactionProxy> compaction;
@ -298,7 +297,7 @@ class CompactionIteratorTest : public testing::TestWithParam<bool> {
std::unique_ptr<MergeHelper> merge_helper_; std::unique_ptr<MergeHelper> merge_helper_;
std::unique_ptr<LoggingForwardVectorIterator> iter_; std::unique_ptr<LoggingForwardVectorIterator> iter_;
std::unique_ptr<CompactionIterator> c_iter_; std::unique_ptr<CompactionIterator> c_iter_;
std::unique_ptr<CompactionRangeDelAggregatorV2> range_del_agg_; std::unique_ptr<CompactionRangeDelAggregator> range_del_agg_;
std::unique_ptr<SnapshotChecker> snapshot_checker_; std::unique_ptr<SnapshotChecker> snapshot_checker_;
std::atomic<bool> shutting_down_{false}; std::atomic<bool> shutting_down_{false};
FakeCompaction* compaction_proxy_; FakeCompaction* compaction_proxy_;

@ -36,7 +36,7 @@
#include "db/memtable_list.h" #include "db/memtable_list.h"
#include "db/merge_context.h" #include "db/merge_context.h"
#include "db/merge_helper.h" #include "db/merge_helper.h"
#include "db/range_del_aggregator_v2.h" #include "db/range_del_aggregator.h"
#include "db/version_set.h" #include "db/version_set.h"
#include "monitoring/iostats_context_imp.h" #include "monitoring/iostats_context_imp.h"
#include "monitoring/perf_context_imp.h" #include "monitoring/perf_context_imp.h"
@ -805,7 +805,7 @@ Status CompactionJob::Install(const MutableCFOptions& mutable_cf_options) {
void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) { void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) {
assert(sub_compact != nullptr); assert(sub_compact != nullptr);
ColumnFamilyData* cfd = sub_compact->compaction->column_family_data(); ColumnFamilyData* cfd = sub_compact->compaction->column_family_data();
CompactionRangeDelAggregatorV2 range_del_agg(&cfd->internal_comparator(), CompactionRangeDelAggregator range_del_agg(&cfd->internal_comparator(),
existing_snapshots_); existing_snapshots_);
// Although the v2 aggregator is what the level iterator(s) know about, // Although the v2 aggregator is what the level iterator(s) know about,
@ -1165,7 +1165,7 @@ void CompactionJob::RecordDroppedKeys(
Status CompactionJob::FinishCompactionOutputFile( Status CompactionJob::FinishCompactionOutputFile(
const Status& input_status, SubcompactionState* sub_compact, const Status& input_status, SubcompactionState* sub_compact,
CompactionRangeDelAggregatorV2* range_del_agg, CompactionRangeDelAggregator* range_del_agg,
CompactionIterationStats* range_del_out_stats, CompactionIterationStats* range_del_out_stats,
const Slice* next_table_min_key /* = nullptr */) { const Slice* next_table_min_key /* = nullptr */) {
AutoThreadOperationStageUpdater stage_updater( AutoThreadOperationStageUpdater stage_updater(

@ -25,7 +25,7 @@
#include "db/job_context.h" #include "db/job_context.h"
#include "db/log_writer.h" #include "db/log_writer.h"
#include "db/memtable_list.h" #include "db/memtable_list.h"
#include "db/range_del_aggregator_v2.h" #include "db/range_del_aggregator.h"
#include "db/version_edit.h" #include "db/version_edit.h"
#include "db/write_controller.h" #include "db/write_controller.h"
#include "db/write_thread.h" #include "db/write_thread.h"
@ -104,7 +104,7 @@ class CompactionJob {
Status FinishCompactionOutputFile( Status FinishCompactionOutputFile(
const Status& input_status, SubcompactionState* sub_compact, const Status& input_status, SubcompactionState* sub_compact,
CompactionRangeDelAggregatorV2* range_del_agg, CompactionRangeDelAggregator* range_del_agg,
CompactionIterationStats* range_del_out_stats, CompactionIterationStats* range_del_out_stats,
const Slice* next_table_min_key = nullptr); const Slice* next_table_min_key = nullptr);
Status InstallCompactionResults(const MutableCFOptions& mutable_cf_options); Status InstallCompactionResults(const MutableCFOptions& mutable_cf_options);

@ -340,8 +340,8 @@ TEST_F(DBTestCompactionFilter, CompactionFilter) {
Arena arena; Arena arena;
{ {
InternalKeyComparator icmp(options.comparator); InternalKeyComparator icmp(options.comparator);
ReadRangeDelAggregatorV2 range_del_agg( ReadRangeDelAggregator range_del_agg(&icmp,
&icmp, kMaxSequenceNumber /* upper_bound */); kMaxSequenceNumber /* upper_bound */);
ScopedArenaIterator iter(dbfull()->NewInternalIterator( ScopedArenaIterator iter(dbfull()->NewInternalIterator(
&arena, &range_del_agg, kMaxSequenceNumber, handles_[1])); &arena, &range_del_agg, kMaxSequenceNumber, handles_[1]));
iter->SeekToFirst(); iter->SeekToFirst();
@ -430,8 +430,8 @@ TEST_F(DBTestCompactionFilter, CompactionFilter) {
count = 0; count = 0;
{ {
InternalKeyComparator icmp(options.comparator); InternalKeyComparator icmp(options.comparator);
ReadRangeDelAggregatorV2 range_del_agg( ReadRangeDelAggregator range_del_agg(&icmp,
&icmp, kMaxSequenceNumber /* upper_bound */); kMaxSequenceNumber /* upper_bound */);
ScopedArenaIterator iter(dbfull()->NewInternalIterator( ScopedArenaIterator iter(dbfull()->NewInternalIterator(
&arena, &range_del_agg, kMaxSequenceNumber, handles_[1])); &arena, &range_del_agg, kMaxSequenceNumber, handles_[1]));
iter->SeekToFirst(); iter->SeekToFirst();
@ -648,7 +648,7 @@ TEST_F(DBTestCompactionFilter, CompactionFilterContextManual) {
int total = 0; int total = 0;
Arena arena; Arena arena;
InternalKeyComparator icmp(options.comparator); InternalKeyComparator icmp(options.comparator);
ReadRangeDelAggregatorV2 range_del_agg(&icmp, ReadRangeDelAggregator range_del_agg(&icmp,
kMaxSequenceNumber /* snapshots */); kMaxSequenceNumber /* snapshots */);
ScopedArenaIterator iter(dbfull()->NewInternalIterator( ScopedArenaIterator iter(dbfull()->NewInternalIterator(
&arena, &range_del_agg, kMaxSequenceNumber)); &arena, &range_del_agg, kMaxSequenceNumber));

@ -45,7 +45,6 @@
#include "db/memtable_list.h" #include "db/memtable_list.h"
#include "db/merge_context.h" #include "db/merge_context.h"
#include "db/merge_helper.h" #include "db/merge_helper.h"
#include "db/range_del_aggregator.h"
#include "db/range_tombstone_fragmenter.h" #include "db/range_tombstone_fragmenter.h"
#include "db/table_cache.h" #include "db/table_cache.h"
#include "db/table_properties_collector.h" #include "db/table_properties_collector.h"
@ -1033,7 +1032,7 @@ bool DBImpl::SetPreserveDeletesSequenceNumber(SequenceNumber seqnum) {
} }
InternalIterator* DBImpl::NewInternalIterator( InternalIterator* DBImpl::NewInternalIterator(
Arena* arena, RangeDelAggregatorV2* range_del_agg, SequenceNumber sequence, Arena* arena, RangeDelAggregator* range_del_agg, SequenceNumber sequence,
ColumnFamilyHandle* column_family) { ColumnFamilyHandle* column_family) {
ColumnFamilyData* cfd; ColumnFamilyData* cfd;
if (column_family == nullptr) { if (column_family == nullptr) {
@ -1150,10 +1149,12 @@ static void CleanupIteratorState(void* arg1, void* /*arg2*/) {
} }
} // namespace } // namespace
InternalIterator* DBImpl::NewInternalIterator( InternalIterator* DBImpl::NewInternalIterator(const ReadOptions& read_options,
const ReadOptions& read_options, ColumnFamilyData* cfd, ColumnFamilyData* cfd,
SuperVersion* super_version, Arena* arena, SuperVersion* super_version,
RangeDelAggregatorV2* range_del_agg, SequenceNumber sequence) { Arena* arena,
RangeDelAggregator* range_del_agg,
SequenceNumber sequence) {
InternalIterator* internal_iter; InternalIterator* internal_iter;
assert(arena != nullptr); assert(arena != nullptr);
assert(range_del_agg != nullptr); assert(range_del_agg != nullptr);

@ -31,7 +31,7 @@
#include "db/log_writer.h" #include "db/log_writer.h"
#include "db/logs_with_prep_tracker.h" #include "db/logs_with_prep_tracker.h"
#include "db/pre_release_callback.h" #include "db/pre_release_callback.h"
#include "db/range_del_aggregator_v2.h" #include "db/range_del_aggregator.h"
#include "db/read_callback.h" #include "db/read_callback.h"
#include "db/snapshot_checker.h" #include "db/snapshot_checker.h"
#include "db/snapshot_impl.h" #include "db/snapshot_impl.h"
@ -375,8 +375,8 @@ class DBImpl : public DB {
// The keys of this iterator are internal keys (see format.h). // The keys of this iterator are internal keys (see format.h).
// The returned iterator should be deleted when no longer needed. // The returned iterator should be deleted when no longer needed.
InternalIterator* NewInternalIterator( InternalIterator* NewInternalIterator(
Arena* arena, RangeDelAggregatorV2* range_del_agg, Arena* arena, RangeDelAggregator* range_del_agg, SequenceNumber sequence,
SequenceNumber sequence, ColumnFamilyHandle* column_family = nullptr); ColumnFamilyHandle* column_family = nullptr);
LogsWithPrepTracker* logs_with_prep_tracker() { LogsWithPrepTracker* logs_with_prep_tracker() {
return &logs_with_prep_tracker_; return &logs_with_prep_tracker_;
@ -579,12 +579,9 @@ class DBImpl : public DB {
const WriteController& write_controller() { return write_controller_; } const WriteController& write_controller() { return write_controller_; }
InternalIterator* NewInternalIterator(const ReadOptions&, InternalIterator* NewInternalIterator(
ColumnFamilyData* cfd, const ReadOptions&, ColumnFamilyData* cfd, SuperVersion* super_version,
SuperVersion* super_version, Arena* arena, RangeDelAggregator* range_del_agg, SequenceNumber sequence);
Arena* arena,
RangeDelAggregatorV2* range_del_agg,
SequenceNumber sequence);
// hollow transactions shell used for recovery. // hollow transactions shell used for recovery.
// these will then be passed to TransactionDB so that // these will then be passed to TransactionDB so that

@ -9,7 +9,6 @@
#include "db/db_impl.h" #include "db/db_impl.h"
#include "db/db_iter.h" #include "db/db_iter.h"
#include "db/merge_context.h" #include "db/merge_context.h"
#include "db/range_del_aggregator.h"
#include "monitoring/perf_context_imp.h" #include "monitoring/perf_context_imp.h"
namespace rocksdb { namespace rocksdb {

@ -171,7 +171,7 @@ class DBIter final: public Iterator {
iter_ = iter; iter_ = iter;
iter_->SetPinnedItersMgr(&pinned_iters_mgr_); iter_->SetPinnedItersMgr(&pinned_iters_mgr_);
} }
virtual ReadRangeDelAggregatorV2* GetRangeDelAggregator() { virtual ReadRangeDelAggregator* GetRangeDelAggregator() {
return &range_del_agg_; return &range_del_agg_;
} }
@ -341,7 +341,7 @@ class DBIter final: public Iterator {
const bool total_order_seek_; const bool total_order_seek_;
// List of operands for merge operator. // List of operands for merge operator.
MergeContext merge_context_; MergeContext merge_context_;
ReadRangeDelAggregatorV2 range_del_agg_; ReadRangeDelAggregator range_del_agg_;
LocalStatistics local_stats_; LocalStatistics local_stats_;
PinnedIteratorsManager pinned_iters_mgr_; PinnedIteratorsManager pinned_iters_mgr_;
ReadCallback* read_callback_; ReadCallback* read_callback_;
@ -1479,7 +1479,7 @@ Iterator* NewDBIterator(Env* env, const ReadOptions& read_options,
ArenaWrappedDBIter::~ArenaWrappedDBIter() { db_iter_->~DBIter(); } ArenaWrappedDBIter::~ArenaWrappedDBIter() { db_iter_->~DBIter(); }
ReadRangeDelAggregatorV2* ArenaWrappedDBIter::GetRangeDelAggregator() { ReadRangeDelAggregator* ArenaWrappedDBIter::GetRangeDelAggregator() {
return db_iter_->GetRangeDelAggregator(); return db_iter_->GetRangeDelAggregator();
} }

@ -12,7 +12,7 @@
#include <string> #include <string>
#include "db/db_impl.h" #include "db/db_impl.h"
#include "db/dbformat.h" #include "db/dbformat.h"
#include "db/range_del_aggregator_v2.h" #include "db/range_del_aggregator.h"
#include "options/cf_options.h" #include "options/cf_options.h"
#include "rocksdb/db.h" #include "rocksdb/db.h"
#include "rocksdb/iterator.h" #include "rocksdb/iterator.h"
@ -48,7 +48,7 @@ class ArenaWrappedDBIter : public Iterator {
// Get the arena to be used to allocate memory for DBIter to be wrapped, // Get the arena to be used to allocate memory for DBIter to be wrapped,
// as well as child iterators in it. // as well as child iterators in it.
virtual Arena* GetArena() { return &arena_; } virtual Arena* GetArena() { return &arena_; }
virtual ReadRangeDelAggregatorV2* GetRangeDelAggregator(); virtual ReadRangeDelAggregator* GetRangeDelAggregator();
// Set the internal iterator wrapped inside the DB Iterator. Usually it is // Set the internal iterator wrapped inside the DB Iterator. Usually it is
// a merging iterator. // a merging iterator.

@ -8,6 +8,7 @@
#include "db/db_test_util.h" #include "db/db_test_util.h"
#include "db/memtable.h" #include "db/memtable.h"
#include "db/range_del_aggregator.h"
#include "port/stack_trace.h" #include "port/stack_trace.h"
#include "rocksdb/memtablerep.h" #include "rocksdb/memtablerep.h"
#include "rocksdb/slice_transform.h" #include "rocksdb/slice_transform.h"
@ -135,7 +136,8 @@ TEST_F(DBMemTableTest, DuplicateSeq) {
MergeContext merge_context; MergeContext merge_context;
Options options; Options options;
InternalKeyComparator ikey_cmp(options.comparator); InternalKeyComparator ikey_cmp(options.comparator);
RangeDelAggregator range_del_agg(ikey_cmp, {} /* snapshots */); ReadRangeDelAggregator range_del_agg(&ikey_cmp,
kMaxSequenceNumber /* upper_bound */);
// Create a MemTable // Create a MemTable
InternalKeyComparator cmp(BytewiseComparator()); InternalKeyComparator cmp(BytewiseComparator());

@ -814,7 +814,7 @@ std::string DBTestBase::AllEntriesFor(const Slice& user_key, int cf) {
Arena arena; Arena arena;
auto options = CurrentOptions(); auto options = CurrentOptions();
InternalKeyComparator icmp(options.comparator); InternalKeyComparator icmp(options.comparator);
ReadRangeDelAggregatorV2 range_del_agg(&icmp, ReadRangeDelAggregator range_del_agg(&icmp,
kMaxSequenceNumber /* upper_bound */); kMaxSequenceNumber /* upper_bound */);
ScopedArenaIterator iter; ScopedArenaIterator iter;
if (cf == 0) { if (cf == 0) {
@ -1227,7 +1227,7 @@ void DBTestBase::validateNumberOfEntries(int numValues, int cf) {
Arena arena; Arena arena;
auto options = CurrentOptions(); auto options = CurrentOptions();
InternalKeyComparator icmp(options.comparator); InternalKeyComparator icmp(options.comparator);
ReadRangeDelAggregatorV2 range_del_agg(&icmp, ReadRangeDelAggregator range_del_agg(&icmp,
kMaxSequenceNumber /* upper_bound */); kMaxSequenceNumber /* upper_bound */);
// This should be defined after range_del_agg so that it destructs the // This should be defined after range_del_agg so that it destructs the
// assigned iterator before it range_del_agg is already destructed. // assigned iterator before it range_del_agg is already destructed.
@ -1437,7 +1437,7 @@ void DBTestBase::VerifyDBInternal(
std::vector<std::pair<std::string, std::string>> true_data) { std::vector<std::pair<std::string, std::string>> true_data) {
Arena arena; Arena arena;
InternalKeyComparator icmp(last_options_.comparator); InternalKeyComparator icmp(last_options_.comparator);
ReadRangeDelAggregatorV2 range_del_agg(&icmp, ReadRangeDelAggregator range_del_agg(&icmp,
kMaxSequenceNumber /* upper_bound */); kMaxSequenceNumber /* upper_bound */);
auto iter = auto iter =
dbfull()->NewInternalIterator(&arena, &range_del_agg, kMaxSequenceNumber); dbfull()->NewInternalIterator(&arena, &range_del_agg, kMaxSequenceNumber);

@ -15,7 +15,7 @@
#include "db/db_iter.h" #include "db/db_iter.h"
#include "db/dbformat.h" #include "db/dbformat.h"
#include "db/job_context.h" #include "db/job_context.h"
#include "db/range_del_aggregator_v2.h" #include "db/range_del_aggregator.h"
#include "db/range_tombstone_fragmenter.h" #include "db/range_tombstone_fragmenter.h"
#include "rocksdb/env.h" #include "rocksdb/env.h"
#include "rocksdb/slice.h" #include "rocksdb/slice.h"
@ -73,8 +73,8 @@ class ForwardLevelIterator : public InternalIterator {
delete file_iter_; delete file_iter_;
} }
ReadRangeDelAggregatorV2 range_del_agg( ReadRangeDelAggregator range_del_agg(&cfd_->internal_comparator(),
&cfd_->internal_comparator(), kMaxSequenceNumber /* upper_bound */); kMaxSequenceNumber /* upper_bound */);
file_iter_ = cfd_->table_cache()->NewIterator( file_iter_ = cfd_->table_cache()->NewIterator(
read_options_, *(cfd_->soptions()), cfd_->internal_comparator(), read_options_, *(cfd_->soptions()), cfd_->internal_comparator(),
*files_[file_index_], *files_[file_index_],
@ -610,7 +610,7 @@ void ForwardIterator::RebuildIterators(bool refresh_sv) {
// New // New
sv_ = cfd_->GetReferencedSuperVersion(&(db_->mutex_)); sv_ = cfd_->GetReferencedSuperVersion(&(db_->mutex_));
} }
ReadRangeDelAggregatorV2 range_del_agg(&cfd_->internal_comparator(), ReadRangeDelAggregator range_del_agg(&cfd_->internal_comparator(),
kMaxSequenceNumber /* upper_bound */); kMaxSequenceNumber /* upper_bound */);
mutable_iter_ = sv_->mem->NewIterator(read_options_, &arena_); mutable_iter_ = sv_->mem->NewIterator(read_options_, &arena_);
sv_->imm->AddIterators(read_options_, &imm_iters_, &arena_); sv_->imm->AddIterators(read_options_, &imm_iters_, &arena_);
@ -669,7 +669,7 @@ void ForwardIterator::RenewIterators() {
mutable_iter_ = svnew->mem->NewIterator(read_options_, &arena_); mutable_iter_ = svnew->mem->NewIterator(read_options_, &arena_);
svnew->imm->AddIterators(read_options_, &imm_iters_, &arena_); svnew->imm->AddIterators(read_options_, &imm_iters_, &arena_);
ReadRangeDelAggregatorV2 range_del_agg(&cfd_->internal_comparator(), ReadRangeDelAggregator range_del_agg(&cfd_->internal_comparator(),
kMaxSequenceNumber /* upper_bound */); kMaxSequenceNumber /* upper_bound */);
if (!read_options_.ignore_range_deletions) { if (!read_options_.ignore_range_deletions) {
std::unique_ptr<FragmentedRangeTombstoneIterator> range_del_iter( std::unique_ptr<FragmentedRangeTombstoneIterator> range_del_iter(

@ -159,7 +159,7 @@ bool MemTableListVersion::GetFromList(
Status MemTableListVersion::AddRangeTombstoneIterators( Status MemTableListVersion::AddRangeTombstoneIterators(
const ReadOptions& read_opts, Arena* /*arena*/, const ReadOptions& read_opts, Arena* /*arena*/,
RangeDelAggregatorV2* range_del_agg) { RangeDelAggregator* range_del_agg) {
assert(range_del_agg != nullptr); assert(range_del_agg != nullptr);
for (auto& m : memlist_) { for (auto& m : memlist_) {
// Using kMaxSequenceNumber is OK because these are immutable memtables. // Using kMaxSequenceNumber is OK because these are immutable memtables.

@ -15,7 +15,7 @@
#include "db/dbformat.h" #include "db/dbformat.h"
#include "db/logs_with_prep_tracker.h" #include "db/logs_with_prep_tracker.h"
#include "db/memtable.h" #include "db/memtable.h"
#include "db/range_del_aggregator_v2.h" #include "db/range_del_aggregator.h"
#include "monitoring/instrumented_mutex.h" #include "monitoring/instrumented_mutex.h"
#include "rocksdb/db.h" #include "rocksdb/db.h"
#include "rocksdb/iterator.h" #include "rocksdb/iterator.h"
@ -91,7 +91,7 @@ class MemTableListVersion {
} }
Status AddRangeTombstoneIterators(const ReadOptions& read_opts, Arena* arena, Status AddRangeTombstoneIterators(const ReadOptions& read_opts, Arena* arena,
RangeDelAggregatorV2* range_del_agg); RangeDelAggregator* range_del_agg);
void AddIterators(const ReadOptions& options, void AddIterators(const ReadOptions& options,
std::vector<InternalIterator*>* iterator_list, std::vector<InternalIterator*>* iterator_list,

@ -8,7 +8,6 @@
#include <string> #include <string>
#include <vector> #include <vector>
#include "db/merge_context.h" #include "db/merge_context.h"
#include "db/range_del_aggregator.h"
#include "db/version_set.h" #include "db/version_set.h"
#include "db/write_controller.h" #include "db/write_controller.h"
#include "rocksdb/db.h" #include "rocksdb/db.h"

@ -79,7 +79,8 @@ class MergeContext {
return GetOperandsDirectionForward(); return GetOperandsDirectionForward();
} }
// Return all the operands in the order as they were merged (passed to FullMerge or FullMergeV2) // Return all the operands in the order as they were merged (passed to
// FullMerge or FullMergeV2)
const std::vector<Slice>& GetOperandsDirectionForward() { const std::vector<Slice>& GetOperandsDirectionForward() {
if (!operand_list_) { if (!operand_list_) {
return empty_operand_list; return empty_operand_list;
@ -89,7 +90,8 @@ class MergeContext {
return *operand_list_; return *operand_list_;
} }
// Return all the operands in the reversed order relative to how they were merged (passed to FullMerge or FullMergeV2) // Return all the operands in the reversed order relative to how they were
// merged (passed to FullMerge or FullMergeV2)
const std::vector<Slice>& GetOperandsDirectionBackward() { const std::vector<Slice>& GetOperandsDirectionBackward() {
if (!operand_list_) { if (!operand_list_) {
return empty_operand_list; return empty_operand_list;

@ -114,7 +114,7 @@ Status MergeHelper::TimedFullMerge(const MergeOperator* merge_operator,
// TODO: Avoid the snapshot stripe map lookup in CompactionRangeDelAggregator // TODO: Avoid the snapshot stripe map lookup in CompactionRangeDelAggregator
// and just pass the StripeRep corresponding to the stripe being merged. // and just pass the StripeRep corresponding to the stripe being merged.
Status MergeHelper::MergeUntil(InternalIterator* iter, Status MergeHelper::MergeUntil(InternalIterator* iter,
CompactionRangeDelAggregatorV2* range_del_agg, CompactionRangeDelAggregator* range_del_agg,
const SequenceNumber stop_before, const SequenceNumber stop_before,
const bool at_bottom) { const bool at_bottom) {
// Get a copy of the internal key, before it's invalidated by iter->Next() // Get a copy of the internal key, before it's invalidated by iter->Next()

@ -11,7 +11,7 @@
#include "db/dbformat.h" #include "db/dbformat.h"
#include "db/merge_context.h" #include "db/merge_context.h"
#include "db/range_del_aggregator_v2.h" #include "db/range_del_aggregator.h"
#include "db/snapshot_checker.h" #include "db/snapshot_checker.h"
#include "rocksdb/compaction_filter.h" #include "rocksdb/compaction_filter.h"
#include "rocksdb/env.h" #include "rocksdb/env.h"
@ -78,7 +78,7 @@ class MergeHelper {
// //
// REQUIRED: The first key in the input is not corrupted. // REQUIRED: The first key in the input is not corrupted.
Status MergeUntil(InternalIterator* iter, Status MergeUntil(InternalIterator* iter,
CompactionRangeDelAggregatorV2* range_del_agg = nullptr, CompactionRangeDelAggregator* range_del_agg = nullptr,
const SequenceNumber stop_before = 0, const SequenceNumber stop_before = 0,
const bool at_bottom = false); const bool at_bottom = false);

File diff suppressed because it is too large Load Diff

@ -1,10 +1,12 @@
// Copyright (c) 2016-present, Facebook, Inc. All rights reserved. // Copyright (c) 2018-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the // This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License // COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory). // (found in the LICENSE.Apache file in the root directory).
#pragma once #pragma once
#include <algorithm>
#include <iterator>
#include <list> #include <list>
#include <map> #include <map>
#include <set> #include <set>
@ -14,220 +16,422 @@
#include "db/compaction_iteration_stats.h" #include "db/compaction_iteration_stats.h"
#include "db/dbformat.h" #include "db/dbformat.h"
#include "db/pinned_iterators_manager.h" #include "db/pinned_iterators_manager.h"
#include "db/range_del_aggregator.h"
#include "db/range_tombstone_fragmenter.h"
#include "db/version_edit.h" #include "db/version_edit.h"
#include "include/rocksdb/comparator.h" #include "include/rocksdb/comparator.h"
#include "include/rocksdb/types.h" #include "include/rocksdb/types.h"
#include "table/internal_iterator.h" #include "table/internal_iterator.h"
#include "table/scoped_arena_iterator.h" #include "table/scoped_arena_iterator.h"
#include "table/table_builder.h" #include "table/table_builder.h"
#include "util/heap.h"
#include "util/kv_map.h" #include "util/kv_map.h"
namespace rocksdb { namespace rocksdb {
// RangeDelMaps maintain position across calls to ShouldDelete. The caller may class TruncatedRangeDelIterator {
// wish to specify a mode to optimize positioning the iterator during the next public:
// call to ShouldDelete. The non-kFullScan modes are only available when TruncatedRangeDelIterator(
// deletion collapsing is enabled. std::unique_ptr<FragmentedRangeTombstoneIterator> iter,
// const InternalKeyComparator* icmp, const InternalKey* smallest,
// For example, if we invoke Next() on an iterator, kForwardTraversal should be const InternalKey* largest);
// specified to advance one-by-one through deletions until one is found with its
// interval containing the key. This will typically be faster than doing a full bool Valid() const;
// binary search (kBinarySearch).
enum class RangeDelPositioningMode { void Next();
kFullScan, // used iff collapse_deletions_ == false void Prev();
kForwardTraversal,
kBackwardTraversal, void InternalNext();
kBinarySearch,
// Seeks to the tombstone with the highest viisble sequence number that covers
// target (a user key). If no such tombstone exists, the position will be at
// the earliest tombstone that ends after target.
void Seek(const Slice& target);
// Seeks to the tombstone with the highest viisble sequence number that covers
// target (a user key). If no such tombstone exists, the position will be at
// the latest tombstone that starts before target.
void SeekForPrev(const Slice& target);
void SeekToFirst();
void SeekToLast();
ParsedInternalKey start_key() const {
return (smallest_ == nullptr ||
icmp_->Compare(*smallest_, iter_->parsed_start_key()) <= 0)
? iter_->parsed_start_key()
: *smallest_;
}
ParsedInternalKey end_key() const {
return (largest_ == nullptr ||
icmp_->Compare(iter_->parsed_end_key(), *largest_) <= 0)
? iter_->parsed_end_key()
: *largest_;
}
SequenceNumber seq() const { return iter_->seq(); }
std::map<SequenceNumber, std::unique_ptr<TruncatedRangeDelIterator>>
SplitBySnapshot(const std::vector<SequenceNumber>& snapshots);
SequenceNumber upper_bound() const { return iter_->upper_bound(); }
SequenceNumber lower_bound() const { return iter_->lower_bound(); }
private:
std::unique_ptr<FragmentedRangeTombstoneIterator> iter_;
const InternalKeyComparator* icmp_;
const ParsedInternalKey* smallest_ = nullptr;
const ParsedInternalKey* largest_ = nullptr;
std::list<ParsedInternalKey> pinned_bounds_;
const InternalKey* smallest_ikey_;
const InternalKey* largest_ikey_;
};
struct SeqMaxComparator {
bool operator()(const TruncatedRangeDelIterator* a,
const TruncatedRangeDelIterator* b) const {
return a->seq() > b->seq();
}
}; };
// TruncatedRangeTombstones are a slight generalization of regular struct StartKeyMinComparator {
// RangeTombstones that can represent truncations caused by SST boundaries. explicit StartKeyMinComparator(const InternalKeyComparator* c) : icmp(c) {}
// Instead of using user keys to represent the start and end keys, they instead
// use internal keys, whose sequence number indicates the sequence number of bool operator()(const TruncatedRangeDelIterator* a,
// the smallest/largest SST key (in the case where a tombstone is untruncated, const TruncatedRangeDelIterator* b) const {
// the sequence numbers will be kMaxSequenceNumber for both start and end return icmp->Compare(a->start_key(), b->start_key()) > 0;
// keys). Like RangeTombstones, TruncatedRangeTombstone are also }
// end-key-exclusive.
struct TruncatedRangeTombstone { const InternalKeyComparator* icmp;
TruncatedRangeTombstone(const ParsedInternalKey& sk,
const ParsedInternalKey& ek, SequenceNumber s)
: start_key_(sk), end_key_(ek), seq_(s) {}
RangeTombstone Tombstone() const {
// The RangeTombstone returned here can cover less than the
// TruncatedRangeTombstone when its end key has a seqnum that is not
// kMaxSequenceNumber. Since this method is only used by RangeDelIterators
// (which in turn are only used during flush/compaction), we avoid this
// problem by using truncation boundaries spanning multiple SSTs, which
// are selected in a way that guarantee a clean break at the end key.
assert(end_key_.sequence == kMaxSequenceNumber);
return RangeTombstone(start_key_.user_key, end_key_.user_key, seq_);
}
ParsedInternalKey start_key_;
ParsedInternalKey end_key_;
SequenceNumber seq_;
}; };
// A RangeDelIterator iterates over range deletion tombstones. class ForwardRangeDelIterator {
class RangeDelIterator {
public: public:
virtual ~RangeDelIterator() = default; ForwardRangeDelIterator(
const InternalKeyComparator* icmp,
virtual bool Valid() const = 0; const std::vector<std::unique_ptr<TruncatedRangeDelIterator>>* iters);
virtual void Next() = 0;
// NOTE: the Slice passed to this method must be a user key. bool ShouldDelete(const ParsedInternalKey& parsed);
virtual void Seek(const Slice& target) = 0; void Invalidate();
virtual void Seek(const ParsedInternalKey& target) = 0;
virtual RangeTombstone Tombstone() const = 0; void AddNewIter(TruncatedRangeDelIterator* iter,
const ParsedInternalKey& parsed) {
iter->Seek(parsed.user_key);
PushIter(iter, parsed);
assert(active_iters_.size() == active_seqnums_.size());
}
size_t UnusedIdx() const { return unused_idx_; }
void IncUnusedIdx() { unused_idx_++; }
private:
using ActiveSeqSet =
std::multiset<TruncatedRangeDelIterator*, SeqMaxComparator>;
struct EndKeyMinComparator {
explicit EndKeyMinComparator(const InternalKeyComparator* c) : icmp(c) {}
bool operator()(const ActiveSeqSet::const_iterator& a,
const ActiveSeqSet::const_iterator& b) const {
return icmp->Compare((*a)->end_key(), (*b)->end_key()) > 0;
}
const InternalKeyComparator* icmp;
};
void PushIter(TruncatedRangeDelIterator* iter,
const ParsedInternalKey& parsed) {
if (!iter->Valid()) {
// The iterator has been fully consumed, so we don't need to add it to
// either of the heaps.
return;
}
int cmp = icmp_->Compare(parsed, iter->start_key());
if (cmp < 0) {
PushInactiveIter(iter);
} else {
PushActiveIter(iter);
}
}
void PushActiveIter(TruncatedRangeDelIterator* iter) {
auto seq_pos = active_seqnums_.insert(iter);
active_iters_.push(seq_pos);
}
TruncatedRangeDelIterator* PopActiveIter() {
auto active_top = active_iters_.top();
auto iter = *active_top;
active_iters_.pop();
active_seqnums_.erase(active_top);
return iter;
}
void PushInactiveIter(TruncatedRangeDelIterator* iter) {
inactive_iters_.push(iter);
}
TruncatedRangeDelIterator* PopInactiveIter() {
auto* iter = inactive_iters_.top();
inactive_iters_.pop();
return iter;
}
const InternalKeyComparator* icmp_;
const std::vector<std::unique_ptr<TruncatedRangeDelIterator>>* iters_;
size_t unused_idx_;
ActiveSeqSet active_seqnums_;
BinaryHeap<ActiveSeqSet::const_iterator, EndKeyMinComparator> active_iters_;
BinaryHeap<TruncatedRangeDelIterator*, StartKeyMinComparator> inactive_iters_;
}; };
// A RangeDelMap keeps track of range deletion tombstones within a snapshot class ReverseRangeDelIterator {
// stripe.
//
// RangeDelMaps are used internally by RangeDelAggregator. They are not intended
// to be used directly.
class RangeDelMap {
public: public:
virtual ~RangeDelMap() = default; ReverseRangeDelIterator(
const InternalKeyComparator* icmp,
const std::vector<std::unique_ptr<TruncatedRangeDelIterator>>* iters);
virtual bool ShouldDelete(const ParsedInternalKey& parsed, bool ShouldDelete(const ParsedInternalKey& parsed);
RangeDelPositioningMode mode) = 0; void Invalidate();
virtual bool IsRangeOverlapped(const ParsedInternalKey& start,
const ParsedInternalKey& end) = 0; void AddNewIter(TruncatedRangeDelIterator* iter,
virtual void InvalidatePosition() = 0; const ParsedInternalKey& parsed) {
iter->SeekForPrev(parsed.user_key);
PushIter(iter, parsed);
assert(active_iters_.size() == active_seqnums_.size());
}
size_t UnusedIdx() const { return unused_idx_; }
void IncUnusedIdx() { unused_idx_++; }
private:
using ActiveSeqSet =
std::multiset<TruncatedRangeDelIterator*, SeqMaxComparator>;
struct EndKeyMaxComparator {
explicit EndKeyMaxComparator(const InternalKeyComparator* c) : icmp(c) {}
bool operator()(const TruncatedRangeDelIterator* a,
const TruncatedRangeDelIterator* b) const {
return icmp->Compare(a->end_key(), b->end_key()) < 0;
}
const InternalKeyComparator* icmp;
};
struct StartKeyMaxComparator {
explicit StartKeyMaxComparator(const InternalKeyComparator* c) : icmp(c) {}
bool operator()(const ActiveSeqSet::const_iterator& a,
const ActiveSeqSet::const_iterator& b) const {
return icmp->Compare((*a)->start_key(), (*b)->start_key()) < 0;
}
const InternalKeyComparator* icmp;
};
virtual size_t Size() const = 0; void PushIter(TruncatedRangeDelIterator* iter,
bool IsEmpty() const { return Size() == 0; } const ParsedInternalKey& parsed) {
if (!iter->Valid()) {
// The iterator has been fully consumed, so we don't need to add it to
// either of the heaps.
} else if (icmp_->Compare(iter->end_key(), parsed) <= 0) {
PushInactiveIter(iter);
} else {
PushActiveIter(iter);
}
}
void PushActiveIter(TruncatedRangeDelIterator* iter) {
auto seq_pos = active_seqnums_.insert(iter);
active_iters_.push(seq_pos);
}
TruncatedRangeDelIterator* PopActiveIter() {
auto active_top = active_iters_.top();
auto iter = *active_top;
active_iters_.pop();
active_seqnums_.erase(active_top);
return iter;
}
void PushInactiveIter(TruncatedRangeDelIterator* iter) {
inactive_iters_.push(iter);
}
virtual void AddTombstone(TruncatedRangeTombstone tombstone) = 0; TruncatedRangeDelIterator* PopInactiveIter() {
virtual std::unique_ptr<RangeDelIterator> NewIterator() = 0; auto* iter = inactive_iters_.top();
inactive_iters_.pop();
return iter;
}
const InternalKeyComparator* icmp_;
const std::vector<std::unique_ptr<TruncatedRangeDelIterator>>* iters_;
size_t unused_idx_;
ActiveSeqSet active_seqnums_;
BinaryHeap<ActiveSeqSet::const_iterator, StartKeyMaxComparator> active_iters_;
BinaryHeap<TruncatedRangeDelIterator*, EndKeyMaxComparator> inactive_iters_;
}; };
// A RangeDelAggregator aggregates range deletion tombstones as they are enum class RangeDelPositioningMode { kForwardTraversal, kBackwardTraversal };
// encountered in memtables/SST files. It provides methods that check whether a
// key is covered by range tombstones or write the relevant tombstones to a new
// SST file.
class RangeDelAggregator { class RangeDelAggregator {
public: public:
// @param snapshots These are used to organize the tombstones into snapshot explicit RangeDelAggregator(const InternalKeyComparator* icmp)
// stripes, which is the seqnum range between consecutive snapshots, : icmp_(icmp) {}
// including the higher snapshot and excluding the lower one. Currently, virtual ~RangeDelAggregator() {}
// this is used by ShouldDelete() to prevent deletion of keys that are
// covered by range tombstones in other snapshot stripes. This constructor virtual void AddTombstones(
// is used for writes (flush/compaction). All DB snapshots are provided std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter,
// such that no keys are removed that are uncovered according to any DB const InternalKey* smallest = nullptr,
// snapshot. const InternalKey* largest = nullptr) = 0;
// Note this overload does not lazily initialize Rep.
RangeDelAggregator(const InternalKeyComparator& icmp, bool ShouldDelete(const Slice& key, RangeDelPositioningMode mode) {
const std::vector<SequenceNumber>& snapshots, ParsedInternalKey parsed;
bool collapse_deletions = true); if (!ParseInternalKey(key, &parsed)) {
// @param upper_bound Similar to snapshots above, except with a single
// snapshot, which allows us to store the snapshot on the stack and defer
// initialization of heap-allocating members (in Rep) until the first range
// deletion is encountered. This constructor is used in case of reads (get/
// iterator), for which only the user snapshot (upper_bound) is provided
// such that the seqnum space is divided into two stripes. Only the older
// stripe will be used by ShouldDelete().
RangeDelAggregator(const InternalKeyComparator& icmp,
SequenceNumber upper_bound,
bool collapse_deletions = false);
// Returns whether the key should be deleted, which is the case when it is
// covered by a range tombstone residing in the same snapshot stripe.
// @param mode If collapse_deletions_ is true, this dictates how we will find
// the deletion whose interval contains this key. Otherwise, its
// value must be kFullScan indicating linear scan from beginning.
bool ShouldDelete(
const ParsedInternalKey& parsed,
RangeDelPositioningMode mode = RangeDelPositioningMode::kFullScan) {
if (rep_ == nullptr) {
return false; return false;
} }
return ShouldDeleteImpl(parsed, mode); return ShouldDelete(parsed, mode);
} }
bool ShouldDelete( virtual bool ShouldDelete(const ParsedInternalKey& parsed,
const Slice& internal_key, RangeDelPositioningMode mode) = 0;
RangeDelPositioningMode mode = RangeDelPositioningMode::kFullScan) {
if (rep_ == nullptr) { virtual void InvalidateRangeDelMapPositions() = 0;
return false;
virtual bool IsEmpty() const = 0;
bool AddFile(uint64_t file_number) {
return files_seen_.insert(file_number).second;
} }
return ShouldDeleteImpl(internal_key, mode);
protected:
class StripeRep {
public:
StripeRep(const InternalKeyComparator* icmp, SequenceNumber upper_bound,
SequenceNumber lower_bound)
: icmp_(icmp),
forward_iter_(icmp, &iters_),
reverse_iter_(icmp, &iters_),
upper_bound_(upper_bound),
lower_bound_(lower_bound) {}
void AddTombstones(std::unique_ptr<TruncatedRangeDelIterator> input_iter) {
iters_.push_back(std::move(input_iter));
} }
bool ShouldDeleteImpl(const ParsedInternalKey& parsed,
RangeDelPositioningMode mode); bool IsEmpty() const { return iters_.empty(); }
bool ShouldDeleteImpl(const Slice& internal_key,
bool ShouldDelete(const ParsedInternalKey& parsed,
RangeDelPositioningMode mode); RangeDelPositioningMode mode);
// Checks whether range deletions cover any keys between `start` and `end`, void Invalidate() {
// inclusive. InvalidateForwardIter();
// InvalidateReverseIter();
// @param start User key representing beginning of range to check for overlap. }
// @param end User key representing end of range to check for overlap. This
// argument is inclusive, so the existence of a range deletion covering
// `end` causes this to return true.
bool IsRangeOverlapped(const Slice& start, const Slice& end); bool IsRangeOverlapped(const Slice& start, const Slice& end);
// Adds tombstones to the tombstone aggregation structure maintained by this private:
// object. Tombstones are truncated to smallest and largest. If smallest (or bool InStripe(SequenceNumber seq) const {
// largest) is null, it is not used for truncation. When adding range return lower_bound_ <= seq && seq <= upper_bound_;
// tombstones present in an sstable, smallest and largest should be set to }
// the smallest and largest keys from the sstable file metadata. Note that
// tombstones end keys are exclusive while largest is inclusive. void InvalidateForwardIter() { forward_iter_.Invalidate(); }
// @return non-OK status if any of the tombstone keys are corrupted.
Status AddTombstones(std::unique_ptr<InternalIterator> input, void InvalidateReverseIter() { reverse_iter_.Invalidate(); }
const InternalKeyComparator* icmp_;
std::vector<std::unique_ptr<TruncatedRangeDelIterator>> iters_;
ForwardRangeDelIterator forward_iter_;
ReverseRangeDelIterator reverse_iter_;
SequenceNumber upper_bound_;
SequenceNumber lower_bound_;
};
const InternalKeyComparator* icmp_;
private:
std::set<uint64_t> files_seen_;
};
class ReadRangeDelAggregator : public RangeDelAggregator {
public:
ReadRangeDelAggregator(const InternalKeyComparator* icmp,
SequenceNumber upper_bound)
: RangeDelAggregator(icmp),
rep_(icmp, upper_bound, 0 /* lower_bound */) {}
~ReadRangeDelAggregator() override {}
using RangeDelAggregator::ShouldDelete;
void AddTombstones(
std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter,
const InternalKey* smallest = nullptr, const InternalKey* smallest = nullptr,
const InternalKey* largest = nullptr); const InternalKey* largest = nullptr) override;
// Resets iterators maintained across calls to ShouldDelete(). This may be bool ShouldDelete(const ParsedInternalKey& parsed,
// called when the tombstones change, or the owner may call explicitly, e.g., RangeDelPositioningMode mode) override;
// if it's an iterator that just seeked to an arbitrary position. The effect
// of invalidation is that the following call to ShouldDelete() will binary bool IsRangeOverlapped(const Slice& start, const Slice& end);
// search for its tombstone.
void InvalidateRangeDelMapPositions(); void InvalidateRangeDelMapPositions() override { rep_.Invalidate(); }
bool IsEmpty(); bool IsEmpty() const override { return rep_.IsEmpty(); }
bool AddFile(uint64_t file_number);
// Create a new iterator over the range deletion tombstones in all of the
// snapshot stripes in this aggregator. Tombstones are presented in start key
// order. Tombstones with the same start key are presented in arbitrary order.
//
// The iterator is invalidated after any call to AddTombstones. It is the
// caller's responsibility to avoid using invalid iterators.
std::unique_ptr<RangeDelIterator> NewIterator();
private: private:
// Maps snapshot seqnum -> map of tombstones that fall in that stripe, i.e., StripeRep rep_;
// their seqnums are greater than the next smaller snapshot's seqnum, and the
// corresponding index into the list of snapshots. Each entry is lazily
// initialized.
typedef std::map<SequenceNumber,
std::pair<std::unique_ptr<RangeDelMap>, size_t>>
StripeMap;
struct Rep {
std::vector<SequenceNumber> snapshots_;
StripeMap stripe_map_;
PinnedIteratorsManager pinned_iters_mgr_;
std::list<std::string> pinned_slices_;
std::set<uint64_t> added_files_;
}; };
// Initializes rep_ lazily. This aggregator object is constructed for every
// read, so expensive members should only be created when necessary, i.e.,
// once the first range deletion is encountered.
void InitRep(const std::vector<SequenceNumber>& snapshots);
std::unique_ptr<RangeDelMap> NewRangeDelMap(); class CompactionRangeDelAggregator : public RangeDelAggregator {
RangeDelMap* GetRangeDelMapIfExists(SequenceNumber seq); public:
RangeDelMap& GetRangeDelMap(SequenceNumber seq); CompactionRangeDelAggregator(const InternalKeyComparator* icmp,
const std::vector<SequenceNumber>& snapshots)
: RangeDelAggregator(icmp), snapshots_(&snapshots) {}
~CompactionRangeDelAggregator() override {}
SequenceNumber upper_bound_; void AddTombstones(
std::unique_ptr<Rep> rep_; std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter,
const InternalKeyComparator& icmp_; const InternalKey* smallest = nullptr,
// collapse range deletions so they're binary searchable const InternalKey* largest = nullptr) override;
const bool collapse_deletions_;
using RangeDelAggregator::ShouldDelete;
bool ShouldDelete(const ParsedInternalKey& parsed,
RangeDelPositioningMode mode) override;
bool IsRangeOverlapped(const Slice& start, const Slice& end);
void InvalidateRangeDelMapPositions() override {
for (auto& rep : reps_) {
rep.second.Invalidate();
}
}
bool IsEmpty() const override {
for (const auto& rep : reps_) {
if (!rep.second.IsEmpty()) {
return false;
}
}
return true;
}
// Creates an iterator over all the range tombstones in the aggregator, for
// use in compaction. Nullptr arguments indicate that the iterator range is
// unbounded.
// NOTE: the boundaries are used for optimization purposes to reduce the
// number of tombstones that are passed to the fragmenter; they do not
// guarantee that the resulting iterator only contains range tombstones that
// cover keys in the provided range. If required, these bounds must be
// enforced during iteration.
std::unique_ptr<FragmentedRangeTombstoneIterator> NewIterator(
const Slice* lower_bound = nullptr, const Slice* upper_bound = nullptr,
bool upper_bound_inclusive = false);
private:
std::vector<std::unique_ptr<TruncatedRangeDelIterator>> parent_iters_;
std::map<SequenceNumber, StripeRep> reps_;
const std::vector<SequenceNumber>* snapshots_;
}; };
} // namespace rocksdb } // namespace rocksdb

@ -20,7 +20,6 @@ int main() {
#include <vector> #include <vector>
#include "db/range_del_aggregator.h" #include "db/range_del_aggregator.h"
#include "db/range_del_aggregator_v2.h"
#include "db/range_tombstone_fragmenter.h" #include "db/range_tombstone_fragmenter.h"
#include "rocksdb/comparator.h" #include "rocksdb/comparator.h"
#include "rocksdb/env.h" #include "rocksdb/env.h"
@ -48,8 +47,6 @@ DEFINE_double(tombstone_width_mean, 100.0, "average range tombstone width");
DEFINE_double(tombstone_width_stddev, 0.0, DEFINE_double(tombstone_width_stddev, 0.0,
"standard deviation of range tombstone width"); "standard deviation of range tombstone width");
DEFINE_bool(use_collapsed, true, "use the collapsed range tombstone map");
DEFINE_int32(seed, 0, "random number generator seed"); DEFINE_int32(seed, 0, "random number generator seed");
DEFINE_int32(should_deletes_per_run, 1, "number of ShouldDelete calls per run"); DEFINE_int32(should_deletes_per_run, 1, "number of ShouldDelete calls per run");
@ -57,8 +54,6 @@ DEFINE_int32(should_deletes_per_run, 1, "number of ShouldDelete calls per run");
DEFINE_int32(add_tombstones_per_run, 1, DEFINE_int32(add_tombstones_per_run, 1,
"number of AddTombstones calls per run"); "number of AddTombstones calls per run");
DEFINE_bool(use_v2_aggregator, false, "benchmark RangeDelAggregatorV2");
namespace { namespace {
struct Stats { struct Stats {
@ -187,14 +182,10 @@ int main(int argc, char** argv) {
std::vector<rocksdb::PersistentRangeTombstone>( std::vector<rocksdb::PersistentRangeTombstone>(
FLAGS_num_range_tombstones); FLAGS_num_range_tombstones);
} }
auto mode = FLAGS_use_collapsed auto mode = rocksdb::RangeDelPositioningMode::kForwardTraversal;
? rocksdb::RangeDelPositioningMode::kForwardTraversal
: rocksdb::RangeDelPositioningMode::kFullScan;
for (int i = 0; i < FLAGS_num_runs; i++) { for (int i = 0; i < FLAGS_num_runs; i++) {
rocksdb::RangeDelAggregator range_del_agg(icmp, {} /* snapshots */, rocksdb::ReadRangeDelAggregator range_del_agg(
FLAGS_use_collapsed);
rocksdb::ReadRangeDelAggregatorV2 range_del_agg_v2(
&icmp, rocksdb::kMaxSequenceNumber /* upper_bound */); &icmp, rocksdb::kMaxSequenceNumber /* upper_bound */);
std::vector<std::unique_ptr<rocksdb::FragmentedRangeTombstoneList> > std::vector<std::unique_ptr<rocksdb::FragmentedRangeTombstoneList> >
@ -223,18 +214,11 @@ int main(int argc, char** argv) {
fragmented_range_tombstone_lists.back().get(), icmp, fragmented_range_tombstone_lists.back().get(), icmp,
rocksdb::kMaxSequenceNumber)); rocksdb::kMaxSequenceNumber));
if (FLAGS_use_v2_aggregator) { rocksdb::StopWatchNano stop_watch_add_tombstones(rocksdb::Env::Default(),
rocksdb::StopWatchNano stop_watch_add_tombstones( true /* auto_start */);
rocksdb::Env::Default(), true /* auto_start */); range_del_agg.AddTombstones(std::move(fragmented_range_del_iter));
range_del_agg_v2.AddTombstones(std::move(fragmented_range_del_iter));
stats.time_add_tombstones += stop_watch_add_tombstones.ElapsedNanos();
} else {
rocksdb::StopWatchNano stop_watch_add_tombstones(
rocksdb::Env::Default(), true /* auto_start */);
range_del_agg.AddTombstones(std::move(range_del_iter));
stats.time_add_tombstones += stop_watch_add_tombstones.ElapsedNanos(); stats.time_add_tombstones += stop_watch_add_tombstones.ElapsedNanos();
} }
}
rocksdb::ParsedInternalKey parsed_key; rocksdb::ParsedInternalKey parsed_key;
parsed_key.sequence = FLAGS_num_range_tombstones / 2; parsed_key.sequence = FLAGS_num_range_tombstones / 2;
@ -247,18 +231,10 @@ int main(int argc, char** argv) {
std::string key_string = rocksdb::Key(first_key + j); std::string key_string = rocksdb::Key(first_key + j);
parsed_key.user_key = key_string; parsed_key.user_key = key_string;
uint64_t call_time;
if (FLAGS_use_v2_aggregator) {
rocksdb::StopWatchNano stop_watch_should_delete(rocksdb::Env::Default(),
true /* auto_start */);
range_del_agg_v2.ShouldDelete(parsed_key, mode);
call_time = stop_watch_should_delete.ElapsedNanos();
} else {
rocksdb::StopWatchNano stop_watch_should_delete(rocksdb::Env::Default(), rocksdb::StopWatchNano stop_watch_should_delete(rocksdb::Env::Default(),
true /* auto_start */); true /* auto_start */);
range_del_agg.ShouldDelete(parsed_key, mode); range_del_agg.ShouldDelete(parsed_key, mode);
call_time = stop_watch_should_delete.ElapsedNanos(); uint64_t call_time = stop_watch_should_delete.ElapsedNanos();
}
if (j == 0) { if (j == 0) {
stats.time_first_should_delete += call_time; stats.time_first_should_delete += call_time;

File diff suppressed because it is too large Load Diff

@ -1,492 +0,0 @@
// Copyright (c) 2018-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
#include "db/range_del_aggregator_v2.h"
#include "db/compaction_iteration_stats.h"
#include "db/dbformat.h"
#include "db/pinned_iterators_manager.h"
#include "db/range_del_aggregator.h"
#include "db/range_tombstone_fragmenter.h"
#include "db/version_edit.h"
#include "include/rocksdb/comparator.h"
#include "include/rocksdb/types.h"
#include "table/internal_iterator.h"
#include "table/scoped_arena_iterator.h"
#include "table/table_builder.h"
#include "util/heap.h"
#include "util/kv_map.h"
#include "util/vector_iterator.h"
namespace rocksdb {
TruncatedRangeDelIterator::TruncatedRangeDelIterator(
std::unique_ptr<FragmentedRangeTombstoneIterator> iter,
const InternalKeyComparator* icmp, const InternalKey* smallest,
const InternalKey* largest)
: iter_(std::move(iter)),
icmp_(icmp),
smallest_ikey_(smallest),
largest_ikey_(largest) {
if (smallest != nullptr) {
pinned_bounds_.emplace_back();
auto& parsed_smallest = pinned_bounds_.back();
if (!ParseInternalKey(smallest->Encode(), &parsed_smallest)) {
assert(false);
}
smallest_ = &parsed_smallest;
}
if (largest != nullptr) {
pinned_bounds_.emplace_back();
auto& parsed_largest = pinned_bounds_.back();
if (!ParseInternalKey(largest->Encode(), &parsed_largest)) {
assert(false);
}
if (parsed_largest.type == kTypeRangeDeletion &&
parsed_largest.sequence == kMaxSequenceNumber) {
// The file boundary has been artificially extended by a range tombstone.
// We do not need to adjust largest to properly truncate range
// tombstones that extend past the boundary.
} else if (parsed_largest.sequence == 0) {
// The largest key in the sstable has a sequence number of 0. Since we
// guarantee that no internal keys with the same user key and sequence
// number can exist in a DB, we know that the largest key in this sstable
// cannot exist as the smallest key in the next sstable. This further
// implies that no range tombstone in this sstable covers largest;
// otherwise, the file boundary would have been artificially extended.
//
// Therefore, we will never truncate a range tombstone at largest, so we
// can leave it unchanged.
} else {
// The same user key may straddle two sstable boundaries. To ensure that
// the truncated end key can cover the largest key in this sstable, reduce
// its sequence number by 1.
parsed_largest.sequence -= 1;
}
largest_ = &parsed_largest;
}
}
bool TruncatedRangeDelIterator::Valid() const {
return iter_->Valid() &&
(smallest_ == nullptr ||
icmp_->Compare(*smallest_, iter_->parsed_end_key()) < 0) &&
(largest_ == nullptr ||
icmp_->Compare(iter_->parsed_start_key(), *largest_) < 0);
}
void TruncatedRangeDelIterator::Next() { iter_->TopNext(); }
void TruncatedRangeDelIterator::Prev() { iter_->TopPrev(); }
void TruncatedRangeDelIterator::InternalNext() { iter_->Next(); }
// NOTE: target is a user key
void TruncatedRangeDelIterator::Seek(const Slice& target) {
if (largest_ != nullptr &&
icmp_->Compare(*largest_, ParsedInternalKey(target, kMaxSequenceNumber,
kTypeRangeDeletion)) <= 0) {
iter_->Invalidate();
return;
}
if (smallest_ != nullptr &&
icmp_->user_comparator()->Compare(target, smallest_->user_key) < 0) {
iter_->Seek(smallest_->user_key);
return;
}
iter_->Seek(target);
}
// NOTE: target is a user key
void TruncatedRangeDelIterator::SeekForPrev(const Slice& target) {
if (smallest_ != nullptr &&
icmp_->Compare(ParsedInternalKey(target, 0, kTypeRangeDeletion),
*smallest_) < 0) {
iter_->Invalidate();
return;
}
if (largest_ != nullptr &&
icmp_->user_comparator()->Compare(largest_->user_key, target) < 0) {
iter_->SeekForPrev(largest_->user_key);
return;
}
iter_->SeekForPrev(target);
}
// Position at the first tombstone within the truncated range. With a lower
// bound present, seeking to that bound's user key lands on the first
// non-truncated-away fragment; otherwise fall through to the fragmenter's
// own "top"-level start.
void TruncatedRangeDelIterator::SeekToFirst() {
  if (smallest_ == nullptr) {
    iter_->SeekToTopFirst();
    return;
  }
  iter_->Seek(smallest_->user_key);
}
// Position at the last tombstone within the truncated range (mirror image of
// SeekToFirst()).
void TruncatedRangeDelIterator::SeekToLast() {
  if (largest_ == nullptr) {
    iter_->SeekToTopLast();
    return;
  }
  iter_->SeekForPrev(largest_->user_key);
}
std::map<SequenceNumber, std::unique_ptr<TruncatedRangeDelIterator>>
TruncatedRangeDelIterator::SplitBySnapshot(
const std::vector<SequenceNumber>& snapshots) {
using FragmentedIterPair =
std::pair<const SequenceNumber,
std::unique_ptr<FragmentedRangeTombstoneIterator>>;
auto split_untruncated_iters = iter_->SplitBySnapshot(snapshots);
std::map<SequenceNumber, std::unique_ptr<TruncatedRangeDelIterator>>
split_truncated_iters;
std::for_each(
split_untruncated_iters.begin(), split_untruncated_iters.end(),
[&](FragmentedIterPair& iter_pair) {
std::unique_ptr<TruncatedRangeDelIterator> truncated_iter(
new TruncatedRangeDelIterator(std::move(iter_pair.second), icmp_,
smallest_ikey_, largest_ikey_));
split_truncated_iters.emplace(iter_pair.first,
std::move(truncated_iter));
});
return split_truncated_iters;
}
// Maintains two heaps over the shared iterator vector: "active" iterators
// whose current tombstone covers the most recent lookup position (ordered by
// min end key, with a parallel multiset ordered by max seqno), and "inactive"
// iterators whose current tombstone starts after it (ordered by min start
// key). unused_idx_ tracks how many entries of *iters have been consumed.
ForwardRangeDelIterator::ForwardRangeDelIterator(
    const InternalKeyComparator* icmp,
    const std::vector<std::unique_ptr<TruncatedRangeDelIterator>>* iters)
    : icmp_(icmp),
      iters_(iters),
      unused_idx_(0),
      active_seqnums_(SeqMaxComparator()),
      active_iters_(EndKeyMinComparator(icmp)),
      inactive_iters_(StartKeyMinComparator(icmp)) {}
// Returns true iff some tombstone currently covering `parsed` has a sequence
// number above parsed.sequence. Requires lookup keys to be fed in
// non-decreasing order; each call advances the heaps forward to `parsed`.
bool ForwardRangeDelIterator::ShouldDelete(const ParsedInternalKey& parsed) {
  assert(iters_ != nullptr);
  // Move active iterators that end before parsed.
  while (!active_iters_.empty() &&
         icmp_->Compare((*active_iters_.top())->end_key(), parsed) <= 0) {
    TruncatedRangeDelIterator* iter = PopActiveIter();
    do {
      iter->Next();
    } while (iter->Valid() && icmp_->Compare(iter->end_key(), parsed) <= 0);
    // PushIter re-files the iterator as active, inactive, or drops it if
    // exhausted.
    PushIter(iter, parsed);
    assert(active_iters_.size() == active_seqnums_.size());
  }
  // Move inactive iterators that start before parsed.
  while (!inactive_iters_.empty() &&
         icmp_->Compare(inactive_iters_.top()->start_key(), parsed) <= 0) {
    TruncatedRangeDelIterator* iter = PopInactiveIter();
    while (iter->Valid() && icmp_->Compare(iter->end_key(), parsed) <= 0) {
      iter->Next();
    }
    PushIter(iter, parsed);
    assert(active_iters_.size() == active_seqnums_.size());
  }
  // The multiset is ordered by descending seqno, so begin() is the covering
  // tombstone with the highest sequence number.
  return active_seqnums_.empty()
             ? false
             : (*active_seqnums_.begin())->seq() > parsed.sequence;
}
// Drops all positioning state; the next ShouldDelete() rebuilds it from
// scratch (unused_idx_ reset means all iterators get re-added).
void ForwardRangeDelIterator::Invalidate() {
  unused_idx_ = 0;
  active_iters_.clear();
  active_seqnums_.clear();
  inactive_iters_.clear();
}
// Mirror image of ForwardRangeDelIterator for keys fed in non-increasing
// order: active iterators are ordered by max start key, inactive ones (whose
// tombstones end at or before the lookup position) by max end key.
ReverseRangeDelIterator::ReverseRangeDelIterator(
    const InternalKeyComparator* icmp,
    const std::vector<std::unique_ptr<TruncatedRangeDelIterator>>* iters)
    : icmp_(icmp),
      iters_(iters),
      unused_idx_(0),
      active_seqnums_(SeqMaxComparator()),
      active_iters_(StartKeyMaxComparator(icmp)),
      inactive_iters_(EndKeyMaxComparator(icmp)) {}
// Returns true iff some tombstone currently covering `parsed` has a sequence
// number above parsed.sequence. Requires lookup keys to be fed in
// non-increasing order; each call moves the heaps backward to `parsed`.
bool ReverseRangeDelIterator::ShouldDelete(const ParsedInternalKey& parsed) {
  assert(iters_ != nullptr);
  // Move active iterators that start after parsed.
  while (!active_iters_.empty() &&
         icmp_->Compare(parsed, (*active_iters_.top())->start_key()) < 0) {
    TruncatedRangeDelIterator* iter = PopActiveIter();
    do {
      iter->Prev();
    } while (iter->Valid() && icmp_->Compare(parsed, iter->start_key()) < 0);
    PushIter(iter, parsed);
    assert(active_iters_.size() == active_seqnums_.size());
  }
  // Move inactive iterators that end after parsed.
  while (!inactive_iters_.empty() &&
         icmp_->Compare(parsed, inactive_iters_.top()->end_key()) < 0) {
    TruncatedRangeDelIterator* iter = PopInactiveIter();
    while (iter->Valid() && icmp_->Compare(parsed, iter->start_key()) < 0) {
      iter->Prev();
    }
    PushIter(iter, parsed);
    assert(active_iters_.size() == active_seqnums_.size());
  }
  // begin() of the descending-seqno multiset is the highest covering seqno.
  return active_seqnums_.empty()
             ? false
             : (*active_seqnums_.begin())->seq() > parsed.sequence;
}
// Drops all positioning state; the next ShouldDelete() rebuilds it from
// scratch (unused_idx_ reset means all iterators get re-added).
void ReverseRangeDelIterator::Invalidate() {
  unused_idx_ = 0;
  active_iters_.clear();
  active_seqnums_.clear();
  inactive_iters_.clear();
}
// Decides whether `parsed` is covered by a newer tombstone in this stripe.
// Keys outside the stripe's [lower_bound_, upper_bound_] seqno range are
// never deleted here. Switching traversal direction invalidates the opposite
// direction's positioning state; either direction lazily picks up iterators
// appended to iters_ since its last use (tracked via UnusedIdx).
bool RangeDelAggregatorV2::StripeRep::ShouldDelete(
    const ParsedInternalKey& parsed, RangeDelPositioningMode mode) {
  if (!InStripe(parsed.sequence) || IsEmpty()) {
    return false;
  }
  switch (mode) {
    case RangeDelPositioningMode::kForwardTraversal:
      InvalidateReverseIter();
      // Pick up previously unseen iterators.
      for (auto it = std::next(iters_.begin(), forward_iter_.UnusedIdx());
           it != iters_.end(); ++it, forward_iter_.IncUnusedIdx()) {
        auto& iter = *it;
        forward_iter_.AddNewIter(iter.get(), parsed);
      }
      return forward_iter_.ShouldDelete(parsed);
    case RangeDelPositioningMode::kBackwardTraversal:
      InvalidateForwardIter();
      // Pick up previously unseen iterators.
      for (auto it = std::next(iters_.begin(), reverse_iter_.UnusedIdx());
           it != iters_.end(); ++it, reverse_iter_.IncUnusedIdx()) {
        auto& iter = *it;
        reverse_iter_.AddNewIter(iter.get(), parsed);
      }
      return reverse_iter_.ShouldDelete(parsed);
    default:
      // Only forward/backward traversal modes are supported here.
      assert(false);
      return false;
  }
}
// Returns true iff any tombstone in this stripe overlaps the user-key range
// [start, end]. Repositions every iterator, so cached forward/reverse state
// is invalidated first.
bool RangeDelAggregatorV2::StripeRep::IsRangeOverlapped(const Slice& start,
                                                        const Slice& end) {
  Invalidate();
  // Set the internal start/end keys so that:
  // - if start_ikey has the same user key and sequence number as the
  // current end key, start_ikey will be considered greater; and
  // - if end_ikey has the same user key and sequence number as the current
  // start key, end_ikey will be considered greater.
  ParsedInternalKey start_ikey(start, kMaxSequenceNumber,
                               static_cast<ValueType>(0));
  ParsedInternalKey end_ikey(end, 0, static_cast<ValueType>(0));
  for (auto& iter : iters_) {
    bool checked_candidate_tombstones = false;
    // Scan tombstones from the last one starting at or before `start` up to
    // the last one starting at or before `end`, looking for any overlap.
    for (iter->SeekForPrev(start);
         iter->Valid() && icmp_->Compare(iter->start_key(), end_ikey) <= 0;
         iter->Next()) {
      checked_candidate_tombstones = true;
      if (icmp_->Compare(start_ikey, iter->end_key()) < 0 &&
          icmp_->Compare(iter->start_key(), end_ikey) <= 0) {
        return true;
      }
    }
    if (!checked_candidate_tombstones) {
      // Do an additional check for when the end of the range is the begin
      // key of a tombstone, which we missed earlier since SeekForPrev'ing
      // to the start was invalid.
      iter->SeekForPrev(end);
      if (iter->Valid() && icmp_->Compare(start_ikey, iter->end_key()) < 0 &&
          icmp_->Compare(iter->start_key(), end_ikey) <= 0) {
        return true;
      }
    }
  }
  return false;
}
// Adds a file's (or memtable's) fragmented tombstones to the single read
// stripe, truncating them to the file boundary keys when provided. Null or
// empty inputs are ignored.
void ReadRangeDelAggregatorV2::AddTombstones(
    std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter,
    const InternalKey* smallest, const InternalKey* largest) {
  if (input_iter == nullptr || input_iter->empty()) {
    return;
  }
  rep_.AddTombstones(
      std::unique_ptr<TruncatedRangeDelIterator>(new TruncatedRangeDelIterator(
          std::move(input_iter), icmp_, smallest, largest)));
}
// Reads use a single stripe, so coverage checks delegate directly to it.
bool ReadRangeDelAggregatorV2::ShouldDelete(const ParsedInternalKey& parsed,
                                            RangeDelPositioningMode mode) {
  return rep_.ShouldDelete(parsed, mode);
}
bool ReadRangeDelAggregatorV2::IsRangeOverlapped(const Slice& start,
                                                 const Slice& end) {
  // Overlap checks reposition all iterators, so drop cached positions first.
  InvalidateRangeDelMapPositions();
  return rep_.IsRangeOverlapped(start, end);
}
// Adds a compaction input's tombstones: the truncated iterator is split into
// per-snapshot-stripe pieces, each filed under its stripe's StripeRep (created
// on first use). The parent iterator is retained so NewIterator() can later
// merge all tombstones. Compaction inputs must span the full seqno range.
void CompactionRangeDelAggregatorV2::AddTombstones(
    std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter,
    const InternalKey* smallest, const InternalKey* largest) {
  if (input_iter == nullptr || input_iter->empty()) {
    return;
  }
  assert(input_iter->lower_bound() == 0);
  assert(input_iter->upper_bound() == kMaxSequenceNumber);
  parent_iters_.emplace_back(new TruncatedRangeDelIterator(
      std::move(input_iter), icmp_, smallest, largest));
  auto split_iters = parent_iters_.back()->SplitBySnapshot(*snapshots_);
  for (auto& split_iter : split_iters) {
    auto it = reps_.find(split_iter.first);
    if (it == reps_.end()) {
      // First tombstones seen for this stripe: create its rep with the
      // stripe's seqno bounds taken from the split iterator.
      bool inserted;
      SequenceNumber upper_bound = split_iter.second->upper_bound();
      SequenceNumber lower_bound = split_iter.second->lower_bound();
      std::tie(it, inserted) = reps_.emplace(
          split_iter.first, StripeRep(icmp_, upper_bound, lower_bound));
      assert(inserted);
    }
    assert(it != reps_.end());
    it->second.AddTombstones(std::move(split_iter.second));
  }
}
// A key can only be shadowed by tombstones in the snapshot stripe containing
// its sequence number; locate that stripe (reps_ is keyed by stripe upper
// bound) and delegate. Keys above every stripe are never deleted.
bool CompactionRangeDelAggregatorV2::ShouldDelete(
    const ParsedInternalKey& parsed, RangeDelPositioningMode mode) {
  auto stripe = reps_.lower_bound(parsed.sequence);
  return stripe != reps_.end() && stripe->second.ShouldDelete(parsed, mode);
}
namespace {
// Forward-only InternalIterator that merges several TruncatedRangeDelIterators
// into a single stream of tombstone fragments ordered by start key, bounded
// below by lower_bound (inclusive) and above by upper_bound (inclusive or
// exclusive per upper_bound_inclusive). Used to feed the fragmenter when
// building a compaction's output tombstone list.
class TruncatedRangeDelMergingIter : public InternalIterator {
 public:
  TruncatedRangeDelMergingIter(
      const InternalKeyComparator* icmp, const Slice* lower_bound,
      const Slice* upper_bound, bool upper_bound_inclusive,
      const std::vector<std::unique_ptr<TruncatedRangeDelIterator>>& children)
      : icmp_(icmp),
        lower_bound_(lower_bound),
        upper_bound_(upper_bound),
        upper_bound_inclusive_(upper_bound_inclusive),
        heap_(StartKeyMinComparator(icmp)) {
    for (auto& child : children) {
      if (child != nullptr) {
        // Compaction inputs must not be snapshot-split.
        assert(child->lower_bound() == 0);
        assert(child->upper_bound() == kMaxSequenceNumber);
        children_.push_back(child.get());
      }
    }
  }
  bool Valid() const override {
    // Valid while some child has fragments left and the smallest start key
    // has not passed the upper bound.
    return !heap_.empty() && BeforeEndKey(heap_.top());
  }
  Status status() const override { return Status::OK(); }
  void SeekToFirst() override {
    heap_.clear();
    for (auto& child : children_) {
      if (lower_bound_ != nullptr) {
        child->Seek(*lower_bound_);
      } else {
        child->SeekToFirst();
      }
      if (child->Valid()) {
        heap_.push(child);
      }
    }
  }
  void Next() override {
    // Advance over every fragment (not just top-level ones) so no seqno is
    // lost before re-fragmenting.
    auto* top = heap_.top();
    top->InternalNext();
    if (top->Valid()) {
      heap_.replace_top(top);
    } else {
      heap_.pop();
    }
  }
  Slice key() const override {
    // Re-encode the fragment's start as an internal key carrying its seqno.
    auto* top = heap_.top();
    cur_start_key_.Set(top->start_key().user_key, top->seq(),
                       kTypeRangeDeletion);
    return cur_start_key_.Encode();
  }
  Slice value() const override {
    auto* top = heap_.top();
    // End keys are never truncated to a concrete seqno for compaction inputs.
    assert(top->end_key().sequence == kMaxSequenceNumber);
    return top->end_key().user_key;
  }
  // Unused InternalIterator methods
  void Prev() override { assert(false); }
  void Seek(const Slice& /* target */) override { assert(false); }
  void SeekForPrev(const Slice& /* target */) override { assert(false); }
  void SeekToLast() override { assert(false); }
 private:
  // True if iter's current start key is within the (possibly inclusive)
  // upper bound; an absent bound admits everything.
  bool BeforeEndKey(const TruncatedRangeDelIterator* iter) const {
    if (upper_bound_ == nullptr) {
      return true;
    }
    int cmp = icmp_->user_comparator()->Compare(iter->start_key().user_key,
                                                *upper_bound_);
    return upper_bound_inclusive_ ? cmp <= 0 : cmp < 0;
  }
  const InternalKeyComparator* icmp_;
  const Slice* lower_bound_;
  const Slice* upper_bound_;
  bool upper_bound_inclusive_;
  // Min-heap on start key over the currently-valid children.
  BinaryHeap<TruncatedRangeDelIterator*, StartKeyMinComparator> heap_;
  std::vector<TruncatedRangeDelIterator*> children_;
  // Scratch buffer for key(); mutable since key() is const.
  mutable InternalKey cur_start_key_;
};
}  // namespace
// Builds a fresh fragmented iterator over all tombstones added so far, for
// writing into compaction output files. Bounds only prune what is fed to the
// fragmenter; callers must still enforce them during iteration.
std::unique_ptr<FragmentedRangeTombstoneIterator>
CompactionRangeDelAggregatorV2::NewIterator(const Slice* lower_bound,
                                            const Slice* upper_bound,
                                            bool upper_bound_inclusive) {
  // Merging repositions the parent iterators, so cached stripe positions
  // become stale.
  InvalidateRangeDelMapPositions();
  std::unique_ptr<TruncatedRangeDelMergingIter> merging_iter(
      new TruncatedRangeDelMergingIter(icmp_, lower_bound, upper_bound,
                                       upper_bound_inclusive, parent_iters_));
  // TODO: add tests where tombstone fragments can be outside of upper and lower
  // bound range
  auto fragmented_tombstone_list =
      std::make_shared<FragmentedRangeTombstoneList>(
          std::move(merging_iter), *icmp_, true /* for_compaction */,
          *snapshots_);
  return std::unique_ptr<FragmentedRangeTombstoneIterator>(
      new FragmentedRangeTombstoneIterator(
          fragmented_tombstone_list, *icmp_,
          kMaxSequenceNumber /* upper_bound */));
}
} // namespace rocksdb

@ -1,436 +0,0 @@
// Copyright (c) 2018-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
#pragma once
#include <algorithm>
#include <iterator>
#include <list>
#include <map>
#include <set>
#include <string>
#include <vector>
#include "db/compaction_iteration_stats.h"
#include "db/dbformat.h"
#include "db/pinned_iterators_manager.h"
#include "db/range_del_aggregator.h"
#include "db/range_tombstone_fragmenter.h"
#include "db/version_edit.h"
#include "include/rocksdb/comparator.h"
#include "include/rocksdb/types.h"
#include "table/internal_iterator.h"
#include "table/scoped_arena_iterator.h"
#include "table/table_builder.h"
#include "util/heap.h"
#include "util/kv_map.h"
namespace rocksdb {
// Wraps a FragmentedRangeTombstoneIterator and restricts the tombstones it
// exposes to the key range implied by an sstable's smallest/largest internal
// keys (either bound may be null, meaning unrestricted on that side).
class TruncatedRangeDelIterator {
 public:
  TruncatedRangeDelIterator(
      std::unique_ptr<FragmentedRangeTombstoneIterator> iter,
      const InternalKeyComparator* icmp, const InternalKey* smallest,
      const InternalKey* largest);
  bool Valid() const;
  void Next();
  void Prev();
  // Advances over every fragment, including ones hidden by newer fragments
  // for the same range (Next/Prev only visit visible fragments).
  void InternalNext();
  // Seeks to the tombstone with the highest visible sequence number that covers
  // target (a user key). If no such tombstone exists, the position will be at
  // the earliest tombstone that ends after target.
  void Seek(const Slice& target);
  // Seeks to the tombstone with the highest visible sequence number that covers
  // target (a user key). If no such tombstone exists, the position will be at
  // the latest tombstone that starts before target.
  void SeekForPrev(const Slice& target);
  void SeekToFirst();
  void SeekToLast();
  // Start of the current tombstone, clamped to the truncation lower bound.
  ParsedInternalKey start_key() const {
    return (smallest_ == nullptr ||
            icmp_->Compare(*smallest_, iter_->parsed_start_key()) <= 0)
               ? iter_->parsed_start_key()
               : *smallest_;
  }
  // End of the current tombstone, clamped to the truncation upper bound.
  ParsedInternalKey end_key() const {
    return (largest_ == nullptr ||
            icmp_->Compare(iter_->parsed_end_key(), *largest_) <= 0)
               ? iter_->parsed_end_key()
               : *largest_;
  }
  SequenceNumber seq() const { return iter_->seq(); }
  // Splits into per-snapshot-stripe truncated iterators keyed by stripe
  // upper bound; each piece keeps this iterator's truncation bounds.
  std::map<SequenceNumber, std::unique_ptr<TruncatedRangeDelIterator>>
  SplitBySnapshot(const std::vector<SequenceNumber>& snapshots);
  SequenceNumber upper_bound() const { return iter_->upper_bound(); }
  SequenceNumber lower_bound() const { return iter_->lower_bound(); }
 private:
  std::unique_ptr<FragmentedRangeTombstoneIterator> iter_;
  const InternalKeyComparator* icmp_;
  // Truncation bounds; null means unbounded on that side. When set, they
  // point into pinned_bounds_ (keeps the parsed keys alive).
  const ParsedInternalKey* smallest_ = nullptr;
  const ParsedInternalKey* largest_ = nullptr;
  std::list<ParsedInternalKey> pinned_bounds_;
  // Original sstable boundary keys, retained so SplitBySnapshot can rebuild
  // truncated children with the same bounds.
  const InternalKey* smallest_ikey_;
  const InternalKey* largest_ikey_;
};
// Orders iterators by descending sequence number of their current tombstone
// (used so the highest-seqno covering tombstone sorts first).
struct SeqMaxComparator {
  bool operator()(const TruncatedRangeDelIterator* a,
                  const TruncatedRangeDelIterator* b) const {
    return a->seq() > b->seq();
  }
};
// Heap comparator yielding a min-heap on the current tombstone's start key.
struct StartKeyMinComparator {
  explicit StartKeyMinComparator(const InternalKeyComparator* c) : icmp(c) {}
  bool operator()(const TruncatedRangeDelIterator* a,
                  const TruncatedRangeDelIterator* b) const {
    return icmp->Compare(a->start_key(), b->start_key()) > 0;
  }
  const InternalKeyComparator* icmp;
};
// Answers point-coverage queries for lookup keys presented in non-decreasing
// order. Iterators covering the current position are "active" (indexed both
// by min end key and by max seqno); iterators whose tombstones start later
// are "inactive" (min-heap on start key). Exhausted iterators are dropped.
class ForwardRangeDelIterator {
 public:
  ForwardRangeDelIterator(
      const InternalKeyComparator* icmp,
      const std::vector<std::unique_ptr<TruncatedRangeDelIterator>>* iters);
  bool ShouldDelete(const ParsedInternalKey& parsed);
  void Invalidate();
  // Positions a newly-added iterator at `parsed` and files it into the
  // appropriate heap.
  void AddNewIter(TruncatedRangeDelIterator* iter,
                  const ParsedInternalKey& parsed) {
    iter->Seek(parsed.user_key);
    PushIter(iter, parsed);
    assert(active_iters_.size() == active_seqnums_.size());
  }
  // Number of entries of the shared iterator vector already incorporated.
  size_t UnusedIdx() const { return unused_idx_; }
  void IncUnusedIdx() { unused_idx_++; }
 private:
  using ActiveSeqSet =
      std::multiset<TruncatedRangeDelIterator*, SeqMaxComparator>;
  // Min-heap order on end key over entries of the active multiset, so the
  // first tombstone to expire is on top.
  struct EndKeyMinComparator {
    explicit EndKeyMinComparator(const InternalKeyComparator* c) : icmp(c) {}
    bool operator()(const ActiveSeqSet::const_iterator& a,
                    const ActiveSeqSet::const_iterator& b) const {
      return icmp->Compare((*a)->end_key(), (*b)->end_key()) > 0;
    }
    const InternalKeyComparator* icmp;
  };
  // Files an iterator as active (covers parsed) or inactive (starts later);
  // invalid iterators are discarded.
  void PushIter(TruncatedRangeDelIterator* iter,
                const ParsedInternalKey& parsed) {
    if (!iter->Valid()) {
      // The iterator has been fully consumed, so we don't need to add it to
      // either of the heaps.
      return;
    }
    int cmp = icmp_->Compare(parsed, iter->start_key());
    if (cmp < 0) {
      PushInactiveIter(iter);
    } else {
      PushActiveIter(iter);
    }
  }
  // Active iterators live in both active_seqnums_ (by seqno) and
  // active_iters_ (by end key, holding multiset iterators).
  void PushActiveIter(TruncatedRangeDelIterator* iter) {
    auto seq_pos = active_seqnums_.insert(iter);
    active_iters_.push(seq_pos);
  }
  TruncatedRangeDelIterator* PopActiveIter() {
    auto active_top = active_iters_.top();
    auto iter = *active_top;
    active_iters_.pop();
    active_seqnums_.erase(active_top);
    return iter;
  }
  void PushInactiveIter(TruncatedRangeDelIterator* iter) {
    inactive_iters_.push(iter);
  }
  TruncatedRangeDelIterator* PopInactiveIter() {
    auto* iter = inactive_iters_.top();
    inactive_iters_.pop();
    return iter;
  }
  const InternalKeyComparator* icmp_;
  const std::vector<std::unique_ptr<TruncatedRangeDelIterator>>* iters_;
  size_t unused_idx_;
  ActiveSeqSet active_seqnums_;
  BinaryHeap<ActiveSeqSet::const_iterator, EndKeyMinComparator> active_iters_;
  BinaryHeap<TruncatedRangeDelIterator*, StartKeyMinComparator> inactive_iters_;
};
// Mirror image of ForwardRangeDelIterator for lookup keys presented in
// non-increasing order: active iterators are indexed by max start key,
// inactive ones (already passed) by max end key.
class ReverseRangeDelIterator {
 public:
  ReverseRangeDelIterator(
      const InternalKeyComparator* icmp,
      const std::vector<std::unique_ptr<TruncatedRangeDelIterator>>* iters);
  bool ShouldDelete(const ParsedInternalKey& parsed);
  void Invalidate();
  // Positions a newly-added iterator at `parsed` and files it into the
  // appropriate heap.
  void AddNewIter(TruncatedRangeDelIterator* iter,
                  const ParsedInternalKey& parsed) {
    iter->SeekForPrev(parsed.user_key);
    PushIter(iter, parsed);
    assert(active_iters_.size() == active_seqnums_.size());
  }
  // Number of entries of the shared iterator vector already incorporated.
  size_t UnusedIdx() const { return unused_idx_; }
  void IncUnusedIdx() { unused_idx_++; }
 private:
  using ActiveSeqSet =
      std::multiset<TruncatedRangeDelIterator*, SeqMaxComparator>;
  // Max-heap order on end key for the inactive set, so the next tombstone to
  // become active during backward traversal is on top.
  struct EndKeyMaxComparator {
    explicit EndKeyMaxComparator(const InternalKeyComparator* c) : icmp(c) {}
    bool operator()(const TruncatedRangeDelIterator* a,
                    const TruncatedRangeDelIterator* b) const {
      return icmp->Compare(a->end_key(), b->end_key()) < 0;
    }
    const InternalKeyComparator* icmp;
  };
  // Max-heap order on start key over entries of the active multiset, so the
  // first tombstone to expire (going backward) is on top.
  struct StartKeyMaxComparator {
    explicit StartKeyMaxComparator(const InternalKeyComparator* c) : icmp(c) {}
    bool operator()(const ActiveSeqSet::const_iterator& a,
                    const ActiveSeqSet::const_iterator& b) const {
      return icmp->Compare((*a)->start_key(), (*b)->start_key()) < 0;
    }
    const InternalKeyComparator* icmp;
  };
  // Files an iterator as inactive (ends at or before parsed) or active;
  // invalid iterators are discarded.
  void PushIter(TruncatedRangeDelIterator* iter,
                const ParsedInternalKey& parsed) {
    if (!iter->Valid()) {
      // The iterator has been fully consumed, so we don't need to add it to
      // either of the heaps.
    } else if (icmp_->Compare(iter->end_key(), parsed) <= 0) {
      PushInactiveIter(iter);
    } else {
      PushActiveIter(iter);
    }
  }
  // Active iterators live in both active_seqnums_ (by seqno) and
  // active_iters_ (by start key, holding multiset iterators).
  void PushActiveIter(TruncatedRangeDelIterator* iter) {
    auto seq_pos = active_seqnums_.insert(iter);
    active_iters_.push(seq_pos);
  }
  TruncatedRangeDelIterator* PopActiveIter() {
    auto active_top = active_iters_.top();
    auto iter = *active_top;
    active_iters_.pop();
    active_seqnums_.erase(active_top);
    return iter;
  }
  void PushInactiveIter(TruncatedRangeDelIterator* iter) {
    inactive_iters_.push(iter);
  }
  TruncatedRangeDelIterator* PopInactiveIter() {
    auto* iter = inactive_iters_.top();
    inactive_iters_.pop();
    return iter;
  }
  const InternalKeyComparator* icmp_;
  const std::vector<std::unique_ptr<TruncatedRangeDelIterator>>* iters_;
  size_t unused_idx_;
  ActiveSeqSet active_seqnums_;
  BinaryHeap<ActiveSeqSet::const_iterator, StartKeyMaxComparator> active_iters_;
  BinaryHeap<TruncatedRangeDelIterator*, EndKeyMaxComparator> inactive_iters_;
};
// Abstract base for range-tombstone aggregation: collects tombstone iterators
// and answers whether a given internal key is covered by a newer tombstone.
// Concrete subclasses specialize for reads (single stripe) and compactions
// (one stripe per snapshot interval).
class RangeDelAggregatorV2 {
 public:
  explicit RangeDelAggregatorV2(const InternalKeyComparator* icmp)
      : icmp_(icmp) {}
  virtual ~RangeDelAggregatorV2() {}
  // Takes ownership of input_iter; smallest/largest optionally truncate the
  // tombstones to an sstable's boundary keys.
  virtual void AddTombstones(
      std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter,
      const InternalKey* smallest = nullptr,
      const InternalKey* largest = nullptr) = 0;
  // Convenience overload: parses an encoded internal key; unparseable keys
  // are never reported as deleted.
  bool ShouldDelete(const Slice& key, RangeDelPositioningMode mode) {
    ParsedInternalKey parsed;
    if (!ParseInternalKey(key, &parsed)) {
      return false;
    }
    return ShouldDelete(parsed, mode);
  }
  virtual bool ShouldDelete(const ParsedInternalKey& parsed,
                            RangeDelPositioningMode mode) = 0;
  virtual void InvalidateRangeDelMapPositions() = 0;
  virtual bool IsEmpty() const = 0;
  // Returns true the first time a file number is seen; lets callers avoid
  // adding the same file's tombstones twice.
  bool AddFile(uint64_t file_number) {
    return files_seen_.insert(file_number).second;
  }
 protected:
  // Tombstones and positioning state for one sequence-number stripe
  // [lower_bound, upper_bound].
  class StripeRep {
   public:
    StripeRep(const InternalKeyComparator* icmp, SequenceNumber upper_bound,
              SequenceNumber lower_bound)
        : icmp_(icmp),
          forward_iter_(icmp, &iters_),
          reverse_iter_(icmp, &iters_),
          upper_bound_(upper_bound),
          lower_bound_(lower_bound) {}
    void AddTombstones(std::unique_ptr<TruncatedRangeDelIterator> input_iter) {
      iters_.push_back(std::move(input_iter));
    }
    bool IsEmpty() const { return iters_.empty(); }
    bool ShouldDelete(const ParsedInternalKey& parsed,
                      RangeDelPositioningMode mode);
    // Drops forward and reverse positioning state.
    void Invalidate() {
      InvalidateForwardIter();
      InvalidateReverseIter();
    }
    bool IsRangeOverlapped(const Slice& start, const Slice& end);
   private:
    bool InStripe(SequenceNumber seq) const {
      return lower_bound_ <= seq && seq <= upper_bound_;
    }
    void InvalidateForwardIter() { forward_iter_.Invalidate(); }
    void InvalidateReverseIter() { reverse_iter_.Invalidate(); }
    const InternalKeyComparator* icmp_;
    std::vector<std::unique_ptr<TruncatedRangeDelIterator>> iters_;
    ForwardRangeDelIterator forward_iter_;
    ReverseRangeDelIterator reverse_iter_;
    SequenceNumber upper_bound_;
    SequenceNumber lower_bound_;
  };
  const InternalKeyComparator* icmp_;
 private:
  std::set<uint64_t> files_seen_;
};
// Aggregator for reads: all tombstones visible at or below a single snapshot
// (upper_bound) live in one stripe.
class ReadRangeDelAggregatorV2 : public RangeDelAggregatorV2 {
 public:
  ReadRangeDelAggregatorV2(const InternalKeyComparator* icmp,
                           SequenceNumber upper_bound)
      : RangeDelAggregatorV2(icmp),
        rep_(icmp, upper_bound, 0 /* lower_bound */) {}
  ~ReadRangeDelAggregatorV2() override {}
  using RangeDelAggregatorV2::ShouldDelete;
  void AddTombstones(
      std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter,
      const InternalKey* smallest = nullptr,
      const InternalKey* largest = nullptr) override;
  bool ShouldDelete(const ParsedInternalKey& parsed,
                    RangeDelPositioningMode mode) override;
  // True iff any tombstone overlaps the user-key range [start, end].
  bool IsRangeOverlapped(const Slice& start, const Slice& end);
  void InvalidateRangeDelMapPositions() override { rep_.Invalidate(); }
  bool IsEmpty() const override { return rep_.IsEmpty(); }
 private:
  StripeRep rep_;
};
// Aggregator for compactions: tombstones are split by the compaction's
// snapshot list into one stripe per snapshot interval, and the full set can
// be re-emitted via NewIterator() for writing to output files.
class CompactionRangeDelAggregatorV2 : public RangeDelAggregatorV2 {
 public:
  CompactionRangeDelAggregatorV2(const InternalKeyComparator* icmp,
                                 const std::vector<SequenceNumber>& snapshots)
      : RangeDelAggregatorV2(icmp), snapshots_(&snapshots) {}
  ~CompactionRangeDelAggregatorV2() override {}
  void AddTombstones(
      std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter,
      const InternalKey* smallest = nullptr,
      const InternalKey* largest = nullptr) override;
  using RangeDelAggregatorV2::ShouldDelete;
  bool ShouldDelete(const ParsedInternalKey& parsed,
                    RangeDelPositioningMode mode) override;
  bool IsRangeOverlapped(const Slice& start, const Slice& end);
  void InvalidateRangeDelMapPositions() override {
    for (auto& rep : reps_) {
      rep.second.Invalidate();
    }
  }
  bool IsEmpty() const override {
    for (const auto& rep : reps_) {
      if (!rep.second.IsEmpty()) {
        return false;
      }
    }
    return true;
  }
  // Creates an iterator over all the range tombstones in the aggregator, for
  // use in compaction. Nullptr arguments indicate that the iterator range is
  // unbounded.
  // NOTE: the boundaries are used for optimization purposes to reduce the
  // number of tombstones that are passed to the fragmenter; they do not
  // guarantee that the resulting iterator only contains range tombstones that
  // cover keys in the provided range. If required, these bounds must be
  // enforced during iteration.
  std::unique_ptr<FragmentedRangeTombstoneIterator> NewIterator(
      const Slice* lower_bound = nullptr, const Slice* upper_bound = nullptr,
      bool upper_bound_inclusive = false);
 private:
  // Untruncated parents are kept alive here; reps_ holds per-stripe splits
  // keyed by stripe upper bound.
  std::vector<std::unique_ptr<TruncatedRangeDelIterator>> parent_iters_;
  std::map<SequenceNumber, StripeRep> reps_;
  const std::vector<SequenceNumber>* snapshots_;
};
} // namespace rocksdb

@ -1,709 +0,0 @@
// Copyright (c) 2018-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
#include "db/range_del_aggregator_v2.h"
#include <memory>
#include <string>
#include <vector>
#include "db/db_test_util.h"
#include "db/dbformat.h"
#include "db/range_tombstone_fragmenter.h"
#include "util/testutil.h"
namespace rocksdb {
class RangeDelAggregatorV2Test : public testing::Test {};
namespace {
static auto bytewise_icmp = InternalKeyComparator(BytewiseComparator());
// Builds an in-memory iterator over serialized range tombstones, in the
// order given (fragmentation happens later).
std::unique_ptr<InternalIterator> MakeRangeDelIter(
    const std::vector<RangeTombstone>& range_dels) {
  std::vector<std::string> keys, values;
  for (const auto& range_del : range_dels) {
    auto key_and_value = range_del.Serialize();
    keys.push_back(key_and_value.first.Encode().ToString());
    values.push_back(key_and_value.second.ToString());
  }
  return std::unique_ptr<test::VectorIterator>(
      new test::VectorIterator(keys, values));
}
// Fragments each tombstone list independently, producing one fragmented list
// per input vector.
std::vector<std::unique_ptr<FragmentedRangeTombstoneList>>
MakeFragmentedTombstoneLists(
    const std::vector<std::vector<RangeTombstone>>& range_dels_list) {
  std::vector<std::unique_ptr<FragmentedRangeTombstoneList>> fragment_lists;
  for (const auto& range_dels : range_dels_list) {
    auto range_del_iter = MakeRangeDelIter(range_dels);
    fragment_lists.emplace_back(new FragmentedRangeTombstoneList(
        std::move(range_del_iter), bytewise_icmp));
  }
  return fragment_lists;
}
// Expected (start, end, seq) triple for one step of a truncated-iterator scan.
struct TruncatedIterScanTestCase {
  ParsedInternalKey start;
  ParsedInternalKey end;
  SequenceNumber seq;
};
// Expected post-Seek/SeekForPrev position for a given user-key target;
// `invalid` means the seek should invalidate the iterator.
struct TruncatedIterSeekTestCase {
  Slice target;
  ParsedInternalKey start;
  ParsedInternalKey end;
  SequenceNumber seq;
  bool invalid;
};
// Lookup key plus the expected ShouldDelete() verdict.
struct ShouldDeleteTestCase {
  ParsedInternalKey lookup_key;
  bool result;
};
// User-key range plus the expected IsRangeOverlapped() verdict.
struct IsRangeOverlappedTestCase {
  Slice start;
  Slice end;
  bool result;
};
// An untruncated tombstone endpoint (max seqno, range-deletion type).
ParsedInternalKey UncutEndpoint(const Slice& s) {
  return ParsedInternalKey(s, kMaxSequenceNumber, kTypeRangeDeletion);
}
// A point-value internal key used as a truncated endpoint or lookup key.
ParsedInternalKey InternalValue(const Slice& key, SequenceNumber seq) {
  return ParsedInternalKey(key, seq, kTypeValue);
}
// Checks that a full forward scan and a full reverse scan of `iter` each
// yield exactly `expected_range_dels` (reversed for the backward pass).
void VerifyIterator(
    TruncatedRangeDelIterator* iter, const InternalKeyComparator& icmp,
    const std::vector<TruncatedIterScanTestCase>& expected_range_dels) {
  // Test forward iteration.
  iter->SeekToFirst();
  for (size_t i = 0; i < expected_range_dels.size(); i++, iter->Next()) {
    ASSERT_TRUE(iter->Valid());
    EXPECT_EQ(0, icmp.Compare(iter->start_key(), expected_range_dels[i].start));
    EXPECT_EQ(0, icmp.Compare(iter->end_key(), expected_range_dels[i].end));
    EXPECT_EQ(expected_range_dels[i].seq, iter->seq());
  }
  EXPECT_FALSE(iter->Valid());
  // Test reverse iteration.
  iter->SeekToLast();
  std::vector<TruncatedIterScanTestCase> reverse_expected_range_dels(
      expected_range_dels.rbegin(), expected_range_dels.rend());
  for (size_t i = 0; i < reverse_expected_range_dels.size();
       i++, iter->Prev()) {
    ASSERT_TRUE(iter->Valid());
    EXPECT_EQ(0, icmp.Compare(iter->start_key(),
                              reverse_expected_range_dels[i].start));
    EXPECT_EQ(
        0, icmp.Compare(iter->end_key(), reverse_expected_range_dels[i].end));
    EXPECT_EQ(reverse_expected_range_dels[i].seq, iter->seq());
  }
  EXPECT_FALSE(iter->Valid());
}
// Runs each Seek test case and checks the resulting position (or that the
// iterator was invalidated).
void VerifySeek(TruncatedRangeDelIterator* iter,
                const InternalKeyComparator& icmp,
                const std::vector<TruncatedIterSeekTestCase>& test_cases) {
  for (const auto& test_case : test_cases) {
    iter->Seek(test_case.target);
    if (test_case.invalid) {
      ASSERT_FALSE(iter->Valid());
    } else {
      ASSERT_TRUE(iter->Valid());
      EXPECT_EQ(0, icmp.Compare(iter->start_key(), test_case.start));
      EXPECT_EQ(0, icmp.Compare(iter->end_key(), test_case.end));
      EXPECT_EQ(test_case.seq, iter->seq());
    }
  }
}
// Same as VerifySeek, exercising SeekForPrev instead.
void VerifySeekForPrev(
    TruncatedRangeDelIterator* iter, const InternalKeyComparator& icmp,
    const std::vector<TruncatedIterSeekTestCase>& test_cases) {
  for (const auto& test_case : test_cases) {
    iter->SeekForPrev(test_case.target);
    if (test_case.invalid) {
      ASSERT_FALSE(iter->Valid());
    } else {
      ASSERT_TRUE(iter->Valid());
      EXPECT_EQ(0, icmp.Compare(iter->start_key(), test_case.start));
      EXPECT_EQ(0, icmp.Compare(iter->end_key(), test_case.end));
      EXPECT_EQ(test_case.seq, iter->seq());
    }
  }
}
// Runs the cases forward in kForwardTraversal mode and then backward in
// kBackwardTraversal mode, since each mode requires monotonic key order.
void VerifyShouldDelete(RangeDelAggregatorV2* range_del_agg,
                        const std::vector<ShouldDeleteTestCase>& test_cases) {
  for (const auto& test_case : test_cases) {
    EXPECT_EQ(
        test_case.result,
        range_del_agg->ShouldDelete(
            test_case.lookup_key, RangeDelPositioningMode::kForwardTraversal));
  }
  for (auto it = test_cases.rbegin(); it != test_cases.rend(); ++it) {
    const auto& test_case = *it;
    EXPECT_EQ(
        test_case.result,
        range_del_agg->ShouldDelete(
            test_case.lookup_key, RangeDelPositioningMode::kBackwardTraversal));
  }
}
// Checks IsRangeOverlapped() against each expected verdict.
void VerifyIsRangeOverlapped(
    ReadRangeDelAggregatorV2* range_del_agg,
    const std::vector<IsRangeOverlappedTestCase>& test_cases) {
  for (const auto& test_case : test_cases) {
    EXPECT_EQ(test_case.result,
              range_del_agg->IsRangeOverlapped(test_case.start, test_case.end));
  }
}
// Asserts the iterator's current position matches `tombstone` through both
// the generic InternalIterator accessors and the fragmenter-specific ones.
void CheckIterPosition(const RangeTombstone& tombstone,
                       const FragmentedRangeTombstoneIterator* iter) {
  // Test InternalIterator interface.
  EXPECT_EQ(tombstone.start_key_, ExtractUserKey(iter->key()));
  EXPECT_EQ(tombstone.end_key_, iter->value());
  EXPECT_EQ(tombstone.seq_, iter->seq());
  // Test FragmentedRangeTombstoneIterator interface.
  EXPECT_EQ(tombstone.start_key_, iter->start_key());
  EXPECT_EQ(tombstone.end_key_, iter->end_key());
  EXPECT_EQ(tombstone.seq_, GetInternalKeySeqno(iter->key()));
}
// Scans `iter` from the start and checks it yields exactly
// `expected_tombstones`.
void VerifyFragmentedRangeDels(
    FragmentedRangeTombstoneIterator* iter,
    const std::vector<RangeTombstone>& expected_tombstones) {
  iter->SeekToFirst();
  for (size_t i = 0; i < expected_tombstones.size(); i++, iter->Next()) {
    ASSERT_TRUE(iter->Valid());
    CheckIterPosition(expected_tombstones[i], iter);
  }
  EXPECT_FALSE(iter->Valid());
}
} // namespace
// An empty tombstone list yields an iterator that is invalid after either
// SeekToFirst or SeekToLast.
TEST_F(RangeDelAggregatorV2Test, EmptyTruncatedIter) {
  auto range_del_iter = MakeRangeDelIter({});
  FragmentedRangeTombstoneList fragment_list(std::move(range_del_iter),
                                             bytewise_icmp);
  std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
      new FragmentedRangeTombstoneIterator(&fragment_list, bytewise_icmp,
                                           kMaxSequenceNumber));
  TruncatedRangeDelIterator iter(std::move(input_iter), &bytewise_icmp, nullptr,
                                 nullptr);
  iter.SeekToFirst();
  ASSERT_FALSE(iter.Valid());
  iter.SeekToLast();
  ASSERT_FALSE(iter.Valid());
}
// With no truncation bounds, the iterator exposes all fragments with uncut
// endpoints, in both scan and seek modes.
TEST_F(RangeDelAggregatorV2Test, UntruncatedIter) {
  auto range_del_iter =
      MakeRangeDelIter({{"a", "e", 10}, {"e", "g", 8}, {"j", "n", 4}});
  FragmentedRangeTombstoneList fragment_list(std::move(range_del_iter),
                                             bytewise_icmp);
  std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
      new FragmentedRangeTombstoneIterator(&fragment_list, bytewise_icmp,
                                           kMaxSequenceNumber));
  TruncatedRangeDelIterator iter(std::move(input_iter), &bytewise_icmp, nullptr,
                                 nullptr);
  VerifyIterator(&iter, bytewise_icmp,
                 {{UncutEndpoint("a"), UncutEndpoint("e"), 10},
                  {UncutEndpoint("e"), UncutEndpoint("g"), 8},
                  {UncutEndpoint("j"), UncutEndpoint("n"), 4}});
  VerifySeek(
      &iter, bytewise_icmp,
      {{"d", UncutEndpoint("a"), UncutEndpoint("e"), 10},
       {"e", UncutEndpoint("e"), UncutEndpoint("g"), 8},
       {"ia", UncutEndpoint("j"), UncutEndpoint("n"), 4},
       {"n", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */},
       {"", UncutEndpoint("a"), UncutEndpoint("e"), 10}});
  VerifySeekForPrev(
      &iter, bytewise_icmp,
      {{"d", UncutEndpoint("a"), UncutEndpoint("e"), 10},
       {"e", UncutEndpoint("e"), UncutEndpoint("g"), 8},
       {"ia", UncutEndpoint("e"), UncutEndpoint("g"), 8},
       {"n", UncutEndpoint("j"), UncutEndpoint("n"), 4},
       {"", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */}});
}
// A visibility snapshot below a tombstone's seqno (here 9 < 10) hides that
// tombstone from the iterator entirely.
TEST_F(RangeDelAggregatorV2Test, UntruncatedIterWithSnapshot) {
  auto range_del_iter =
      MakeRangeDelIter({{"a", "e", 10}, {"e", "g", 8}, {"j", "n", 4}});
  FragmentedRangeTombstoneList fragment_list(std::move(range_del_iter),
                                             bytewise_icmp);
  std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
      new FragmentedRangeTombstoneIterator(&fragment_list, bytewise_icmp,
                                           9 /* snapshot */));
  TruncatedRangeDelIterator iter(std::move(input_iter), &bytewise_icmp, nullptr,
                                 nullptr);
  VerifyIterator(&iter, bytewise_icmp,
                 {{UncutEndpoint("e"), UncutEndpoint("g"), 8},
                  {UncutEndpoint("j"), UncutEndpoint("n"), 4}});
  VerifySeek(
      &iter, bytewise_icmp,
      {{"d", UncutEndpoint("e"), UncutEndpoint("g"), 8},
       {"e", UncutEndpoint("e"), UncutEndpoint("g"), 8},
       {"ia", UncutEndpoint("j"), UncutEndpoint("n"), 4},
       {"n", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */},
       {"", UncutEndpoint("e"), UncutEndpoint("g"), 8}});
  VerifySeekForPrev(
      &iter, bytewise_icmp,
      {{"d", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */},
       {"e", UncutEndpoint("e"), UncutEndpoint("g"), 8},
       {"ia", UncutEndpoint("e"), UncutEndpoint("g"), 8},
       {"n", UncutEndpoint("j"), UncutEndpoint("n"), 4},
       {"", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */}});
}
// Boundary keys that fall inside tombstones clip those tombstones' endpoints
// to the boundary internal keys (note largest's seqno is reduced by 1).
TEST_F(RangeDelAggregatorV2Test, TruncatedIterPartiallyCutTombstones) {
  auto range_del_iter =
      MakeRangeDelIter({{"a", "e", 10}, {"e", "g", 8}, {"j", "n", 4}});
  FragmentedRangeTombstoneList fragment_list(std::move(range_del_iter),
                                             bytewise_icmp);
  std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
      new FragmentedRangeTombstoneIterator(&fragment_list, bytewise_icmp,
                                           kMaxSequenceNumber));
  InternalKey smallest("d", 7, kTypeValue);
  InternalKey largest("m", 9, kTypeValue);
  TruncatedRangeDelIterator iter(std::move(input_iter), &bytewise_icmp,
                                 &smallest, &largest);
  VerifyIterator(&iter, bytewise_icmp,
                 {{InternalValue("d", 7), UncutEndpoint("e"), 10},
                  {UncutEndpoint("e"), UncutEndpoint("g"), 8},
                  {UncutEndpoint("j"), InternalValue("m", 8), 4}});
  VerifySeek(
      &iter, bytewise_icmp,
      {{"d", InternalValue("d", 7), UncutEndpoint("e"), 10},
       {"e", UncutEndpoint("e"), UncutEndpoint("g"), 8},
       {"ia", UncutEndpoint("j"), InternalValue("m", 8), 4},
       {"n", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */},
       {"", InternalValue("d", 7), UncutEndpoint("e"), 10}});
  VerifySeekForPrev(
      &iter, bytewise_icmp,
      {{"d", InternalValue("d", 7), UncutEndpoint("e"), 10},
       {"e", UncutEndpoint("e"), UncutEndpoint("g"), 8},
       {"ia", UncutEndpoint("e"), UncutEndpoint("g"), 8},
       {"n", UncutEndpoint("j"), InternalValue("m", 8), 4},
       {"", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */}});
}
TEST_F(RangeDelAggregatorV2Test, TruncatedIterFullyCutTombstones) {
  auto tombstones =
      MakeRangeDelIter({{"a", "e", 10}, {"e", "g", 8}, {"j", "n", 4}});
  FragmentedRangeTombstoneList fragments(std::move(tombstones), bytewise_icmp);
  std::unique_ptr<FragmentedRangeTombstoneIterator> frag_iter(
      new FragmentedRangeTombstoneIterator(&fragments, bytewise_icmp,
                                           kMaxSequenceNumber));

  // The file bounds ["f", "i"] leave only a slice of the seqnum-8 tombstone
  // visible; the expected output contains that single truncated fragment.
  InternalKey smallest("f", 7, kTypeValue);
  InternalKey largest("i", 9, kTypeValue);
  TruncatedRangeDelIterator iter(std::move(frag_iter), &bytewise_icmp,
                                 &smallest, &largest);

  VerifyIterator(&iter, bytewise_icmp,
                 {{InternalValue("f", 7), UncutEndpoint("g"), 8}});

  VerifySeek(
      &iter, bytewise_icmp,
      {{"d", InternalValue("f", 7), UncutEndpoint("g"), 8},
       {"f", InternalValue("f", 7), UncutEndpoint("g"), 8},
       {"j", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */}});

  VerifySeekForPrev(
      &iter, bytewise_icmp,
      {{"d", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */},
       {"f", InternalValue("f", 7), UncutEndpoint("g"), 8},
       {"j", InternalValue("f", 7), UncutEndpoint("g"), 8}});
}
TEST_F(RangeDelAggregatorV2Test, SingleIterInAggregator) {
  // One fragmented iterator feeding a read-time aggregator; point lookups
  // and range-overlap queries should reflect the two overlapping tombstones.
  auto tombstones = MakeRangeDelIter({{"a", "e", 10}, {"c", "g", 8}});
  FragmentedRangeTombstoneList fragments(std::move(tombstones), bytewise_icmp);
  std::unique_ptr<FragmentedRangeTombstoneIterator> frag_iter(
      new FragmentedRangeTombstoneIterator(&fragments, bytewise_icmp,
                                           kMaxSequenceNumber));

  ReadRangeDelAggregatorV2 agg(&bytewise_icmp, kMaxSequenceNumber);
  agg.AddTombstones(std::move(frag_iter));

  VerifyShouldDelete(&agg, {{InternalValue("a", 19), false},
                            {InternalValue("b", 9), true},
                            {InternalValue("d", 9), true},
                            {InternalValue("e", 7), true},
                            {InternalValue("g", 7), false}});

  VerifyIsRangeOverlapped(&agg, {{"", "_", false},
                                 {"_", "a", true},
                                 {"a", "c", true},
                                 {"d", "f", true},
                                 {"g", "l", false}});
}
TEST_F(RangeDelAggregatorV2Test, MultipleItersInAggregator) {
  // Two tombstone lists merged into one read-time aggregator with no
  // visibility limit (upper bound kMaxSequenceNumber).
  auto lists = MakeFragmentedTombstoneLists(
      {{{"a", "e", 10}, {"c", "g", 8}},
       {{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}});

  ReadRangeDelAggregatorV2 agg(&bytewise_icmp, kMaxSequenceNumber);
  for (size_t i = 0; i < lists.size(); ++i) {
    std::unique_ptr<FragmentedRangeTombstoneIterator> frag_iter(
        new FragmentedRangeTombstoneIterator(lists[i].get(), bytewise_icmp,
                                             kMaxSequenceNumber));
    agg.AddTombstones(std::move(frag_iter));
  }

  VerifyShouldDelete(&agg, {{InternalValue("a", 19), true},
                            {InternalValue("b", 19), false},
                            {InternalValue("b", 9), true},
                            {InternalValue("d", 9), true},
                            {InternalValue("e", 7), true},
                            {InternalValue("g", 7), false},
                            {InternalValue("h", 24), true},
                            {InternalValue("i", 24), false},
                            {InternalValue("ii", 14), true},
                            {InternalValue("j", 14), false}});

  VerifyIsRangeOverlapped(&agg, {{"", "_", false},
                                 {"_", "a", true},
                                 {"a", "c", true},
                                 {"d", "f", true},
                                 {"g", "l", true},
                                 {"x", "y", false}});
}
TEST_F(RangeDelAggregatorV2Test, MultipleItersInAggregatorWithUpperBound) {
  // Same tombstones as MultipleItersInAggregator, but the aggregator and
  // iterators are capped at seqnum 19, so the seqnum-20/25 tombstones have
  // no effect (e.g. "a"@19 and "h"@24 are no longer deleted).
  auto lists = MakeFragmentedTombstoneLists(
      {{{"a", "e", 10}, {"c", "g", 8}},
       {{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}});

  ReadRangeDelAggregatorV2 agg(&bytewise_icmp, 19);
  for (size_t i = 0; i < lists.size(); ++i) {
    std::unique_ptr<FragmentedRangeTombstoneIterator> frag_iter(
        new FragmentedRangeTombstoneIterator(lists[i].get(), bytewise_icmp,
                                             19 /* snapshot */));
    agg.AddTombstones(std::move(frag_iter));
  }

  VerifyShouldDelete(&agg, {{InternalValue("a", 19), false},
                            {InternalValue("a", 9), true},
                            {InternalValue("b", 9), true},
                            {InternalValue("d", 9), true},
                            {InternalValue("e", 7), true},
                            {InternalValue("g", 7), false},
                            {InternalValue("h", 24), false},
                            {InternalValue("i", 24), false},
                            {InternalValue("ii", 14), true},
                            {InternalValue("j", 14), false}});

  VerifyIsRangeOverlapped(&agg, {{"", "_", false},
                                 {"_", "a", true},
                                 {"a", "c", true},
                                 {"d", "f", true},
                                 {"g", "l", true},
                                 {"x", "y", false}});
}
TEST_F(RangeDelAggregatorV2Test, MultipleTruncatedItersInAggregator) {
  // Three identical ["a", "z")@10 tombstones, each truncated to a disjoint
  // file range; deletions apply only inside each iterator's bounds.
  auto lists = MakeFragmentedTombstoneLists(
      {{{"a", "z", 10}}, {{"a", "z", 10}}, {{"a", "z", 10}}});
  std::vector<std::pair<InternalKey, InternalKey>> bounds = {
      {InternalKey("a", 4, kTypeValue),
       InternalKey("m", kMaxSequenceNumber, kTypeRangeDeletion)},
      {InternalKey("m", 20, kTypeValue),
       InternalKey("x", kMaxSequenceNumber, kTypeRangeDeletion)},
      {InternalKey("x", 5, kTypeValue), InternalKey("zz", 30, kTypeValue)}};

  ReadRangeDelAggregatorV2 agg(&bytewise_icmp, 19);
  for (size_t i = 0; i < lists.size(); ++i) {
    std::unique_ptr<FragmentedRangeTombstoneIterator> frag_iter(
        new FragmentedRangeTombstoneIterator(lists[i].get(), bytewise_icmp,
                                             19 /* snapshot */));
    agg.AddTombstones(std::move(frag_iter), &bounds[i].first,
                      &bounds[i].second);
  }

  VerifyShouldDelete(&agg, {{InternalValue("a", 10), false},
                            {InternalValue("a", 9), false},
                            {InternalValue("a", 4), true},
                            {InternalValue("m", 10), false},
                            {InternalValue("m", 9), true},
                            {InternalValue("x", 10), false},
                            {InternalValue("x", 9), false},
                            {InternalValue("x", 5), true},
                            {InternalValue("z", 9), false}});

  VerifyIsRangeOverlapped(&agg, {{"", "_", false},
                                 {"_", "a", true},
                                 {"a", "n", true},
                                 {"l", "x", true},
                                 {"w", "z", true},
                                 {"zzz", "zz", false},
                                 {"zz", "zzz", false}});
}
TEST_F(RangeDelAggregatorV2Test, MultipleTruncatedItersInAggregatorSameLevel) {
  // Same setup as MultipleTruncatedItersInAggregator, but the iterators are
  // installed one at a time, checking deletions after each addition.
  auto lists = MakeFragmentedTombstoneLists(
      {{{"a", "z", 10}}, {{"a", "z", 10}}, {{"a", "z", 10}}});
  std::vector<std::pair<InternalKey, InternalKey>> bounds = {
      {InternalKey("a", 4, kTypeValue),
       InternalKey("m", kMaxSequenceNumber, kTypeRangeDeletion)},
      {InternalKey("m", 20, kTypeValue),
       InternalKey("x", kMaxSequenceNumber, kTypeRangeDeletion)},
      {InternalKey("x", 5, kTypeValue), InternalKey("zz", 30, kTypeValue)}};

  ReadRangeDelAggregatorV2 agg(&bytewise_icmp, 19);
  auto install = [&](size_t idx) {
    std::unique_ptr<FragmentedRangeTombstoneIterator> frag_iter(
        new FragmentedRangeTombstoneIterator(lists[idx].get(), bytewise_icmp,
                                             19 /* snapshot */));
    agg.AddTombstones(std::move(frag_iter), &bounds[idx].first,
                      &bounds[idx].second);
  };

  install(0);
  VerifyShouldDelete(&agg, {{InternalValue("a", 10), false},
                            {InternalValue("a", 9), false},
                            {InternalValue("a", 4), true}});

  install(1);
  VerifyShouldDelete(&agg, {{InternalValue("m", 10), false},
                            {InternalValue("m", 9), true}});

  install(2);
  VerifyShouldDelete(&agg, {{InternalValue("x", 10), false},
                            {InternalValue("x", 9), false},
                            {InternalValue("x", 5), true},
                            {InternalValue("z", 9), false}});

  VerifyIsRangeOverlapped(&agg, {{"", "_", false},
                                 {"_", "a", true},
                                 {"a", "n", true},
                                 {"l", "x", true},
                                 {"w", "z", true},
                                 {"zzz", "zz", false},
                                 {"zz", "zzz", false}});
}
TEST_F(RangeDelAggregatorV2Test, CompactionAggregatorNoSnapshots) {
  // With no snapshots, the compaction output keeps only the newest fragment
  // at each point of the key space.
  auto lists = MakeFragmentedTombstoneLists(
      {{{"a", "e", 10}, {"c", "g", 8}},
       {{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}});

  std::vector<SequenceNumber> snapshots;
  CompactionRangeDelAggregatorV2 agg(&bytewise_icmp, snapshots);
  for (size_t i = 0; i < lists.size(); ++i) {
    std::unique_ptr<FragmentedRangeTombstoneIterator> frag_iter(
        new FragmentedRangeTombstoneIterator(lists[i].get(), bytewise_icmp,
                                             kMaxSequenceNumber));
    agg.AddTombstones(std::move(frag_iter));
  }

  VerifyShouldDelete(&agg, {{InternalValue("a", 19), true},
                            {InternalValue("b", 19), false},
                            {InternalValue("b", 9), true},
                            {InternalValue("d", 9), true},
                            {InternalValue("e", 7), true},
                            {InternalValue("g", 7), false},
                            {InternalValue("h", 24), true},
                            {InternalValue("i", 24), false},
                            {InternalValue("ii", 14), true},
                            {InternalValue("j", 14), false}});

  auto out_iter = agg.NewIterator();
  VerifyFragmentedRangeDels(out_iter.get(), {{"a", "b", 20},
                                             {"b", "c", 10},
                                             {"c", "e", 10},
                                             {"e", "g", 8},
                                             {"h", "i", 25},
                                             {"ii", "j", 15}});
}
TEST_F(RangeDelAggregatorV2Test, CompactionAggregatorWithSnapshots) {
  // Snapshots at 9 and 19 split sequence numbers into stripes [0, 9],
  // [10, 19], and [20, kMax]; the compaction output keeps the newest
  // tombstone per stripe, and ShouldDelete only fires when a strictly newer
  // tombstone exists within the key's own stripe.
  auto lists = MakeFragmentedTombstoneLists(
      {{{"a", "e", 10}, {"c", "g", 8}},
       {{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}});

  std::vector<SequenceNumber> snapshots{9, 19};
  CompactionRangeDelAggregatorV2 agg(&bytewise_icmp, snapshots);
  for (size_t i = 0; i < lists.size(); ++i) {
    std::unique_ptr<FragmentedRangeTombstoneIterator> frag_iter(
        new FragmentedRangeTombstoneIterator(lists[i].get(), bytewise_icmp,
                                             kMaxSequenceNumber));
    agg.AddTombstones(std::move(frag_iter));
  }

  VerifyShouldDelete(
      &agg,
      {
          {InternalValue("a", 19), false},  // [10, 19]
          {InternalValue("a", 9), false},   // [0, 9]
          {InternalValue("b", 9), false},   // [0, 9]
          {InternalValue("d", 9), false},   // [0, 9]
          {InternalValue("d", 7), true},    // [0, 9]
          {InternalValue("e", 7), true},    // [0, 9]
          {InternalValue("g", 7), false},   // [0, 9]
          {InternalValue("h", 24), true},   // [20, kMaxSequenceNumber]
          {InternalValue("i", 24), false},  // [20, kMaxSequenceNumber]
          {InternalValue("ii", 14), true},  // [10, 19]
          {InternalValue("j", 14), false}   // [10, 19]
      });

  auto out_iter = agg.NewIterator();
  VerifyFragmentedRangeDels(out_iter.get(), {{"a", "b", 20},
                                             {"a", "b", 10},
                                             {"b", "c", 10},
                                             {"c", "e", 10},
                                             {"c", "e", 8},
                                             {"e", "g", 8},
                                             {"h", "i", 25},
                                             {"ii", "j", 15}});
}
TEST_F(RangeDelAggregatorV2Test, CompactionAggregatorEmptyIteratorLeft) {
  // A bounded compaction iterator over a window that lies entirely to the
  // LEFT of every tombstone ("__" < "a") must yield nothing, for both
  // end-key inclusivity modes. Mirrors CompactionAggregatorEmptyIteratorRight.
  auto fragment_lists = MakeFragmentedTombstoneLists(
      {{{"a", "e", 10}, {"c", "g", 8}},
       {{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}});
  std::vector<SequenceNumber> snapshots{9, 19};
  CompactionRangeDelAggregatorV2 range_del_agg(&bytewise_icmp, snapshots);
  for (const auto& fragment_list : fragment_lists) {
    std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
        new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp,
                                             kMaxSequenceNumber));
    range_del_agg.AddTombstones(std::move(input_iter));
  }

  Slice start("_");
  Slice end("__");
  // Previously these bounds were declared but never used, leaving the test
  // vacuous; actually exercise NewIterator with them.
  auto range_del_compaction_iter1 =
      range_del_agg.NewIterator(&start, &end, false /* end_key_inclusive */);
  VerifyFragmentedRangeDels(range_del_compaction_iter1.get(), {});

  auto range_del_compaction_iter2 =
      range_del_agg.NewIterator(&start, &end, true /* end_key_inclusive */);
  VerifyFragmentedRangeDels(range_del_compaction_iter2.get(), {});
}
TEST_F(RangeDelAggregatorV2Test, CompactionAggregatorEmptyIteratorRight) {
  // The window ["p", "q"] lies to the right of every tombstone, so the
  // bounded compaction iterator is empty regardless of end-key inclusivity.
  auto lists = MakeFragmentedTombstoneLists(
      {{{"a", "e", 10}, {"c", "g", 8}},
       {{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}});

  std::vector<SequenceNumber> snapshots{9, 19};
  CompactionRangeDelAggregatorV2 agg(&bytewise_icmp, snapshots);
  for (size_t i = 0; i < lists.size(); ++i) {
    std::unique_ptr<FragmentedRangeTombstoneIterator> frag_iter(
        new FragmentedRangeTombstoneIterator(lists[i].get(), bytewise_icmp,
                                             kMaxSequenceNumber));
    agg.AddTombstones(std::move(frag_iter));
  }

  Slice start("p");
  Slice end("q");
  auto it_exclusive =
      agg.NewIterator(&start, &end, false /* end_key_inclusive */);
  VerifyFragmentedRangeDels(it_exclusive.get(), {});

  auto it_inclusive =
      agg.NewIterator(&start, &end, true /* end_key_inclusive */);
  VerifyFragmentedRangeDels(it_inclusive.get(), {});
}
TEST_F(RangeDelAggregatorV2Test, CompactionAggregatorBoundedIterator) {
  auto lists = MakeFragmentedTombstoneLists(
      {{{"a", "e", 10}, {"c", "g", 8}},
       {{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}});

  std::vector<SequenceNumber> snapshots{9, 19};
  CompactionRangeDelAggregatorV2 agg(&bytewise_icmp, snapshots);
  for (size_t i = 0; i < lists.size(); ++i) {
    std::unique_ptr<FragmentedRangeTombstoneIterator> frag_iter(
        new FragmentedRangeTombstoneIterator(lists[i].get(), bytewise_icmp,
                                             kMaxSequenceNumber));
    agg.AddTombstones(std::move(frag_iter));
  }

  // Bounds ["bb", "e"]: with an exclusive end key the fragments starting at
  // "e" are excluded; with an inclusive end key {"e", "g", 8} appears too.
  Slice start("bb");
  Slice end("e");
  auto it_exclusive =
      agg.NewIterator(&start, &end, false /* end_key_inclusive */);
  VerifyFragmentedRangeDels(it_exclusive.get(),
                            {{"a", "c", 10}, {"c", "e", 10}, {"c", "e", 8}});

  auto it_inclusive =
      agg.NewIterator(&start, &end, true /* end_key_inclusive */);
  VerifyFragmentedRangeDels(
      it_inclusive.get(),
      {{"a", "c", 10}, {"c", "e", 10}, {"c", "e", 8}, {"e", "g", 8}});
}
TEST_F(RangeDelAggregatorV2Test,
       CompactionAggregatorBoundedIteratorExtraFragments) {
  auto lists = MakeFragmentedTombstoneLists(
      {{{"a", "d", 10}, {"c", "g", 8}},
       {{"b", "c", 20}, {"d", "f", 30}, {"h", "i", 25}, {"ii", "j", 15}}});

  std::vector<SequenceNumber> snapshots{9, 19};
  CompactionRangeDelAggregatorV2 agg(&bytewise_icmp, snapshots);
  for (size_t i = 0; i < lists.size(); ++i) {
    std::unique_ptr<FragmentedRangeTombstoneIterator> frag_iter(
        new FragmentedRangeTombstoneIterator(lists[i].get(), bytewise_icmp,
                                             kMaxSequenceNumber));
    agg.AddTombstones(std::move(frag_iter));
  }

  // The expected output is identical for both inclusivity modes here: the
  // fragments straddling the bounds are emitted in full either way.
  Slice start("bb");
  Slice end("e");
  auto it_exclusive =
      agg.NewIterator(&start, &end, false /* end_key_inclusive */);
  VerifyFragmentedRangeDels(it_exclusive.get(), {{"a", "b", 10},
                                                 {"b", "c", 20},
                                                 {"b", "c", 10},
                                                 {"c", "d", 10},
                                                 {"c", "d", 8},
                                                 {"d", "f", 30},
                                                 {"d", "f", 8},
                                                 {"f", "g", 8}});

  auto it_inclusive =
      agg.NewIterator(&start, &end, true /* end_key_inclusive */);
  VerifyFragmentedRangeDels(it_inclusive.get(), {{"a", "b", 10},
                                                 {"b", "c", 20},
                                                 {"b", "c", 10},
                                                 {"c", "d", 10},
                                                 {"c", "d", 8},
                                                 {"d", "f", 30},
                                                 {"d", "f", 8},
                                                 {"f", "g", 8}});
}
} // namespace rocksdb
// Standard gtest entry point: parse gtest flags, then run every test
// registered in this translation unit.
int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}

@ -185,7 +185,7 @@ Status TableCache::FindTable(const EnvOptions& env_options,
InternalIterator* TableCache::NewIterator( InternalIterator* TableCache::NewIterator(
const ReadOptions& options, const EnvOptions& env_options, const ReadOptions& options, const EnvOptions& env_options,
const InternalKeyComparator& icomparator, const FileMetaData& file_meta, const InternalKeyComparator& icomparator, const FileMetaData& file_meta,
RangeDelAggregatorV2* range_del_agg, const SliceTransform* prefix_extractor, RangeDelAggregator* range_del_agg, const SliceTransform* prefix_extractor,
TableReader** table_reader_ptr, HistogramImpl* file_read_hist, TableReader** table_reader_ptr, HistogramImpl* file_read_hist,
bool for_compaction, Arena* arena, bool skip_filters, int level, bool for_compaction, Arena* arena, bool skip_filters, int level,
const InternalKey* smallest_compaction_key, const InternalKey* smallest_compaction_key,

@ -15,7 +15,7 @@
#include <stdint.h> #include <stdint.h>
#include "db/dbformat.h" #include "db/dbformat.h"
#include "db/range_del_aggregator_v2.h" #include "db/range_del_aggregator.h"
#include "options/cf_options.h" #include "options/cf_options.h"
#include "port/port.h" #include "port/port.h"
#include "rocksdb/cache.h" #include "rocksdb/cache.h"
@ -52,7 +52,7 @@ class TableCache {
InternalIterator* NewIterator( InternalIterator* NewIterator(
const ReadOptions& options, const EnvOptions& toptions, const ReadOptions& options, const EnvOptions& toptions,
const InternalKeyComparator& internal_comparator, const InternalKeyComparator& internal_comparator,
const FileMetaData& file_meta, RangeDelAggregatorV2* range_del_agg, const FileMetaData& file_meta, RangeDelAggregator* range_del_agg,
const SliceTransform* prefix_extractor = nullptr, const SliceTransform* prefix_extractor = nullptr,
TableReader** table_reader_ptr = nullptr, TableReader** table_reader_ptr = nullptr,
HistogramImpl* file_read_hist = nullptr, bool for_compaction = false, HistogramImpl* file_read_hist = nullptr, bool for_compaction = false,

@ -459,7 +459,7 @@ class LevelIterator final : public InternalIterator {
const EnvOptions& env_options, const InternalKeyComparator& icomparator, const EnvOptions& env_options, const InternalKeyComparator& icomparator,
const LevelFilesBrief* flevel, const SliceTransform* prefix_extractor, const LevelFilesBrief* flevel, const SliceTransform* prefix_extractor,
bool should_sample, HistogramImpl* file_read_hist, bool for_compaction, bool should_sample, HistogramImpl* file_read_hist, bool for_compaction,
bool skip_filters, int level, RangeDelAggregatorV2* range_del_agg, bool skip_filters, int level, RangeDelAggregator* range_del_agg,
const std::vector<AtomicCompactionUnitBoundary>* compaction_boundaries = const std::vector<AtomicCompactionUnitBoundary>* compaction_boundaries =
nullptr) nullptr)
: table_cache_(table_cache), : table_cache_(table_cache),
@ -571,7 +571,7 @@ class LevelIterator final : public InternalIterator {
bool skip_filters_; bool skip_filters_;
size_t file_index_; size_t file_index_;
int level_; int level_;
RangeDelAggregatorV2* range_del_agg_; RangeDelAggregator* range_del_agg_;
IteratorWrapper file_iter_; // May be nullptr IteratorWrapper file_iter_; // May be nullptr
PinnedIteratorsManager* pinned_iters_mgr_; PinnedIteratorsManager* pinned_iters_mgr_;
@ -985,7 +985,7 @@ double VersionStorageInfo::GetEstimatedCompressionRatioAtLevel(
void Version::AddIterators(const ReadOptions& read_options, void Version::AddIterators(const ReadOptions& read_options,
const EnvOptions& soptions, const EnvOptions& soptions,
MergeIteratorBuilder* merge_iter_builder, MergeIteratorBuilder* merge_iter_builder,
RangeDelAggregatorV2* range_del_agg) { RangeDelAggregator* range_del_agg) {
assert(storage_info_.finalized_); assert(storage_info_.finalized_);
for (int level = 0; level < storage_info_.num_non_empty_levels(); level++) { for (int level = 0; level < storage_info_.num_non_empty_levels(); level++) {
@ -998,7 +998,7 @@ void Version::AddIteratorsForLevel(const ReadOptions& read_options,
const EnvOptions& soptions, const EnvOptions& soptions,
MergeIteratorBuilder* merge_iter_builder, MergeIteratorBuilder* merge_iter_builder,
int level, int level,
RangeDelAggregatorV2* range_del_agg) { RangeDelAggregator* range_del_agg) {
assert(storage_info_.finalized_); assert(storage_info_.finalized_);
if (level >= storage_info_.num_non_empty_levels()) { if (level >= storage_info_.num_non_empty_levels()) {
// This is an empty level // This is an empty level
@ -1057,7 +1057,7 @@ Status Version::OverlapWithLevelIterator(const ReadOptions& read_options,
Arena arena; Arena arena;
Status status; Status status;
ReadRangeDelAggregatorV2 range_del_agg(&icmp, ReadRangeDelAggregator range_del_agg(&icmp,
kMaxSequenceNumber /* upper_bound */); kMaxSequenceNumber /* upper_bound */);
*overlap = false; *overlap = false;
@ -4328,7 +4328,7 @@ void VersionSet::AddLiveFiles(std::vector<FileDescriptor>* live_list) {
} }
InternalIterator* VersionSet::MakeInputIterator( InternalIterator* VersionSet::MakeInputIterator(
const Compaction* c, RangeDelAggregatorV2* range_del_agg, const Compaction* c, RangeDelAggregator* range_del_agg,
const EnvOptions& env_options_compactions) { const EnvOptions& env_options_compactions) {
auto cfd = c->column_family_data(); auto cfd = c->column_family_data();
ReadOptions read_options; ReadOptions read_options;

@ -34,7 +34,7 @@
#include "db/dbformat.h" #include "db/dbformat.h"
#include "db/file_indexer.h" #include "db/file_indexer.h"
#include "db/log_reader.h" #include "db/log_reader.h"
#include "db/range_del_aggregator_v2.h" #include "db/range_del_aggregator.h"
#include "db/read_callback.h" #include "db/read_callback.h"
#include "db/table_cache.h" #include "db/table_cache.h"
#include "db/version_builder.h" #include "db/version_builder.h"
@ -538,11 +538,11 @@ class Version {
// REQUIRES: This version has been saved (see VersionSet::SaveTo) // REQUIRES: This version has been saved (see VersionSet::SaveTo)
void AddIterators(const ReadOptions&, const EnvOptions& soptions, void AddIterators(const ReadOptions&, const EnvOptions& soptions,
MergeIteratorBuilder* merger_iter_builder, MergeIteratorBuilder* merger_iter_builder,
RangeDelAggregatorV2* range_del_agg); RangeDelAggregator* range_del_agg);
void AddIteratorsForLevel(const ReadOptions&, const EnvOptions& soptions, void AddIteratorsForLevel(const ReadOptions&, const EnvOptions& soptions,
MergeIteratorBuilder* merger_iter_builder, MergeIteratorBuilder* merger_iter_builder,
int level, RangeDelAggregatorV2* range_del_agg); int level, RangeDelAggregator* range_del_agg);
Status OverlapWithLevelIterator(const ReadOptions&, const EnvOptions&, Status OverlapWithLevelIterator(const ReadOptions&, const EnvOptions&,
const Slice& smallest_user_key, const Slice& smallest_user_key,
@ -935,7 +935,7 @@ class VersionSet {
// Create an iterator that reads over the compaction inputs for "*c". // Create an iterator that reads over the compaction inputs for "*c".
// The caller should delete the iterator when no longer needed. // The caller should delete the iterator when no longer needed.
InternalIterator* MakeInputIterator( InternalIterator* MakeInputIterator(
const Compaction* c, RangeDelAggregatorV2* range_del_agg, const Compaction* c, RangeDelAggregator* range_del_agg,
const EnvOptions& env_options_compactions); const EnvOptions& env_options_compactions);
// Add all files listed in any live version to *live. // Add all files listed in any live version to *live.

@ -44,7 +44,6 @@ LIB_SOURCES = \
db/merge_helper.cc \ db/merge_helper.cc \
db/merge_operator.cc \ db/merge_operator.cc \
db/range_del_aggregator.cc \ db/range_del_aggregator.cc \
db/range_del_aggregator_v2.cc \
db/range_tombstone_fragmenter.cc \ db/range_tombstone_fragmenter.cc \
db/repair.cc \ db/repair.cc \
db/snapshot_impl.cc \ db/snapshot_impl.cc \
@ -335,7 +334,6 @@ MAIN_SOURCES = \
db/repair_test.cc \ db/repair_test.cc \
db/range_del_aggregator_test.cc \ db/range_del_aggregator_test.cc \
db/range_del_aggregator_bench.cc \ db/range_del_aggregator_bench.cc \
db/range_del_aggregator_v2_test.cc \
db/range_tombstone_fragmenter_test.cc \ db/range_tombstone_fragmenter_test.cc \
db/table_properties_collector_test.cc \ db/table_properties_collector_test.cc \
db/util_merge_operators_test.cc \ db/util_merge_operators_test.cc \

@ -19,7 +19,7 @@ Status GetAllKeyVersions(DB* db, Slice begin_key, Slice end_key,
DBImpl* idb = static_cast<DBImpl*>(db->GetRootDB()); DBImpl* idb = static_cast<DBImpl*>(db->GetRootDB());
auto icmp = InternalKeyComparator(idb->GetOptions().comparator); auto icmp = InternalKeyComparator(idb->GetOptions().comparator);
ReadRangeDelAggregatorV2 range_del_agg(&icmp, ReadRangeDelAggregator range_del_agg(&icmp,
kMaxSequenceNumber /* upper_bound */); kMaxSequenceNumber /* upper_bound */);
Arena arena; Arena arena;
ScopedArenaIterator iter( ScopedArenaIterator iter(

Loading…
Cancel
Save