diff --git a/CMakeLists.txt b/CMakeLists.txt index 49683925a..314f09e4e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -504,7 +504,6 @@ set(SOURCES db/merge_helper.cc db/merge_operator.cc db/range_del_aggregator.cc - db/range_del_aggregator_v2.cc db/range_tombstone_fragmenter.cc db/repair.cc db/snapshot_impl.cc @@ -908,7 +907,6 @@ if(WITH_TESTS) db/plain_table_db_test.cc db/prefix_test.cc db/range_del_aggregator_test.cc - db/range_del_aggregator_v2_test.cc db/range_tombstone_fragmenter_test.cc db/repair_test.cc db/table_properties_collector_test.cc diff --git a/Makefile b/Makefile index de55bacd9..c3a506785 100644 --- a/Makefile +++ b/Makefile @@ -543,7 +543,6 @@ TESTS = \ persistent_cache_test \ statistics_test \ lua_test \ - range_del_aggregator_test \ lru_cache_test \ object_registry_test \ repair_test \ @@ -554,7 +553,7 @@ TESTS = \ trace_analyzer_test \ repeatable_thread_test \ range_tombstone_fragmenter_test \ - range_del_aggregator_v2_test \ + range_del_aggregator_test \ sst_file_reader_test \ PARALLEL_TEST = \ @@ -1588,9 +1587,6 @@ repeatable_thread_test: util/repeatable_thread_test.o $(LIBOBJECTS) $(TESTHARNES range_tombstone_fragmenter_test: db/range_tombstone_fragmenter_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS) $(AM_LINK) -range_del_aggregator_v2_test: db/range_del_aggregator_v2_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS) - $(AM_LINK) - sst_file_reader_test: table/sst_file_reader_test.o $(LIBOBJECTS) $(TESTHARNESS) $(AM_LINK) diff --git a/TARGETS b/TARGETS index cc84ef1bb..edf1debe0 100644 --- a/TARGETS +++ b/TARGETS @@ -124,7 +124,6 @@ cpp_library( "db/merge_helper.cc", "db/merge_operator.cc", "db/range_del_aggregator.cc", - "db/range_del_aggregator_v2.cc", "db/range_tombstone_fragmenter.cc", "db/repair.cc", "db/snapshot_impl.cc", @@ -936,11 +935,6 @@ ROCKS_TESTS = [ "db/range_del_aggregator_test.cc", "serial", ], - [ - "range_del_aggregator_v2_test", - "db/range_del_aggregator_v2_test.cc", - "serial", - ], [ 
"range_tombstone_fragmenter_test", "db/range_tombstone_fragmenter_test.cc", diff --git a/db/builder.cc b/db/builder.cc index 60067c425..b13b68aeb 100644 --- a/db/builder.cc +++ b/db/builder.cc @@ -18,7 +18,7 @@ #include "db/event_helpers.h" #include "db/internal_stats.h" #include "db/merge_helper.h" -#include "db/range_del_aggregator_v2.h" +#include "db/range_del_aggregator.h" #include "db/table_cache.h" #include "db/version_edit.h" #include "monitoring/iostats_context_imp.h" @@ -88,8 +88,8 @@ Status BuildTable( Status s; meta->fd.file_size = 0; iter->SeekToFirst(); - std::unique_ptr range_del_agg( - new CompactionRangeDelAggregatorV2(&internal_comparator, snapshots)); + std::unique_ptr range_del_agg( + new CompactionRangeDelAggregator(&internal_comparator, snapshots)); for (auto& range_del_iter : range_del_iters) { range_del_agg->AddTombstones(std::move(range_del_iter)); } diff --git a/db/column_family.cc b/db/column_family.cc index c1a85a341..9a3ae99ca 100644 --- a/db/column_family.cc +++ b/db/column_family.cc @@ -25,7 +25,7 @@ #include "db/db_impl.h" #include "db/internal_stats.h" #include "db/job_context.h" -#include "db/range_del_aggregator_v2.h" +#include "db/range_del_aggregator.h" #include "db/table_properties_collector.h" #include "db/version_set.h" #include "db/write_controller.h" @@ -945,7 +945,7 @@ Status ColumnFamilyData::RangesOverlapWithMemtables( ScopedArenaIterator memtable_iter(merge_iter_builder.Finish()); auto read_seq = super_version->current->version_set()->LastSequence(); - ReadRangeDelAggregatorV2 range_del_agg(&internal_comparator_, read_seq); + ReadRangeDelAggregator range_del_agg(&internal_comparator_, read_seq); auto* active_range_del_iter = super_version->mem->NewRangeTombstoneIterator(read_opts, read_seq); range_del_agg.AddTombstones( diff --git a/db/compaction_iterator.cc b/db/compaction_iterator.cc index ad45602cc..43583af4a 100644 --- a/db/compaction_iterator.cc +++ b/db/compaction_iterator.cc @@ -18,7 +18,7 @@ 
CompactionIterator::CompactionIterator( SequenceNumber earliest_write_conflict_snapshot, const SnapshotChecker* snapshot_checker, Env* env, bool report_detailed_time, bool expect_valid_internal_key, - CompactionRangeDelAggregatorV2* range_del_agg, const Compaction* compaction, + CompactionRangeDelAggregator* range_del_agg, const Compaction* compaction, const CompactionFilter* compaction_filter, const std::atomic* shutting_down, const SequenceNumber preserve_deletes_seqnum) @@ -36,7 +36,7 @@ CompactionIterator::CompactionIterator( SequenceNumber earliest_write_conflict_snapshot, const SnapshotChecker* snapshot_checker, Env* env, bool report_detailed_time, bool expect_valid_internal_key, - CompactionRangeDelAggregatorV2* range_del_agg, + CompactionRangeDelAggregator* range_del_agg, std::unique_ptr compaction, const CompactionFilter* compaction_filter, const std::atomic* shutting_down, diff --git a/db/compaction_iterator.h b/db/compaction_iterator.h index 1f6a135b8..6fbd3d0ef 100644 --- a/db/compaction_iterator.h +++ b/db/compaction_iterator.h @@ -13,7 +13,7 @@ #include "db/compaction_iteration_stats.h" #include "db/merge_helper.h" #include "db/pinned_iterators_manager.h" -#include "db/range_del_aggregator_v2.h" +#include "db/range_del_aggregator.h" #include "db/snapshot_checker.h" #include "options/cf_options.h" #include "rocksdb/compaction_filter.h" @@ -64,7 +64,7 @@ class CompactionIterator { SequenceNumber earliest_write_conflict_snapshot, const SnapshotChecker* snapshot_checker, Env* env, bool report_detailed_time, bool expect_valid_internal_key, - CompactionRangeDelAggregatorV2* range_del_agg, + CompactionRangeDelAggregator* range_del_agg, const Compaction* compaction = nullptr, const CompactionFilter* compaction_filter = nullptr, const std::atomic* shutting_down = nullptr, @@ -77,7 +77,7 @@ class CompactionIterator { SequenceNumber earliest_write_conflict_snapshot, const SnapshotChecker* snapshot_checker, Env* env, bool report_detailed_time, bool 
expect_valid_internal_key, - CompactionRangeDelAggregatorV2* range_del_agg, + CompactionRangeDelAggregator* range_del_agg, std::unique_ptr compaction, const CompactionFilter* compaction_filter = nullptr, const std::atomic* shutting_down = nullptr, @@ -141,7 +141,7 @@ class CompactionIterator { Env* env_; bool report_detailed_time_; bool expect_valid_internal_key_; - CompactionRangeDelAggregatorV2* range_del_agg_; + CompactionRangeDelAggregator* range_del_agg_; std::unique_ptr compaction_; const CompactionFilter* compaction_filter_; const std::atomic* shutting_down_; diff --git a/db/compaction_iterator_test.cc b/db/compaction_iterator_test.cc index a81efafaa..07a9e6ef8 100644 --- a/db/compaction_iterator_test.cc +++ b/db/compaction_iterator_test.cc @@ -228,8 +228,7 @@ class CompactionIteratorTest : public testing::TestWithParam { std::unique_ptr range_del_iter( new FragmentedRangeTombstoneIterator(tombstone_list, icmp_, kMaxSequenceNumber)); - range_del_agg_.reset( - new CompactionRangeDelAggregatorV2(&icmp_, snapshots_)); + range_del_agg_.reset(new CompactionRangeDelAggregator(&icmp_, snapshots_)); range_del_agg_->AddTombstones(std::move(range_del_iter)); std::unique_ptr compaction; @@ -298,7 +297,7 @@ class CompactionIteratorTest : public testing::TestWithParam { std::unique_ptr merge_helper_; std::unique_ptr iter_; std::unique_ptr c_iter_; - std::unique_ptr range_del_agg_; + std::unique_ptr range_del_agg_; std::unique_ptr snapshot_checker_; std::atomic shutting_down_{false}; FakeCompaction* compaction_proxy_; diff --git a/db/compaction_job.cc b/db/compaction_job.cc index b29f1615d..3ce38453f 100644 --- a/db/compaction_job.cc +++ b/db/compaction_job.cc @@ -36,7 +36,7 @@ #include "db/memtable_list.h" #include "db/merge_context.h" #include "db/merge_helper.h" -#include "db/range_del_aggregator_v2.h" +#include "db/range_del_aggregator.h" #include "db/version_set.h" #include "monitoring/iostats_context_imp.h" #include "monitoring/perf_context_imp.h" @@ -805,8 +805,8 
@@ Status CompactionJob::Install(const MutableCFOptions& mutable_cf_options) { void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) { assert(sub_compact != nullptr); ColumnFamilyData* cfd = sub_compact->compaction->column_family_data(); - CompactionRangeDelAggregatorV2 range_del_agg(&cfd->internal_comparator(), - existing_snapshots_); + CompactionRangeDelAggregator range_del_agg(&cfd->internal_comparator(), + existing_snapshots_); // Although the v2 aggregator is what the level iterator(s) know about, // the AddTombstones calls will be propagated down to the v1 aggregator. @@ -1165,7 +1165,7 @@ void CompactionJob::RecordDroppedKeys( Status CompactionJob::FinishCompactionOutputFile( const Status& input_status, SubcompactionState* sub_compact, - CompactionRangeDelAggregatorV2* range_del_agg, + CompactionRangeDelAggregator* range_del_agg, CompactionIterationStats* range_del_out_stats, const Slice* next_table_min_key /* = nullptr */) { AutoThreadOperationStageUpdater stage_updater( diff --git a/db/compaction_job.h b/db/compaction_job.h index 86d97e1db..596b5cc60 100644 --- a/db/compaction_job.h +++ b/db/compaction_job.h @@ -25,7 +25,7 @@ #include "db/job_context.h" #include "db/log_writer.h" #include "db/memtable_list.h" -#include "db/range_del_aggregator_v2.h" +#include "db/range_del_aggregator.h" #include "db/version_edit.h" #include "db/write_controller.h" #include "db/write_thread.h" @@ -104,7 +104,7 @@ class CompactionJob { Status FinishCompactionOutputFile( const Status& input_status, SubcompactionState* sub_compact, - CompactionRangeDelAggregatorV2* range_del_agg, + CompactionRangeDelAggregator* range_del_agg, CompactionIterationStats* range_del_out_stats, const Slice* next_table_min_key = nullptr); Status InstallCompactionResults(const MutableCFOptions& mutable_cf_options); diff --git a/db/db_compaction_filter_test.cc b/db/db_compaction_filter_test.cc index 63d2829d5..25045d01d 100644 --- a/db/db_compaction_filter_test.cc +++ 
b/db/db_compaction_filter_test.cc @@ -340,8 +340,8 @@ TEST_F(DBTestCompactionFilter, CompactionFilter) { Arena arena; { InternalKeyComparator icmp(options.comparator); - ReadRangeDelAggregatorV2 range_del_agg( - &icmp, kMaxSequenceNumber /* upper_bound */); + ReadRangeDelAggregator range_del_agg(&icmp, + kMaxSequenceNumber /* upper_bound */); ScopedArenaIterator iter(dbfull()->NewInternalIterator( &arena, &range_del_agg, kMaxSequenceNumber, handles_[1])); iter->SeekToFirst(); @@ -430,8 +430,8 @@ TEST_F(DBTestCompactionFilter, CompactionFilter) { count = 0; { InternalKeyComparator icmp(options.comparator); - ReadRangeDelAggregatorV2 range_del_agg( - &icmp, kMaxSequenceNumber /* upper_bound */); + ReadRangeDelAggregator range_del_agg(&icmp, + kMaxSequenceNumber /* upper_bound */); ScopedArenaIterator iter(dbfull()->NewInternalIterator( &arena, &range_del_agg, kMaxSequenceNumber, handles_[1])); iter->SeekToFirst(); @@ -648,8 +648,8 @@ TEST_F(DBTestCompactionFilter, CompactionFilterContextManual) { int total = 0; Arena arena; InternalKeyComparator icmp(options.comparator); - ReadRangeDelAggregatorV2 range_del_agg(&icmp, - kMaxSequenceNumber /* snapshots */); + ReadRangeDelAggregator range_del_agg(&icmp, + kMaxSequenceNumber /* snapshots */); ScopedArenaIterator iter(dbfull()->NewInternalIterator( &arena, &range_del_agg, kMaxSequenceNumber)); iter->SeekToFirst(); diff --git a/db/db_impl.cc b/db/db_impl.cc index 78ade2c8b..4f47de4d5 100644 --- a/db/db_impl.cc +++ b/db/db_impl.cc @@ -45,7 +45,6 @@ #include "db/memtable_list.h" #include "db/merge_context.h" #include "db/merge_helper.h" -#include "db/range_del_aggregator.h" #include "db/range_tombstone_fragmenter.h" #include "db/table_cache.h" #include "db/table_properties_collector.h" @@ -1033,7 +1032,7 @@ bool DBImpl::SetPreserveDeletesSequenceNumber(SequenceNumber seqnum) { } InternalIterator* DBImpl::NewInternalIterator( - Arena* arena, RangeDelAggregatorV2* range_del_agg, SequenceNumber sequence, + Arena* arena, 
RangeDelAggregator* range_del_agg, SequenceNumber sequence, ColumnFamilyHandle* column_family) { ColumnFamilyData* cfd; if (column_family == nullptr) { @@ -1150,10 +1149,12 @@ static void CleanupIteratorState(void* arg1, void* /*arg2*/) { } } // namespace -InternalIterator* DBImpl::NewInternalIterator( - const ReadOptions& read_options, ColumnFamilyData* cfd, - SuperVersion* super_version, Arena* arena, - RangeDelAggregatorV2* range_del_agg, SequenceNumber sequence) { +InternalIterator* DBImpl::NewInternalIterator(const ReadOptions& read_options, + ColumnFamilyData* cfd, + SuperVersion* super_version, + Arena* arena, + RangeDelAggregator* range_del_agg, + SequenceNumber sequence) { InternalIterator* internal_iter; assert(arena != nullptr); assert(range_del_agg != nullptr); diff --git a/db/db_impl.h b/db/db_impl.h index d99f19b87..cbfdd0dd6 100644 --- a/db/db_impl.h +++ b/db/db_impl.h @@ -31,7 +31,7 @@ #include "db/log_writer.h" #include "db/logs_with_prep_tracker.h" #include "db/pre_release_callback.h" -#include "db/range_del_aggregator_v2.h" +#include "db/range_del_aggregator.h" #include "db/read_callback.h" #include "db/snapshot_checker.h" #include "db/snapshot_impl.h" @@ -375,8 +375,8 @@ class DBImpl : public DB { // The keys of this iterator are internal keys (see format.h). // The returned iterator should be deleted when no longer needed. 
InternalIterator* NewInternalIterator( - Arena* arena, RangeDelAggregatorV2* range_del_agg, - SequenceNumber sequence, ColumnFamilyHandle* column_family = nullptr); + Arena* arena, RangeDelAggregator* range_del_agg, SequenceNumber sequence, + ColumnFamilyHandle* column_family = nullptr); LogsWithPrepTracker* logs_with_prep_tracker() { return &logs_with_prep_tracker_; @@ -579,12 +579,9 @@ class DBImpl : public DB { const WriteController& write_controller() { return write_controller_; } - InternalIterator* NewInternalIterator(const ReadOptions&, - ColumnFamilyData* cfd, - SuperVersion* super_version, - Arena* arena, - RangeDelAggregatorV2* range_del_agg, - SequenceNumber sequence); + InternalIterator* NewInternalIterator( + const ReadOptions&, ColumnFamilyData* cfd, SuperVersion* super_version, + Arena* arena, RangeDelAggregator* range_del_agg, SequenceNumber sequence); // hollow transactions shell used for recovery. // these will then be passed to TransactionDB so that diff --git a/db/db_impl_readonly.cc b/db/db_impl_readonly.cc index c4a55b6ec..bd7099f00 100644 --- a/db/db_impl_readonly.cc +++ b/db/db_impl_readonly.cc @@ -9,7 +9,6 @@ #include "db/db_impl.h" #include "db/db_iter.h" #include "db/merge_context.h" -#include "db/range_del_aggregator.h" #include "monitoring/perf_context_imp.h" namespace rocksdb { diff --git a/db/db_iter.cc b/db/db_iter.cc index cc4a0d5f4..348247aa3 100644 --- a/db/db_iter.cc +++ b/db/db_iter.cc @@ -171,7 +171,7 @@ class DBIter final: public Iterator { iter_ = iter; iter_->SetPinnedItersMgr(&pinned_iters_mgr_); } - virtual ReadRangeDelAggregatorV2* GetRangeDelAggregator() { + virtual ReadRangeDelAggregator* GetRangeDelAggregator() { return &range_del_agg_; } @@ -341,7 +341,7 @@ class DBIter final: public Iterator { const bool total_order_seek_; // List of operands for merge operator. 
MergeContext merge_context_; - ReadRangeDelAggregatorV2 range_del_agg_; + ReadRangeDelAggregator range_del_agg_; LocalStatistics local_stats_; PinnedIteratorsManager pinned_iters_mgr_; ReadCallback* read_callback_; @@ -1479,7 +1479,7 @@ Iterator* NewDBIterator(Env* env, const ReadOptions& read_options, ArenaWrappedDBIter::~ArenaWrappedDBIter() { db_iter_->~DBIter(); } -ReadRangeDelAggregatorV2* ArenaWrappedDBIter::GetRangeDelAggregator() { +ReadRangeDelAggregator* ArenaWrappedDBIter::GetRangeDelAggregator() { return db_iter_->GetRangeDelAggregator(); } diff --git a/db/db_iter.h b/db/db_iter.h index 6ee869135..a640f0296 100644 --- a/db/db_iter.h +++ b/db/db_iter.h @@ -12,7 +12,7 @@ #include #include "db/db_impl.h" #include "db/dbformat.h" -#include "db/range_del_aggregator_v2.h" +#include "db/range_del_aggregator.h" #include "options/cf_options.h" #include "rocksdb/db.h" #include "rocksdb/iterator.h" @@ -48,7 +48,7 @@ class ArenaWrappedDBIter : public Iterator { // Get the arena to be used to allocate memory for DBIter to be wrapped, // as well as child iterators in it. virtual Arena* GetArena() { return &arena_; } - virtual ReadRangeDelAggregatorV2* GetRangeDelAggregator(); + virtual ReadRangeDelAggregator* GetRangeDelAggregator(); // Set the internal iterator wrapped inside the DB Iterator. Usually it is // a merging iterator. 
diff --git a/db/db_memtable_test.cc b/db/db_memtable_test.cc index 5f47a9481..96025d7db 100644 --- a/db/db_memtable_test.cc +++ b/db/db_memtable_test.cc @@ -8,6 +8,7 @@ #include "db/db_test_util.h" #include "db/memtable.h" +#include "db/range_del_aggregator.h" #include "port/stack_trace.h" #include "rocksdb/memtablerep.h" #include "rocksdb/slice_transform.h" @@ -135,7 +136,8 @@ TEST_F(DBMemTableTest, DuplicateSeq) { MergeContext merge_context; Options options; InternalKeyComparator ikey_cmp(options.comparator); - RangeDelAggregator range_del_agg(ikey_cmp, {} /* snapshots */); + ReadRangeDelAggregator range_del_agg(&ikey_cmp, + kMaxSequenceNumber /* upper_bound */); // Create a MemTable InternalKeyComparator cmp(BytewiseComparator()); diff --git a/db/db_test_util.cc b/db/db_test_util.cc index eeff7be51..de096d254 100644 --- a/db/db_test_util.cc +++ b/db/db_test_util.cc @@ -814,8 +814,8 @@ std::string DBTestBase::AllEntriesFor(const Slice& user_key, int cf) { Arena arena; auto options = CurrentOptions(); InternalKeyComparator icmp(options.comparator); - ReadRangeDelAggregatorV2 range_del_agg(&icmp, - kMaxSequenceNumber /* upper_bound */); + ReadRangeDelAggregator range_del_agg(&icmp, + kMaxSequenceNumber /* upper_bound */); ScopedArenaIterator iter; if (cf == 0) { iter.set(dbfull()->NewInternalIterator(&arena, &range_del_agg, @@ -1227,8 +1227,8 @@ void DBTestBase::validateNumberOfEntries(int numValues, int cf) { Arena arena; auto options = CurrentOptions(); InternalKeyComparator icmp(options.comparator); - ReadRangeDelAggregatorV2 range_del_agg(&icmp, - kMaxSequenceNumber /* upper_bound */); + ReadRangeDelAggregator range_del_agg(&icmp, + kMaxSequenceNumber /* upper_bound */); // This should be defined after range_del_agg so that it destructs the // assigned iterator before it range_del_agg is already destructed. 
ScopedArenaIterator iter; @@ -1437,8 +1437,8 @@ void DBTestBase::VerifyDBInternal( std::vector> true_data) { Arena arena; InternalKeyComparator icmp(last_options_.comparator); - ReadRangeDelAggregatorV2 range_del_agg(&icmp, - kMaxSequenceNumber /* upper_bound */); + ReadRangeDelAggregator range_del_agg(&icmp, + kMaxSequenceNumber /* upper_bound */); auto iter = dbfull()->NewInternalIterator(&arena, &range_del_agg, kMaxSequenceNumber); iter->SeekToFirst(); diff --git a/db/forward_iterator.cc b/db/forward_iterator.cc index 226d56d5f..f44a09756 100644 --- a/db/forward_iterator.cc +++ b/db/forward_iterator.cc @@ -15,7 +15,7 @@ #include "db/db_iter.h" #include "db/dbformat.h" #include "db/job_context.h" -#include "db/range_del_aggregator_v2.h" +#include "db/range_del_aggregator.h" #include "db/range_tombstone_fragmenter.h" #include "rocksdb/env.h" #include "rocksdb/slice.h" @@ -73,8 +73,8 @@ class ForwardLevelIterator : public InternalIterator { delete file_iter_; } - ReadRangeDelAggregatorV2 range_del_agg( - &cfd_->internal_comparator(), kMaxSequenceNumber /* upper_bound */); + ReadRangeDelAggregator range_del_agg(&cfd_->internal_comparator(), + kMaxSequenceNumber /* upper_bound */); file_iter_ = cfd_->table_cache()->NewIterator( read_options_, *(cfd_->soptions()), cfd_->internal_comparator(), *files_[file_index_], @@ -610,8 +610,8 @@ void ForwardIterator::RebuildIterators(bool refresh_sv) { // New sv_ = cfd_->GetReferencedSuperVersion(&(db_->mutex_)); } - ReadRangeDelAggregatorV2 range_del_agg(&cfd_->internal_comparator(), - kMaxSequenceNumber /* upper_bound */); + ReadRangeDelAggregator range_del_agg(&cfd_->internal_comparator(), + kMaxSequenceNumber /* upper_bound */); mutable_iter_ = sv_->mem->NewIterator(read_options_, &arena_); sv_->imm->AddIterators(read_options_, &imm_iters_, &arena_); if (!read_options_.ignore_range_deletions) { @@ -669,8 +669,8 @@ void ForwardIterator::RenewIterators() { mutable_iter_ = svnew->mem->NewIterator(read_options_, &arena_); 
svnew->imm->AddIterators(read_options_, &imm_iters_, &arena_); - ReadRangeDelAggregatorV2 range_del_agg(&cfd_->internal_comparator(), - kMaxSequenceNumber /* upper_bound */); + ReadRangeDelAggregator range_del_agg(&cfd_->internal_comparator(), + kMaxSequenceNumber /* upper_bound */); if (!read_options_.ignore_range_deletions) { std::unique_ptr range_del_iter( svnew->mem->NewRangeTombstoneIterator( diff --git a/db/memtable_list.cc b/db/memtable_list.cc index 39e00285b..36c0a8f1d 100644 --- a/db/memtable_list.cc +++ b/db/memtable_list.cc @@ -159,7 +159,7 @@ bool MemTableListVersion::GetFromList( Status MemTableListVersion::AddRangeTombstoneIterators( const ReadOptions& read_opts, Arena* /*arena*/, - RangeDelAggregatorV2* range_del_agg) { + RangeDelAggregator* range_del_agg) { assert(range_del_agg != nullptr); for (auto& m : memlist_) { // Using kMaxSequenceNumber is OK because these are immutable memtables. diff --git a/db/memtable_list.h b/db/memtable_list.h index 70bab1c38..6315167a1 100644 --- a/db/memtable_list.h +++ b/db/memtable_list.h @@ -15,7 +15,7 @@ #include "db/dbformat.h" #include "db/logs_with_prep_tracker.h" #include "db/memtable.h" -#include "db/range_del_aggregator_v2.h" +#include "db/range_del_aggregator.h" #include "monitoring/instrumented_mutex.h" #include "rocksdb/db.h" #include "rocksdb/iterator.h" @@ -91,7 +91,7 @@ class MemTableListVersion { } Status AddRangeTombstoneIterators(const ReadOptions& read_opts, Arena* arena, - RangeDelAggregatorV2* range_del_agg); + RangeDelAggregator* range_del_agg); void AddIterators(const ReadOptions& options, std::vector* iterator_list, diff --git a/db/memtable_list_test.cc b/db/memtable_list_test.cc index 06554f1ab..96032a465 100644 --- a/db/memtable_list_test.cc +++ b/db/memtable_list_test.cc @@ -8,7 +8,6 @@ #include #include #include "db/merge_context.h" -#include "db/range_del_aggregator.h" #include "db/version_set.h" #include "db/write_controller.h" #include "rocksdb/db.h" diff --git a/db/merge_context.h 
b/db/merge_context.h index c226f64e5..fd06441f7 100644 --- a/db/merge_context.h +++ b/db/merge_context.h @@ -79,7 +79,8 @@ class MergeContext { return GetOperandsDirectionForward(); } - // Return all the operands in the order as they were merged (passed to FullMerge or FullMergeV2) + // Return all the operands in the order as they were merged (passed to + // FullMerge or FullMergeV2) const std::vector& GetOperandsDirectionForward() { if (!operand_list_) { return empty_operand_list; @@ -89,7 +90,8 @@ class MergeContext { return *operand_list_; } - // Return all the operands in the reversed order relative to how they were merged (passed to FullMerge or FullMergeV2) + // Return all the operands in the reversed order relative to how they were + // merged (passed to FullMerge or FullMergeV2) const std::vector& GetOperandsDirectionBackward() { if (!operand_list_) { return empty_operand_list; diff --git a/db/merge_helper.cc b/db/merge_helper.cc index 6f7e760ec..f33dafd8e 100644 --- a/db/merge_helper.cc +++ b/db/merge_helper.cc @@ -114,7 +114,7 @@ Status MergeHelper::TimedFullMerge(const MergeOperator* merge_operator, // TODO: Avoid the snapshot stripe map lookup in CompactionRangeDelAggregator // and just pass the StripeRep corresponding to the stripe being merged. 
Status MergeHelper::MergeUntil(InternalIterator* iter, - CompactionRangeDelAggregatorV2* range_del_agg, + CompactionRangeDelAggregator* range_del_agg, const SequenceNumber stop_before, const bool at_bottom) { // Get a copy of the internal key, before it's invalidated by iter->Next() diff --git a/db/merge_helper.h b/db/merge_helper.h index 1c92a3492..670cba598 100644 --- a/db/merge_helper.h +++ b/db/merge_helper.h @@ -11,7 +11,7 @@ #include "db/dbformat.h" #include "db/merge_context.h" -#include "db/range_del_aggregator_v2.h" +#include "db/range_del_aggregator.h" #include "db/snapshot_checker.h" #include "rocksdb/compaction_filter.h" #include "rocksdb/env.h" @@ -78,7 +78,7 @@ class MergeHelper { // // REQUIRED: The first key in the input is not corrupted. Status MergeUntil(InternalIterator* iter, - CompactionRangeDelAggregatorV2* range_del_agg = nullptr, + CompactionRangeDelAggregator* range_del_agg = nullptr, const SequenceNumber stop_before = 0, const bool at_bottom = false); diff --git a/db/range_del_aggregator.cc b/db/range_del_aggregator.cc index 331758558..8a6b0a51f 100644 --- a/db/range_del_aggregator.cc +++ b/db/range_del_aggregator.cc @@ -1,709 +1,492 @@ -// Copyright (c) 2016-present, Facebook, Inc. All rights reserved. +// Copyright (c) 2018-present, Facebook, Inc. All rights reserved. // This source code is licensed under both the GPLv2 (found in the // COPYING file in the root directory) and Apache 2.0 License // (found in the LICENSE.Apache file in the root directory). 
#include "db/range_del_aggregator.h" -#include "util/heap.h" -#include +#include "db/compaction_iteration_stats.h" +#include "db/dbformat.h" +#include "db/pinned_iterators_manager.h" +#include "db/range_del_aggregator.h" +#include "db/range_tombstone_fragmenter.h" +#include "db/version_edit.h" +#include "include/rocksdb/comparator.h" +#include "include/rocksdb/types.h" +#include "table/internal_iterator.h" +#include "table/scoped_arena_iterator.h" +#include "table/table_builder.h" +#include "util/heap.h" +#include "util/kv_map.h" +#include "util/vector_iterator.h" namespace rocksdb { -struct TombstoneStartKeyComparator { - explicit TombstoneStartKeyComparator(const InternalKeyComparator* c) - : cmp(c) {} - - bool operator()(const TruncatedRangeTombstone& a, - const TruncatedRangeTombstone& b) const { - return cmp->Compare(a.start_key_, b.start_key_) < 0; - } - - const InternalKeyComparator* cmp; -}; - -// An UncollapsedRangeDelMap is quick to create but slow to answer ShouldDelete -// queries. 
-class UncollapsedRangeDelMap : public RangeDelMap { - typedef std::vector Rep; - - class Iterator : public RangeDelIterator { - const Rep& rep_; - Rep::const_iterator iter_; - - public: - Iterator(const Rep& rep) : rep_(rep), iter_(rep.begin()) {} - bool Valid() const override { return iter_ != rep_.end(); } - void Next() override { iter_++; } - - void Seek(const Slice&) override { - fprintf(stderr, - "UncollapsedRangeDelMap::Iterator::Seek(Slice&) unimplemented\n"); - abort(); +TruncatedRangeDelIterator::TruncatedRangeDelIterator( + std::unique_ptr iter, + const InternalKeyComparator* icmp, const InternalKey* smallest, + const InternalKey* largest) + : iter_(std::move(iter)), + icmp_(icmp), + smallest_ikey_(smallest), + largest_ikey_(largest) { + if (smallest != nullptr) { + pinned_bounds_.emplace_back(); + auto& parsed_smallest = pinned_bounds_.back(); + if (!ParseInternalKey(smallest->Encode(), &parsed_smallest)) { + assert(false); } - - void Seek(const ParsedInternalKey&) override { - fprintf(stderr, - "UncollapsedRangeDelMap::Iterator::Seek(ParsedInternalKey&) " - "unimplemented\n"); - abort(); + smallest_ = &parsed_smallest; + } + if (largest != nullptr) { + pinned_bounds_.emplace_back(); + auto& parsed_largest = pinned_bounds_.back(); + if (!ParseInternalKey(largest->Encode(), &parsed_largest)) { + assert(false); } - - RangeTombstone Tombstone() const override { return iter_->Tombstone(); } - }; - - Rep rep_; - const InternalKeyComparator* icmp_; - - public: - explicit UncollapsedRangeDelMap(const InternalKeyComparator* icmp) - : icmp_(icmp) {} - - bool ShouldDelete(const ParsedInternalKey& parsed, - RangeDelPositioningMode mode) override { - (void)mode; - assert(mode == RangeDelPositioningMode::kFullScan); - for (const auto& tombstone : rep_) { - if (icmp_->Compare(parsed, tombstone.start_key_) < 0) { - continue; - } - if (parsed.sequence < tombstone.seq_ && - icmp_->Compare(parsed, tombstone.end_key_) < 0) { - return true; - } + if (parsed_largest.type == 
kTypeRangeDeletion && + parsed_largest.sequence == kMaxSequenceNumber) { + // The file boundary has been artificially extended by a range tombstone. + // We do not need to adjust largest to properly truncate range + // tombstones that extend past the boundary. + } else if (parsed_largest.sequence == 0) { + // The largest key in the sstable has a sequence number of 0. Since we + // guarantee that no internal keys with the same user key and sequence + // number can exist in a DB, we know that the largest key in this sstable + // cannot exist as the smallest key in the next sstable. This further + // implies that no range tombstone in this sstable covers largest; + // otherwise, the file boundary would have been artificially extended. + // + // Therefore, we will never truncate a range tombstone at largest, so we + // can leave it unchanged. + } else { + // The same user key may straddle two sstable boundaries. To ensure that + // the truncated end key can cover the largest key in this sstable, reduce + // its sequence number by 1. 
+ parsed_largest.sequence -= 1; } - return false; + largest_ = &parsed_largest; } +} - bool IsRangeOverlapped(const ParsedInternalKey& start, - const ParsedInternalKey& end) override { - for (const auto& tombstone : rep_) { - if (icmp_->Compare(start, tombstone.end_key_) < 0 && - icmp_->Compare(tombstone.start_key_, end) <= 0 && - icmp_->Compare(tombstone.start_key_, tombstone.end_key_) < 0) { - return true; - } - } - return false; - } +bool TruncatedRangeDelIterator::Valid() const { + return iter_->Valid() && + (smallest_ == nullptr || + icmp_->Compare(*smallest_, iter_->parsed_end_key()) < 0) && + (largest_ == nullptr || + icmp_->Compare(iter_->parsed_start_key(), *largest_) < 0); +} - void AddTombstone(TruncatedRangeTombstone tombstone) override { - rep_.emplace_back(tombstone); - } +void TruncatedRangeDelIterator::Next() { iter_->TopNext(); } - size_t Size() const override { return rep_.size(); } +void TruncatedRangeDelIterator::Prev() { iter_->TopPrev(); } - void InvalidatePosition() override {} // no-op +void TruncatedRangeDelIterator::InternalNext() { iter_->Next(); } - std::unique_ptr NewIterator() override { - std::sort(rep_.begin(), rep_.end(), TombstoneStartKeyComparator(icmp_)); - return std::unique_ptr(new Iterator(this->rep_)); +// NOTE: target is a user key +void TruncatedRangeDelIterator::Seek(const Slice& target) { + if (largest_ != nullptr && + icmp_->Compare(*largest_, ParsedInternalKey(target, kMaxSequenceNumber, + kTypeRangeDeletion)) <= 0) { + iter_->Invalidate(); + return; } -}; - -// A CollapsedRangeDelMap is slow to create but quick to answer ShouldDelete -// queries. -// -// An explanation of the design follows. Suppose we have tombstones [b, n) @ 1, -// [e, h) @ 2, [q, t) @ 2, and [g, k) @ 3. 
Visually, the tombstones look like -// this: -// -// 3: g---k -// 2: e---h q--t -// 1: b------------n -// -// The CollapsedRangeDelMap representation is based on the observation that -// wherever tombstones overlap, we need only store the tombstone with the -// largest seqno. From the perspective of a read at seqno 4 or greater, this set -// of tombstones is exactly equivalent: -// -// 3: g---k -// 2: e--g q--t -// 1: b--e k--n -// -// Because these tombstones do not overlap, they can be efficiently represented -// in an ordered map from keys to sequence numbers. Each entry should be thought -// of as a transition from one tombstone to the next. In this example, the -// CollapsedRangeDelMap would store the following entries, in order: -// -// b → 1, e → 2, g → 3, k → 1, n → 0, q → 2, t → 0 -// -// If a tombstone ends before the next tombstone begins, a sentinel seqno of 0 -// is installed to indicate that no tombstone exists. This occurs at keys n and -// t in the example above. -// -// To check whether a key K is covered by a tombstone, the map is binary -// searched for the last key less than K. K is covered iff the map entry has a -// larger seqno than K. As an example, consider the key h @ 4. It would be -// compared against the map entry g → 3 and determined to be uncovered. By -// contrast, the key h @ 2 would be determined to be covered. 
-class CollapsedRangeDelMap : public RangeDelMap { - typedef std::map - Rep; - - class Iterator : public RangeDelIterator { - void MaybeSeekPastSentinel() { - if (Valid() && iter_->second == 0) { - iter_++; - } - } - - const Rep& rep_; - Rep::const_iterator iter_; - - public: - Iterator(const Rep& rep) : rep_(rep), iter_(rep.begin()) {} - - bool Valid() const override { return iter_ != rep_.end(); } - - void Next() override { - iter_++; - MaybeSeekPastSentinel(); - } - - void Seek(const Slice&) override { - fprintf(stderr, "CollapsedRangeDelMap::Iterator::Seek(Slice&) unimplemented\n"); - abort(); - } - - void Seek(const ParsedInternalKey& target) override { - iter_ = rep_.upper_bound(target); - if (iter_ != rep_.begin()) { - iter_--; - } - MaybeSeekPastSentinel(); - } - - RangeTombstone Tombstone() const override { - assert(Valid()); - assert(std::next(iter_) != rep_.end()); - assert(iter_->second != 0); - RangeTombstone tombstone; - tombstone.start_key_ = iter_->first.user_key; - tombstone.end_key_ = std::next(iter_)->first.user_key; - tombstone.seq_ = iter_->second; - return tombstone; - } - }; - - Rep rep_; - Rep::iterator iter_; - const InternalKeyComparator* icmp_; - - public: - explicit CollapsedRangeDelMap(const InternalKeyComparator* icmp) - : rep_(ParsedInternalKeyComparator(icmp)), - icmp_(icmp) { - InvalidatePosition(); + if (smallest_ != nullptr && + icmp_->user_comparator()->Compare(target, smallest_->user_key) < 0) { + iter_->Seek(smallest_->user_key); + return; } + iter_->Seek(target); +} - bool ShouldDelete(const ParsedInternalKey& parsed, - RangeDelPositioningMode mode) override { - if (iter_ == rep_.end() && - (mode == RangeDelPositioningMode::kForwardTraversal || - mode == RangeDelPositioningMode::kBackwardTraversal)) { - // invalid (e.g., if AddTombstones() changed the deletions), so need to - // reseek - mode = RangeDelPositioningMode::kBinarySearch; - } - switch (mode) { - case RangeDelPositioningMode::kFullScan: - assert(false); - case 
RangeDelPositioningMode::kForwardTraversal: - assert(iter_ != rep_.end()); - if (iter_ == rep_.begin() && - icmp_->Compare(parsed, iter_->first) < 0) { - // before start of deletion intervals - return false; - } - while (std::next(iter_) != rep_.end() && - icmp_->Compare(std::next(iter_)->first, parsed) <= 0) { - ++iter_; - } - break; - case RangeDelPositioningMode::kBackwardTraversal: - assert(iter_ != rep_.end()); - while (iter_ != rep_.begin() && - icmp_->Compare(parsed, iter_->first) < 0) { - --iter_; - } - if (iter_ == rep_.begin() && - icmp_->Compare(parsed, iter_->first) < 0) { - // before start of deletion intervals - return false; - } - break; - case RangeDelPositioningMode::kBinarySearch: - iter_ = rep_.upper_bound(parsed); - if (iter_ == rep_.begin()) { - // before start of deletion intervals - return false; - } - --iter_; - break; - } - assert(iter_ != rep_.end() && - icmp_->Compare(iter_->first, parsed) <= 0); - assert(std::next(iter_) == rep_.end() || - icmp_->Compare(parsed, std::next(iter_)->first) < 0); - return parsed.sequence < iter_->second; +// NOTE: target is a user key +void TruncatedRangeDelIterator::SeekForPrev(const Slice& target) { + if (smallest_ != nullptr && + icmp_->Compare(ParsedInternalKey(target, 0, kTypeRangeDeletion), + *smallest_) < 0) { + iter_->Invalidate(); + return; } - - bool IsRangeOverlapped(const ParsedInternalKey&, - const ParsedInternalKey&) override { - // Unimplemented because the only client of this method, file ingestion, - // uses uncollapsed maps. - fprintf(stderr, "CollapsedRangeDelMap::IsRangeOverlapped unimplemented"); - abort(); + if (largest_ != nullptr && + icmp_->user_comparator()->Compare(largest_->user_key, target) < 0) { + iter_->SeekForPrev(largest_->user_key); + return; } + iter_->SeekForPrev(target); +} - void AddTombstone(TruncatedRangeTombstone t) override { - if (icmp_->Compare(t.start_key_, t.end_key_) >= 0 || t.seq_ == 0) { - // The tombstone covers no keys. Nothing to do. 
- return; - } - - auto it = rep_.upper_bound(t.start_key_); - auto prev_seq = [&]() { - return it == rep_.begin() ? 0 : std::prev(it)->second; - }; - - // end_seq stores the seqno of the last transition that the new tombstone - // covered. This is the seqno that we'll install if we need to insert a - // transition for the new tombstone's end key. - SequenceNumber end_seq = 0; - - // In the diagrams below, the new tombstone is always [c, k) @ 2. The - // existing tombstones are varied to depict different scenarios. Uppercase - // letters are used to indicate points that exist in the map, while - // lowercase letters are used to indicate points that do not exist in the - // map. The location of the iterator is marked with a caret; it may point - // off the end of the diagram to indicate that it is positioned at a - // entry with a larger key whose specific key is irrelevant. - - if (t.seq_ > prev_seq()) { - // The new tombstone's start point covers the existing tombstone: - // - // 3: 3: A--C 3: 3: - // 2: c--- OR 2: c--- OR 2: c--- OR 2: c------ - // 1: A--C 1: 1: A------ 1: C------ - // ^ ^ ^ ^ - end_seq = prev_seq(); - Rep::iterator pit; - if (it != rep_.begin() && (pit = std::prev(it)) != rep_.begin() && - icmp_->Compare(pit->first, t.start_key_) == 0 && - std::prev(pit)->second == t.seq_) { - // The new tombstone starts at the end of an existing tombstone with an - // identical seqno: - // - // 3: - // 2: A--C--- - // 1: - // ^ - // Merge the tombstones by removing the existing tombstone's end key. - it = rep_.erase(std::prev(it)); - } else { - // Insert a new transition at the new tombstone's start point, or raise - // the existing transition at that point to the new tombstone's seqno. - rep_[t.start_key_] = t.seq_; // operator[] will overwrite existing entry - } - } else { - // The new tombstone's start point is covered by an existing tombstone: - // - // 3: A----- OR 3: C------ OR - // 2: c--- 2: c------ 2: C------ - // ^ ^ ^ - // Do nothing. 
- } - - // Look at all the existing transitions that overlap the new tombstone. - while (it != rep_.end() && icmp_->Compare(it->first, t.end_key_) < 0) { - if (t.seq_ >= it->second) { - // The transition is to an existing tombstone that the new tombstone - // covers. Save the covered tombstone's seqno. We'll need to return to - // it if the new tombstone ends before the existing tombstone. - end_seq = it->second; - - if (t.seq_ == prev_seq()) { - // The previous transition is to the seqno of the new tombstone: - // - // 3: 3: 3: --F - // 2: C------ OR 2: C------ OR 2: F---- - // 1: F--- 1: ---F 1: H-- - // ^ ^ ^ - // - // Erase this transition. It's been superseded. - it = rep_.erase(it); - continue; // skip increment; erase positions iterator correctly - } else { - // The previous transition is to a tombstone that covers the new - // tombstone, but this transition is to a tombstone that is covered by - // the new tombstone. That is, this is the end of a run of existing - // tombstones that cover the new tombstone: - // - // 3: A---E OR 3: E-G - // 2: c---- 2: ------ - // ^ ^ - // Preserve this transition point, but raise it to the new tombstone's - // seqno. - it->second = t.seq_; - } - } else { - // The transition is to an existing tombstone that covers the new - // tombstone: - // - // 4: 4: --F - // 3: F-- OR 3: F-- - // 2: ----- 2: ----- - // ^ ^ - // Do nothing. - } - ++it; - } - - if (t.seq_ == prev_seq()) { - // The new tombstone is unterminated in the map. - if (it != rep_.end() && t.seq_ == it->second && - icmp_->Compare(it->first, t.end_key_) == 0) { - // The new tombstone ends at the start of another tombstone with an - // identical seqno. Merge the tombstones by removing the existing - // tombstone's start key. 
- rep_.erase(it); - } else if (end_seq == prev_seq() || - (it != rep_.end() && end_seq == it->second)) { - // The new tombstone is implicitly ended because its end point is - // contained within an existing tombstone with the same seqno: - // - // 2: ---k--N - // ^ - } else { - // The new tombstone needs an explicit end point. - // - // 3: OR 3: --G OR 3: --G K-- - // 2: C-------k 2: G---k 2: G---k - // ^ ^ ^ - // Install one that returns to the last seqno we covered. Because end - // keys are exclusive, if there's an existing transition at t.end_key_, - // it takes precedence over the transition that we install here. - rep_.emplace(t.end_key_, - end_seq); // emplace is a noop if existing entry - } - } else { - // The new tombstone is implicitly ended because its end point is covered - // by an existing tombstone with a higher seqno. - // - // 3: I---M OR 3: A-----------M - // 2: ----k 2: c-------k - // ^ ^ - // Do nothing. - } +void TruncatedRangeDelIterator::SeekToFirst() { + if (smallest_ != nullptr) { + iter_->Seek(smallest_->user_key); + return; } + iter_->SeekToTopFirst(); +} - size_t Size() const override { return rep_.empty() ? 
0 : rep_.size() - 1; } +void TruncatedRangeDelIterator::SeekToLast() { + if (largest_ != nullptr) { + iter_->SeekForPrev(largest_->user_key); + return; + } + iter_->SeekToTopLast(); +} - void InvalidatePosition() override { iter_ = rep_.end(); } +std::map> +TruncatedRangeDelIterator::SplitBySnapshot( + const std::vector& snapshots) { + using FragmentedIterPair = + std::pair>; + + auto split_untruncated_iters = iter_->SplitBySnapshot(snapshots); + std::map> + split_truncated_iters; + std::for_each( + split_untruncated_iters.begin(), split_untruncated_iters.end(), + [&](FragmentedIterPair& iter_pair) { + std::unique_ptr truncated_iter( + new TruncatedRangeDelIterator(std::move(iter_pair.second), icmp_, + smallest_ikey_, largest_ikey_)); + split_truncated_iters.emplace(iter_pair.first, + std::move(truncated_iter)); + }); + return split_truncated_iters; +} - std::unique_ptr NewIterator() override { - return std::unique_ptr(new Iterator(this->rep_)); +ForwardRangeDelIterator::ForwardRangeDelIterator( + const InternalKeyComparator* icmp, + const std::vector>* iters) + : icmp_(icmp), + iters_(iters), + unused_idx_(0), + active_seqnums_(SeqMaxComparator()), + active_iters_(EndKeyMinComparator(icmp)), + inactive_iters_(StartKeyMinComparator(icmp)) {} + +bool ForwardRangeDelIterator::ShouldDelete(const ParsedInternalKey& parsed) { + assert(iters_ != nullptr); + // Move active iterators that end before parsed. + while (!active_iters_.empty() && + icmp_->Compare((*active_iters_.top())->end_key(), parsed) <= 0) { + TruncatedRangeDelIterator* iter = PopActiveIter(); + do { + iter->Next(); + } while (iter->Valid() && icmp_->Compare(iter->end_key(), parsed) <= 0); + PushIter(iter, parsed); + assert(active_iters_.size() == active_seqnums_.size()); + } + + // Move inactive iterators that start before parsed. 
+ while (!inactive_iters_.empty() && + icmp_->Compare(inactive_iters_.top()->start_key(), parsed) <= 0) { + TruncatedRangeDelIterator* iter = PopInactiveIter(); + while (iter->Valid() && icmp_->Compare(iter->end_key(), parsed) <= 0) { + iter->Next(); + } + PushIter(iter, parsed); + assert(active_iters_.size() == active_seqnums_.size()); } -}; -RangeDelAggregator::RangeDelAggregator( - const InternalKeyComparator& icmp, - const std::vector& snapshots, - bool collapse_deletions /* = true */) - : upper_bound_(kMaxSequenceNumber), - icmp_(icmp), - collapse_deletions_(collapse_deletions) { - InitRep(snapshots); + return active_seqnums_.empty() + ? false + : (*active_seqnums_.begin())->seq() > parsed.sequence; } -RangeDelAggregator::RangeDelAggregator(const InternalKeyComparator& icmp, - SequenceNumber snapshot, - bool collapse_deletions /* = false */) - : upper_bound_(snapshot), - icmp_(icmp), - collapse_deletions_(collapse_deletions) {} - -void RangeDelAggregator::InitRep(const std::vector& snapshots) { - assert(rep_ == nullptr); - rep_.reset(new Rep()); - rep_->snapshots_ = snapshots; - // Data newer than any snapshot falls in this catch-all stripe - rep_->snapshots_.emplace_back(kMaxSequenceNumber); - rep_->pinned_iters_mgr_.StartPinning(); +void ForwardRangeDelIterator::Invalidate() { + unused_idx_ = 0; + active_iters_.clear(); + active_seqnums_.clear(); + inactive_iters_.clear(); } -std::unique_ptr RangeDelAggregator::NewRangeDelMap() { - RangeDelMap* tombstone_map; - if (collapse_deletions_) { - tombstone_map = new CollapsedRangeDelMap(&icmp_); - } else { - tombstone_map = new UncollapsedRangeDelMap(&icmp_); +ReverseRangeDelIterator::ReverseRangeDelIterator( + const InternalKeyComparator* icmp, + const std::vector>* iters) + : icmp_(icmp), + iters_(iters), + unused_idx_(0), + active_seqnums_(SeqMaxComparator()), + active_iters_(StartKeyMaxComparator(icmp)), + inactive_iters_(EndKeyMaxComparator(icmp)) {} + +bool ReverseRangeDelIterator::ShouldDelete(const 
ParsedInternalKey& parsed) { + assert(iters_ != nullptr); + // Move active iterators that start after parsed. + while (!active_iters_.empty() && + icmp_->Compare(parsed, (*active_iters_.top())->start_key()) < 0) { + TruncatedRangeDelIterator* iter = PopActiveIter(); + do { + iter->Prev(); + } while (iter->Valid() && icmp_->Compare(parsed, iter->start_key()) < 0); + PushIter(iter, parsed); + assert(active_iters_.size() == active_seqnums_.size()); + } + + // Move inactive iterators that end after parsed. + while (!inactive_iters_.empty() && + icmp_->Compare(parsed, inactive_iters_.top()->end_key()) < 0) { + TruncatedRangeDelIterator* iter = PopInactiveIter(); + while (iter->Valid() && icmp_->Compare(parsed, iter->start_key()) < 0) { + iter->Prev(); + } + PushIter(iter, parsed); + assert(active_iters_.size() == active_seqnums_.size()); } - return std::unique_ptr(tombstone_map); -} -bool RangeDelAggregator::ShouldDeleteImpl(const Slice& internal_key, - RangeDelPositioningMode mode) { - assert(rep_ != nullptr); - ParsedInternalKey parsed; - if (!ParseInternalKey(internal_key, &parsed)) { - assert(false); - return false; - } - return ShouldDeleteImpl(parsed, mode); + return active_seqnums_.empty() + ? false + : (*active_seqnums_.begin())->seq() > parsed.sequence; } -bool RangeDelAggregator::ShouldDeleteImpl(const ParsedInternalKey& parsed, - RangeDelPositioningMode mode) { - assert(IsValueType(parsed.type)); - assert(rep_ != nullptr); - auto* tombstone_map = GetRangeDelMapIfExists(parsed.sequence); - if (tombstone_map == nullptr || tombstone_map->IsEmpty()) { - return false; - } - return tombstone_map->ShouldDelete(parsed, mode); +void ReverseRangeDelIterator::Invalidate() { + unused_idx_ = 0; + active_iters_.clear(); + active_seqnums_.clear(); + inactive_iters_.clear(); } -bool RangeDelAggregator::IsRangeOverlapped(const Slice& start, - const Slice& end) { - // Unimplemented because the only client of this method, file ingestion, - // uses uncollapsed maps. 
- assert(!collapse_deletions_); - if (rep_ == nullptr) { +bool RangeDelAggregator::StripeRep::ShouldDelete( + const ParsedInternalKey& parsed, RangeDelPositioningMode mode) { + if (!InStripe(parsed.sequence) || IsEmpty()) { return false; } - ParsedInternalKey start_ikey(start, kMaxSequenceNumber, kMaxValue); - ParsedInternalKey end_ikey(end, 0, static_cast(0)); - for (const auto& stripe : rep_->stripe_map_) { - if (stripe.second.first->IsRangeOverlapped(start_ikey, end_ikey)) { - return true; - } + switch (mode) { + case RangeDelPositioningMode::kForwardTraversal: + InvalidateReverseIter(); + + // Pick up previously unseen iterators. + for (auto it = std::next(iters_.begin(), forward_iter_.UnusedIdx()); + it != iters_.end(); ++it, forward_iter_.IncUnusedIdx()) { + auto& iter = *it; + forward_iter_.AddNewIter(iter.get(), parsed); + } + + return forward_iter_.ShouldDelete(parsed); + case RangeDelPositioningMode::kBackwardTraversal: + InvalidateForwardIter(); + + // Pick up previously unseen iterators. + for (auto it = std::next(iters_.begin(), reverse_iter_.UnusedIdx()); + it != iters_.end(); ++it, reverse_iter_.IncUnusedIdx()) { + auto& iter = *it; + reverse_iter_.AddNewIter(iter.get(), parsed); + } + + return reverse_iter_.ShouldDelete(parsed); + default: + assert(false); + return false; } - return false; } -Status RangeDelAggregator::AddTombstones( - std::unique_ptr input, - const InternalKey* smallest, - const InternalKey* largest) { - if (input == nullptr) { - return Status::OK(); - } - input->SeekToFirst(); - bool first_iter = true; - while (input->Valid()) { - if (first_iter) { - if (rep_ == nullptr) { - InitRep({upper_bound_}); - } else { - InvalidateRangeDelMapPositions(); - } - first_iter = false; - } - ParsedInternalKey parsed_key; - bool parsed; - if (input->IsKeyPinned()) { - parsed = ParseInternalKey(input->key(), &parsed_key); - } else { - // The tombstone map holds slices into the iterator's memory. Make a - // copy of the key if it is not pinned. 
- rep_->pinned_slices_.emplace_back(input->key().data(), - input->key().size()); - parsed = ParseInternalKey(rep_->pinned_slices_.back(), &parsed_key); - } - if (!parsed) { - return Status::Corruption("Unable to parse range tombstone InternalKey"); - } - Slice end_user_key; - if (input->IsValuePinned()) { - end_user_key = input->value(); - } else { - // The tombstone map holds slices into the iterator's memory. Make a - // copy of the value if it is not pinned. - rep_->pinned_slices_.emplace_back(input->value().data(), - input->value().size()); - end_user_key = rep_->pinned_slices_.back(); - } - ParsedInternalKey start_key(parsed_key.user_key, kMaxSequenceNumber, - kMaxValue); - ParsedInternalKey end_key(end_user_key, kMaxSequenceNumber, kMaxValue); - // Truncate the tombstone to the range [smallest, largest]. - if (smallest != nullptr) { - ParsedInternalKey parsed_smallest; - if (ParseInternalKey(smallest->Encode(), &parsed_smallest) && - icmp_.Compare(start_key, parsed_smallest) < 0) { - start_key.user_key = parsed_smallest.user_key; - start_key.sequence = parsed_smallest.sequence; +bool RangeDelAggregator::StripeRep::IsRangeOverlapped(const Slice& start, + const Slice& end) { + Invalidate(); + + // Set the internal start/end keys so that: + // - if start_ikey has the same user key and sequence number as the + // current end key, start_ikey will be considered greater; and + // - if end_ikey has the same user key and sequence number as the current + // start key, end_ikey will be considered greater. 
+ ParsedInternalKey start_ikey(start, kMaxSequenceNumber, + static_cast(0)); + ParsedInternalKey end_ikey(end, 0, static_cast(0)); + for (auto& iter : iters_) { + bool checked_candidate_tombstones = false; + for (iter->SeekForPrev(start); + iter->Valid() && icmp_->Compare(iter->start_key(), end_ikey) <= 0; + iter->Next()) { + checked_candidate_tombstones = true; + if (icmp_->Compare(start_ikey, iter->end_key()) < 0 && + icmp_->Compare(iter->start_key(), end_ikey) <= 0) { + return true; } } - if (largest != nullptr) { - ParsedInternalKey parsed_largest; - if (ParseInternalKey(largest->Encode(), &parsed_largest) && - icmp_.Compare(end_key, parsed_largest) > 0) { - end_key.user_key = parsed_largest.user_key; - if (parsed_largest.sequence != kMaxSequenceNumber) { - // The same user key straddles two adjacent sstables. To make sure we - // can truncate to a range that includes the largest point key in the - // first sstable, set the tombstone end key's sequence number to 1 - // less than the largest key. - assert(parsed_largest.sequence != 0); - end_key.sequence = parsed_largest.sequence - 1; - } else { - // The SST file boundary was artificially extended by a range tombstone. - // We will not see any entries in this SST with this user key, so we - // can leave the seqnum at kMaxSequenceNumber. - } + + if (!checked_candidate_tombstones) { + // Do an additional check for when the end of the range is the begin + // key of a tombstone, which we missed earlier since SeekForPrev'ing + // to the start was invalid. 
+ iter->SeekForPrev(end); + if (iter->Valid() && icmp_->Compare(start_ikey, iter->end_key()) < 0 && + icmp_->Compare(iter->start_key(), end_ikey) <= 0) { + return true; } } - TruncatedRangeTombstone tombstone(start_key, end_key, parsed_key.sequence); - GetRangeDelMap(parsed_key.sequence).AddTombstone(std::move(tombstone)); - input->Next(); } - if (!first_iter) { - rep_->pinned_iters_mgr_.PinIterator(input.release(), false /* arena */); - } - return Status::OK(); + return false; } -void RangeDelAggregator::InvalidateRangeDelMapPositions() { - if (rep_ == nullptr) { +void ReadRangeDelAggregator::AddTombstones( + std::unique_ptr input_iter, + const InternalKey* smallest, const InternalKey* largest) { + if (input_iter == nullptr || input_iter->empty()) { return; } - for (auto& stripe : rep_->stripe_map_) { - stripe.second.first->InvalidatePosition(); - } + rep_.AddTombstones( + std::unique_ptr(new TruncatedRangeDelIterator( + std::move(input_iter), icmp_, smallest, largest))); } -RangeDelMap* RangeDelAggregator::GetRangeDelMapIfExists(SequenceNumber seq) { - assert(rep_ != nullptr); - // The stripe includes seqnum for the snapshot above and excludes seqnum for - // the snapshot below. - if (rep_->stripe_map_.empty()) { - return nullptr; - } - StripeMap::iterator iter = rep_->stripe_map_.lower_bound(seq); - if (iter == rep_->stripe_map_.end()) { - return nullptr; - } - size_t snapshot_idx = iter->second.second; - if (snapshot_idx > 0 && seq <= rep_->snapshots_[snapshot_idx - 1]) { - return nullptr; - } - return iter->second.first.get(); +bool ReadRangeDelAggregator::ShouldDelete(const ParsedInternalKey& parsed, + RangeDelPositioningMode mode) { + return rep_.ShouldDelete(parsed, mode); } -RangeDelMap& RangeDelAggregator::GetRangeDelMap(SequenceNumber seq) { - assert(rep_ != nullptr); - // The stripe includes seqnum for the snapshot above and excludes seqnum for - // the snapshot below. 
- std::vector::iterator iter = - std::lower_bound(rep_->snapshots_.begin(), rep_->snapshots_.end(), seq); - // catch-all stripe justifies this assertion in either of above cases - assert(iter != rep_->snapshots_.end()); - if (rep_->stripe_map_.find(*iter) == rep_->stripe_map_.end()) { - rep_->stripe_map_.emplace( - *iter, - std::make_pair(NewRangeDelMap(), iter - rep_->snapshots_.begin())); - } - return *rep_->stripe_map_[*iter].first; +bool ReadRangeDelAggregator::IsRangeOverlapped(const Slice& start, + const Slice& end) { + InvalidateRangeDelMapPositions(); + return rep_.IsRangeOverlapped(start, end); } -bool RangeDelAggregator::IsEmpty() { - if (rep_ == nullptr) { - return true; +void CompactionRangeDelAggregator::AddTombstones( + std::unique_ptr input_iter, + const InternalKey* smallest, const InternalKey* largest) { + if (input_iter == nullptr || input_iter->empty()) { + return; } - for (const auto& stripe : rep_->stripe_map_) { - if (!stripe.second.first->IsEmpty()) { - return false; + assert(input_iter->lower_bound() == 0); + assert(input_iter->upper_bound() == kMaxSequenceNumber); + parent_iters_.emplace_back(new TruncatedRangeDelIterator( + std::move(input_iter), icmp_, smallest, largest)); + + auto split_iters = parent_iters_.back()->SplitBySnapshot(*snapshots_); + for (auto& split_iter : split_iters) { + auto it = reps_.find(split_iter.first); + if (it == reps_.end()) { + bool inserted; + SequenceNumber upper_bound = split_iter.second->upper_bound(); + SequenceNumber lower_bound = split_iter.second->lower_bound(); + std::tie(it, inserted) = reps_.emplace( + split_iter.first, StripeRep(icmp_, upper_bound, lower_bound)); + assert(inserted); } + assert(it != reps_.end()); + it->second.AddTombstones(std::move(split_iter.second)); } - return true; } -bool RangeDelAggregator::AddFile(uint64_t file_number) { - if (rep_ == nullptr) { - return true; +bool CompactionRangeDelAggregator::ShouldDelete(const ParsedInternalKey& parsed, + RangeDelPositioningMode mode) { 
+ auto it = reps_.lower_bound(parsed.sequence); + if (it == reps_.end()) { + return false; } - return rep_->added_files_.emplace(file_number).second; + return it->second.ShouldDelete(parsed, mode); } -class MergingRangeDelIter : public RangeDelIterator { +namespace { + +class TruncatedRangeDelMergingIter : public InternalIterator { public: - MergingRangeDelIter(const Comparator* c) - : heap_(IterMinHeap(IterComparator(c))), current_(nullptr) {} - - void AddIterator(std::unique_ptr iter) { - if (iter->Valid()) { - heap_.push(iter.get()); - iters_.push_back(std::move(iter)); - current_ = heap_.top(); + TruncatedRangeDelMergingIter( + const InternalKeyComparator* icmp, const Slice* lower_bound, + const Slice* upper_bound, bool upper_bound_inclusive, + const std::vector>& children) + : icmp_(icmp), + lower_bound_(lower_bound), + upper_bound_(upper_bound), + upper_bound_inclusive_(upper_bound_inclusive), + heap_(StartKeyMinComparator(icmp)) { + for (auto& child : children) { + if (child != nullptr) { + assert(child->lower_bound() == 0); + assert(child->upper_bound() == kMaxSequenceNumber); + children_.push_back(child.get()); + } } } - bool Valid() const override { return current_ != nullptr; } + bool Valid() const override { + return !heap_.empty() && BeforeEndKey(heap_.top()); + } + Status status() const override { return Status::OK(); } + + void SeekToFirst() override { + heap_.clear(); + for (auto& child : children_) { + if (lower_bound_ != nullptr) { + child->Seek(*lower_bound_); + } else { + child->SeekToFirst(); + } + if (child->Valid()) { + heap_.push(child); + } + } + } void Next() override { - current_->Next(); - if (current_->Valid()) { - heap_.replace_top(current_); + auto* top = heap_.top(); + top->InternalNext(); + if (top->Valid()) { + heap_.replace_top(top); } else { heap_.pop(); } - current_ = heap_.empty() ? 
nullptr : heap_.top(); } - void Seek(const Slice& target) override { - ParsedInternalKey ikey(target, kMaxSequenceNumber, kMaxValue); - Seek(ikey); + Slice key() const override { + auto* top = heap_.top(); + cur_start_key_.Set(top->start_key().user_key, top->seq(), + kTypeRangeDeletion); + return cur_start_key_.Encode(); } - void Seek(const ParsedInternalKey& target) override { - heap_.clear(); - for (auto& iter : iters_) { - iter->Seek(target); - if (iter->Valid()) { - heap_.push(iter.get()); - } - } - current_ = heap_.empty() ? nullptr : heap_.top(); + Slice value() const override { + auto* top = heap_.top(); + assert(top->end_key().sequence == kMaxSequenceNumber); + return top->end_key().user_key; } - RangeTombstone Tombstone() const override { return current_->Tombstone(); } + // Unused InternalIterator methods + void Prev() override { assert(false); } + void Seek(const Slice& /* target */) override { assert(false); } + void SeekForPrev(const Slice& /* target */) override { assert(false); } + void SeekToLast() override { assert(false); } private: - struct IterComparator { - IterComparator(const Comparator* c) : cmp(c) {} - - bool operator()(const RangeDelIterator* a, - const RangeDelIterator* b) const { - // Note: counterintuitively, returning the tombstone with the larger start - // key puts the tombstone with the smallest key at the top of the heap. - return cmp->Compare(a->Tombstone().start_key_, - b->Tombstone().start_key_) > 0; + bool BeforeEndKey(const TruncatedRangeDelIterator* iter) const { + if (upper_bound_ == nullptr) { + return true; } + int cmp = icmp_->user_comparator()->Compare(iter->start_key().user_key, + *upper_bound_); + return upper_bound_inclusive_ ? 
cmp <= 0 : cmp < 0; + } - const Comparator* cmp; - }; - - typedef BinaryHeap IterMinHeap; + const InternalKeyComparator* icmp_; + const Slice* lower_bound_; + const Slice* upper_bound_; + bool upper_bound_inclusive_; + BinaryHeap heap_; + std::vector children_; - std::vector> iters_; - IterMinHeap heap_; - RangeDelIterator* current_; + mutable InternalKey cur_start_key_; }; -std::unique_ptr RangeDelAggregator::NewIterator() { - std::unique_ptr iter( - new MergingRangeDelIter(icmp_.user_comparator())); - if (rep_ != nullptr) { - for (const auto& stripe : rep_->stripe_map_) { - iter->AddIterator(stripe.second.first->NewIterator()); - } - } - return std::move(iter); +} // namespace + +std::unique_ptr +CompactionRangeDelAggregator::NewIterator(const Slice* lower_bound, + const Slice* upper_bound, + bool upper_bound_inclusive) { + InvalidateRangeDelMapPositions(); + std::unique_ptr merging_iter( + new TruncatedRangeDelMergingIter(icmp_, lower_bound, upper_bound, + upper_bound_inclusive, parent_iters_)); + + // TODO: add tests where tombstone fragments can be outside of upper and lower + // bound range + auto fragmented_tombstone_list = + std::make_shared( + std::move(merging_iter), *icmp_, true /* for_compaction */, + *snapshots_); + + return std::unique_ptr( + new FragmentedRangeTombstoneIterator( + fragmented_tombstone_list, *icmp_, + kMaxSequenceNumber /* upper_bound */)); } } // namespace rocksdb diff --git a/db/range_del_aggregator.h b/db/range_del_aggregator.h index 8a89ec9f1..a59cbaf1b 100644 --- a/db/range_del_aggregator.h +++ b/db/range_del_aggregator.h @@ -1,10 +1,12 @@ -// Copyright (c) 2016-present, Facebook, Inc. All rights reserved. +// Copyright (c) 2018-present, Facebook, Inc. All rights reserved. // This source code is licensed under both the GPLv2 (found in the // COPYING file in the root directory) and Apache 2.0 License // (found in the LICENSE.Apache file in the root directory). 
#pragma once +#include +#include #include #include #include @@ -14,220 +16,422 @@ #include "db/compaction_iteration_stats.h" #include "db/dbformat.h" #include "db/pinned_iterators_manager.h" +#include "db/range_del_aggregator.h" +#include "db/range_tombstone_fragmenter.h" #include "db/version_edit.h" #include "include/rocksdb/comparator.h" #include "include/rocksdb/types.h" #include "table/internal_iterator.h" #include "table/scoped_arena_iterator.h" #include "table/table_builder.h" +#include "util/heap.h" #include "util/kv_map.h" namespace rocksdb { -// RangeDelMaps maintain position across calls to ShouldDelete. The caller may -// wish to specify a mode to optimize positioning the iterator during the next -// call to ShouldDelete. The non-kFullScan modes are only available when -// deletion collapsing is enabled. -// -// For example, if we invoke Next() on an iterator, kForwardTraversal should be -// specified to advance one-by-one through deletions until one is found with its -// interval containing the key. This will typically be faster than doing a full -// binary search (kBinarySearch). -enum class RangeDelPositioningMode { - kFullScan, // used iff collapse_deletions_ == false - kForwardTraversal, - kBackwardTraversal, - kBinarySearch, +class TruncatedRangeDelIterator { + public: + TruncatedRangeDelIterator( + std::unique_ptr iter, + const InternalKeyComparator* icmp, const InternalKey* smallest, + const InternalKey* largest); + + bool Valid() const; + + void Next(); + void Prev(); + + void InternalNext(); + + // Seeks to the tombstone with the highest visible sequence number that covers + // target (a user key). If no such tombstone exists, the position will be at + // the earliest tombstone that ends after target. + void Seek(const Slice& target); + + // Seeks to the tombstone with the highest visible sequence number that covers + target (a user key). 
If no such tombstone exists, the position will be at + // the latest tombstone that starts before target. + void SeekForPrev(const Slice& target); + + void SeekToFirst(); + void SeekToLast(); + + ParsedInternalKey start_key() const { + return (smallest_ == nullptr || + icmp_->Compare(*smallest_, iter_->parsed_start_key()) <= 0) + ? iter_->parsed_start_key() + : *smallest_; + } + + ParsedInternalKey end_key() const { + return (largest_ == nullptr || + icmp_->Compare(iter_->parsed_end_key(), *largest_) <= 0) + ? iter_->parsed_end_key() + : *largest_; + } + + SequenceNumber seq() const { return iter_->seq(); } + + std::map> + SplitBySnapshot(const std::vector& snapshots); + + SequenceNumber upper_bound() const { return iter_->upper_bound(); } + + SequenceNumber lower_bound() const { return iter_->lower_bound(); } + + private: + std::unique_ptr iter_; + const InternalKeyComparator* icmp_; + const ParsedInternalKey* smallest_ = nullptr; + const ParsedInternalKey* largest_ = nullptr; + std::list pinned_bounds_; + + const InternalKey* smallest_ikey_; + const InternalKey* largest_ikey_; +}; + +struct SeqMaxComparator { + bool operator()(const TruncatedRangeDelIterator* a, + const TruncatedRangeDelIterator* b) const { + return a->seq() > b->seq(); + } }; -// TruncatedRangeTombstones are a slight generalization of regular -// RangeTombstones that can represent truncations caused by SST boundaries. -// Instead of using user keys to represent the start and end keys, they instead -// use internal keys, whose sequence number indicates the sequence number of -// the smallest/largest SST key (in the case where a tombstone is untruncated, -// the sequence numbers will be kMaxSequenceNumber for both start and end -// keys). Like RangeTombstones, TruncatedRangeTombstone are also -// end-key-exclusive. 
-struct TruncatedRangeTombstone { - TruncatedRangeTombstone(const ParsedInternalKey& sk, - const ParsedInternalKey& ek, SequenceNumber s) - : start_key_(sk), end_key_(ek), seq_(s) {} - - RangeTombstone Tombstone() const { - // The RangeTombstone returned here can cover less than the - // TruncatedRangeTombstone when its end key has a seqnum that is not - // kMaxSequenceNumber. Since this method is only used by RangeDelIterators - // (which in turn are only used during flush/compaction), we avoid this - // problem by using truncation boundaries spanning multiple SSTs, which - // are selected in a way that guarantee a clean break at the end key. - assert(end_key_.sequence == kMaxSequenceNumber); - return RangeTombstone(start_key_.user_key, end_key_.user_key, seq_); - } - - ParsedInternalKey start_key_; - ParsedInternalKey end_key_; - SequenceNumber seq_; +struct StartKeyMinComparator { + explicit StartKeyMinComparator(const InternalKeyComparator* c) : icmp(c) {} + + bool operator()(const TruncatedRangeDelIterator* a, + const TruncatedRangeDelIterator* b) const { + return icmp->Compare(a->start_key(), b->start_key()) > 0; + } + + const InternalKeyComparator* icmp; }; -// A RangeDelIterator iterates over range deletion tombstones. -class RangeDelIterator { +class ForwardRangeDelIterator { public: - virtual ~RangeDelIterator() = default; - - virtual bool Valid() const = 0; - virtual void Next() = 0; - // NOTE: the Slice passed to this method must be a user key. 
- virtual void Seek(const Slice& target) = 0; - virtual void Seek(const ParsedInternalKey& target) = 0; - virtual RangeTombstone Tombstone() const = 0; + ForwardRangeDelIterator( + const InternalKeyComparator* icmp, + const std::vector>* iters); + + bool ShouldDelete(const ParsedInternalKey& parsed); + void Invalidate(); + + void AddNewIter(TruncatedRangeDelIterator* iter, + const ParsedInternalKey& parsed) { + iter->Seek(parsed.user_key); + PushIter(iter, parsed); + assert(active_iters_.size() == active_seqnums_.size()); + } + + size_t UnusedIdx() const { return unused_idx_; } + void IncUnusedIdx() { unused_idx_++; } + + private: + using ActiveSeqSet = + std::multiset; + + struct EndKeyMinComparator { + explicit EndKeyMinComparator(const InternalKeyComparator* c) : icmp(c) {} + + bool operator()(const ActiveSeqSet::const_iterator& a, + const ActiveSeqSet::const_iterator& b) const { + return icmp->Compare((*a)->end_key(), (*b)->end_key()) > 0; + } + + const InternalKeyComparator* icmp; + }; + + void PushIter(TruncatedRangeDelIterator* iter, + const ParsedInternalKey& parsed) { + if (!iter->Valid()) { + // The iterator has been fully consumed, so we don't need to add it to + // either of the heaps. 
+ return; + } + int cmp = icmp_->Compare(parsed, iter->start_key()); + if (cmp < 0) { + PushInactiveIter(iter); + } else { + PushActiveIter(iter); + } + } + + void PushActiveIter(TruncatedRangeDelIterator* iter) { + auto seq_pos = active_seqnums_.insert(iter); + active_iters_.push(seq_pos); + } + + TruncatedRangeDelIterator* PopActiveIter() { + auto active_top = active_iters_.top(); + auto iter = *active_top; + active_iters_.pop(); + active_seqnums_.erase(active_top); + return iter; + } + + void PushInactiveIter(TruncatedRangeDelIterator* iter) { + inactive_iters_.push(iter); + } + + TruncatedRangeDelIterator* PopInactiveIter() { + auto* iter = inactive_iters_.top(); + inactive_iters_.pop(); + return iter; + } + + const InternalKeyComparator* icmp_; + const std::vector>* iters_; + size_t unused_idx_; + ActiveSeqSet active_seqnums_; + BinaryHeap active_iters_; + BinaryHeap inactive_iters_; }; -// A RangeDelMap keeps track of range deletion tombstones within a snapshot -// stripe. -// -// RangeDelMaps are used internally by RangeDelAggregator. They are not intended -// to be used directly. 
-class RangeDelMap { +class ReverseRangeDelIterator { public: - virtual ~RangeDelMap() = default; + ReverseRangeDelIterator( + const InternalKeyComparator* icmp, + const std::vector>* iters); - virtual bool ShouldDelete(const ParsedInternalKey& parsed, - RangeDelPositioningMode mode) = 0; - virtual bool IsRangeOverlapped(const ParsedInternalKey& start, - const ParsedInternalKey& end) = 0; - virtual void InvalidatePosition() = 0; + bool ShouldDelete(const ParsedInternalKey& parsed); + void Invalidate(); + + void AddNewIter(TruncatedRangeDelIterator* iter, + const ParsedInternalKey& parsed) { + iter->SeekForPrev(parsed.user_key); + PushIter(iter, parsed); + assert(active_iters_.size() == active_seqnums_.size()); + } + + size_t UnusedIdx() const { return unused_idx_; } + void IncUnusedIdx() { unused_idx_++; } + + private: + using ActiveSeqSet = + std::multiset; + + struct EndKeyMaxComparator { + explicit EndKeyMaxComparator(const InternalKeyComparator* c) : icmp(c) {} + + bool operator()(const TruncatedRangeDelIterator* a, + const TruncatedRangeDelIterator* b) const { + return icmp->Compare(a->end_key(), b->end_key()) < 0; + } + + const InternalKeyComparator* icmp; + }; + struct StartKeyMaxComparator { + explicit StartKeyMaxComparator(const InternalKeyComparator* c) : icmp(c) {} + + bool operator()(const ActiveSeqSet::const_iterator& a, + const ActiveSeqSet::const_iterator& b) const { + return icmp->Compare((*a)->start_key(), (*b)->start_key()) < 0; + } - virtual size_t Size() const = 0; - bool IsEmpty() const { return Size() == 0; } + const InternalKeyComparator* icmp; + }; + + void PushIter(TruncatedRangeDelIterator* iter, + const ParsedInternalKey& parsed) { + if (!iter->Valid()) { + // The iterator has been fully consumed, so we don't need to add it to + // either of the heaps. 
+ } else if (icmp_->Compare(iter->end_key(), parsed) <= 0) { + PushInactiveIter(iter); + } else { + PushActiveIter(iter); + } + } + + void PushActiveIter(TruncatedRangeDelIterator* iter) { + auto seq_pos = active_seqnums_.insert(iter); + active_iters_.push(seq_pos); + } + + TruncatedRangeDelIterator* PopActiveIter() { + auto active_top = active_iters_.top(); + auto iter = *active_top; + active_iters_.pop(); + active_seqnums_.erase(active_top); + return iter; + } - virtual void AddTombstone(TruncatedRangeTombstone tombstone) = 0; - virtual std::unique_ptr NewIterator() = 0; + void PushInactiveIter(TruncatedRangeDelIterator* iter) { + inactive_iters_.push(iter); + } + + TruncatedRangeDelIterator* PopInactiveIter() { + auto* iter = inactive_iters_.top(); + inactive_iters_.pop(); + return iter; + } + + const InternalKeyComparator* icmp_; + const std::vector>* iters_; + size_t unused_idx_; + ActiveSeqSet active_seqnums_; + BinaryHeap active_iters_; + BinaryHeap inactive_iters_; }; -// A RangeDelAggregator aggregates range deletion tombstones as they are -// encountered in memtables/SST files. It provides methods that check whether a -// key is covered by range tombstones or write the relevant tombstones to a new -// SST file. +enum class RangeDelPositioningMode { kForwardTraversal, kBackwardTraversal }; class RangeDelAggregator { public: - // @param snapshots These are used to organize the tombstones into snapshot - // stripes, which is the seqnum range between consecutive snapshots, - // including the higher snapshot and excluding the lower one. Currently, - // this is used by ShouldDelete() to prevent deletion of keys that are - // covered by range tombstones in other snapshot stripes. This constructor - // is used for writes (flush/compaction). All DB snapshots are provided - // such that no keys are removed that are uncovered according to any DB - // snapshot. - // Note this overload does not lazily initialize Rep. 
- RangeDelAggregator(const InternalKeyComparator& icmp, - const std::vector& snapshots, - bool collapse_deletions = true); - - // @param upper_bound Similar to snapshots above, except with a single - // snapshot, which allows us to store the snapshot on the stack and defer - // initialization of heap-allocating members (in Rep) until the first range - // deletion is encountered. This constructor is used in case of reads (get/ - // iterator), for which only the user snapshot (upper_bound) is provided - // such that the seqnum space is divided into two stripes. Only the older - // stripe will be used by ShouldDelete(). - RangeDelAggregator(const InternalKeyComparator& icmp, - SequenceNumber upper_bound, - bool collapse_deletions = false); - - // Returns whether the key should be deleted, which is the case when it is - // covered by a range tombstone residing in the same snapshot stripe. - // @param mode If collapse_deletions_ is true, this dictates how we will find - // the deletion whose interval contains this key. Otherwise, its - // value must be kFullScan indicating linear scan from beginning. 
- bool ShouldDelete( - const ParsedInternalKey& parsed, - RangeDelPositioningMode mode = RangeDelPositioningMode::kFullScan) { - if (rep_ == nullptr) { + explicit RangeDelAggregator(const InternalKeyComparator* icmp) + : icmp_(icmp) {} + virtual ~RangeDelAggregator() {} + + virtual void AddTombstones( + std::unique_ptr input_iter, + const InternalKey* smallest = nullptr, + const InternalKey* largest = nullptr) = 0; + + bool ShouldDelete(const Slice& key, RangeDelPositioningMode mode) { + ParsedInternalKey parsed; + if (!ParseInternalKey(key, &parsed)) { return false; } - return ShouldDeleteImpl(parsed, mode); + return ShouldDelete(parsed, mode); } - bool ShouldDelete( - const Slice& internal_key, - RangeDelPositioningMode mode = RangeDelPositioningMode::kFullScan) { - if (rep_ == nullptr) { - return false; + virtual bool ShouldDelete(const ParsedInternalKey& parsed, + RangeDelPositioningMode mode) = 0; + + virtual void InvalidateRangeDelMapPositions() = 0; + + virtual bool IsEmpty() const = 0; + + bool AddFile(uint64_t file_number) { + return files_seen_.insert(file_number).second; + } + + protected: + class StripeRep { + public: + StripeRep(const InternalKeyComparator* icmp, SequenceNumber upper_bound, + SequenceNumber lower_bound) + : icmp_(icmp), + forward_iter_(icmp, &iters_), + reverse_iter_(icmp, &iters_), + upper_bound_(upper_bound), + lower_bound_(lower_bound) {} + + void AddTombstones(std::unique_ptr input_iter) { + iters_.push_back(std::move(input_iter)); } - return ShouldDeleteImpl(internal_key, mode); - } - bool ShouldDeleteImpl(const ParsedInternalKey& parsed, - RangeDelPositioningMode mode); - bool ShouldDeleteImpl(const Slice& internal_key, - RangeDelPositioningMode mode); - - // Checks whether range deletions cover any keys between `start` and `end`, - // inclusive. - // - // @param start User key representing beginning of range to check for overlap. - // @param end User key representing end of range to check for overlap. 
This - // argument is inclusive, so the existence of a range deletion covering - // `end` causes this to return true. + + bool IsEmpty() const { return iters_.empty(); } + + bool ShouldDelete(const ParsedInternalKey& parsed, + RangeDelPositioningMode mode); + + void Invalidate() { + InvalidateForwardIter(); + InvalidateReverseIter(); + } + + bool IsRangeOverlapped(const Slice& start, const Slice& end); + + private: + bool InStripe(SequenceNumber seq) const { + return lower_bound_ <= seq && seq <= upper_bound_; + } + + void InvalidateForwardIter() { forward_iter_.Invalidate(); } + + void InvalidateReverseIter() { reverse_iter_.Invalidate(); } + + const InternalKeyComparator* icmp_; + std::vector> iters_; + ForwardRangeDelIterator forward_iter_; + ReverseRangeDelIterator reverse_iter_; + SequenceNumber upper_bound_; + SequenceNumber lower_bound_; + }; + + const InternalKeyComparator* icmp_; + + private: + std::set files_seen_; +}; + +class ReadRangeDelAggregator : public RangeDelAggregator { + public: + ReadRangeDelAggregator(const InternalKeyComparator* icmp, + SequenceNumber upper_bound) + : RangeDelAggregator(icmp), + rep_(icmp, upper_bound, 0 /* lower_bound */) {} + ~ReadRangeDelAggregator() override {} + + using RangeDelAggregator::ShouldDelete; + void AddTombstones( + std::unique_ptr input_iter, + const InternalKey* smallest = nullptr, + const InternalKey* largest = nullptr) override; + + bool ShouldDelete(const ParsedInternalKey& parsed, + RangeDelPositioningMode mode) override; + bool IsRangeOverlapped(const Slice& start, const Slice& end); - // Adds tombstones to the tombstone aggregation structure maintained by this - // object. Tombstones are truncated to smallest and largest. If smallest (or - // largest) is null, it is not used for truncation. When adding range - // tombstones present in an sstable, smallest and largest should be set to - // the smallest and largest keys from the sstable file metadata. 
Note that - // tombstones end keys are exclusive while largest is inclusive. - // @return non-OK status if any of the tombstone keys are corrupted. - Status AddTombstones(std::unique_ptr input, - const InternalKey* smallest = nullptr, - const InternalKey* largest = nullptr); - - // Resets iterators maintained across calls to ShouldDelete(). This may be - // called when the tombstones change, or the owner may call explicitly, e.g., - // if it's an iterator that just seeked to an arbitrary position. The effect - // of invalidation is that the following call to ShouldDelete() will binary - // search for its tombstone. - void InvalidateRangeDelMapPositions(); - - bool IsEmpty(); - bool AddFile(uint64_t file_number); - - // Create a new iterator over the range deletion tombstones in all of the - // snapshot stripes in this aggregator. Tombstones are presented in start key - // order. Tombstones with the same start key are presented in arbitrary order. - // - // The iterator is invalidated after any call to AddTombstones. It is the - // caller's responsibility to avoid using invalid iterators. - std::unique_ptr NewIterator(); + void InvalidateRangeDelMapPositions() override { rep_.Invalidate(); } + + bool IsEmpty() const override { return rep_.IsEmpty(); } private: - // Maps snapshot seqnum -> map of tombstones that fall in that stripe, i.e., - // their seqnums are greater than the next smaller snapshot's seqnum, and the - // corresponding index into the list of snapshots. Each entry is lazily - // initialized. - typedef std::map, size_t>> - StripeMap; - - struct Rep { - std::vector snapshots_; - StripeMap stripe_map_; - PinnedIteratorsManager pinned_iters_mgr_; - std::list pinned_slices_; - std::set added_files_; - }; - // Initializes rep_ lazily. This aggregator object is constructed for every - // read, so expensive members should only be created when necessary, i.e., - // once the first range deletion is encountered. 
- void InitRep(const std::vector& snapshots); - - std::unique_ptr NewRangeDelMap(); - RangeDelMap* GetRangeDelMapIfExists(SequenceNumber seq); - RangeDelMap& GetRangeDelMap(SequenceNumber seq); - - SequenceNumber upper_bound_; - std::unique_ptr rep_; - const InternalKeyComparator& icmp_; - // collapse range deletions so they're binary searchable - const bool collapse_deletions_; + StripeRep rep_; +}; + +class CompactionRangeDelAggregator : public RangeDelAggregator { + public: + CompactionRangeDelAggregator(const InternalKeyComparator* icmp, + const std::vector& snapshots) + : RangeDelAggregator(icmp), snapshots_(&snapshots) {} + ~CompactionRangeDelAggregator() override {} + + void AddTombstones( + std::unique_ptr input_iter, + const InternalKey* smallest = nullptr, + const InternalKey* largest = nullptr) override; + + using RangeDelAggregator::ShouldDelete; + bool ShouldDelete(const ParsedInternalKey& parsed, + RangeDelPositioningMode mode) override; + + bool IsRangeOverlapped(const Slice& start, const Slice& end); + + void InvalidateRangeDelMapPositions() override { + for (auto& rep : reps_) { + rep.second.Invalidate(); + } + } + + bool IsEmpty() const override { + for (const auto& rep : reps_) { + if (!rep.second.IsEmpty()) { + return false; + } + } + return true; + } + + // Creates an iterator over all the range tombstones in the aggregator, for + // use in compaction. Nullptr arguments indicate that the iterator range is + // unbounded. + // NOTE: the boundaries are used for optimization purposes to reduce the + // number of tombstones that are passed to the fragmenter; they do not + // guarantee that the resulting iterator only contains range tombstones that + // cover keys in the provided range. If required, these bounds must be + // enforced during iteration. 
+ std::unique_ptr NewIterator( + const Slice* lower_bound = nullptr, const Slice* upper_bound = nullptr, + bool upper_bound_inclusive = false); + + private: + std::vector> parent_iters_; + std::map reps_; + + const std::vector* snapshots_; }; } // namespace rocksdb diff --git a/db/range_del_aggregator_bench.cc b/db/range_del_aggregator_bench.cc index 0b8260960..7ecdbc5af 100644 --- a/db/range_del_aggregator_bench.cc +++ b/db/range_del_aggregator_bench.cc @@ -20,7 +20,6 @@ int main() { #include #include "db/range_del_aggregator.h" -#include "db/range_del_aggregator_v2.h" #include "db/range_tombstone_fragmenter.h" #include "rocksdb/comparator.h" #include "rocksdb/env.h" @@ -48,8 +47,6 @@ DEFINE_double(tombstone_width_mean, 100.0, "average range tombstone width"); DEFINE_double(tombstone_width_stddev, 0.0, "standard deviation of range tombstone width"); -DEFINE_bool(use_collapsed, true, "use the collapsed range tombstone map"); - DEFINE_int32(seed, 0, "random number generator seed"); DEFINE_int32(should_deletes_per_run, 1, "number of ShouldDelete calls per run"); @@ -57,8 +54,6 @@ DEFINE_int32(should_deletes_per_run, 1, "number of ShouldDelete calls per run"); DEFINE_int32(add_tombstones_per_run, 1, "number of AddTombstones calls per run"); -DEFINE_bool(use_v2_aggregator, false, "benchmark RangeDelAggregatorV2"); - namespace { struct Stats { @@ -187,14 +182,10 @@ int main(int argc, char** argv) { std::vector( FLAGS_num_range_tombstones); } - auto mode = FLAGS_use_collapsed - ? 
rocksdb::RangeDelPositioningMode::kForwardTraversal - : rocksdb::RangeDelPositioningMode::kFullScan; + auto mode = rocksdb::RangeDelPositioningMode::kForwardTraversal; for (int i = 0; i < FLAGS_num_runs; i++) { - rocksdb::RangeDelAggregator range_del_agg(icmp, {} /* snapshots */, - FLAGS_use_collapsed); - rocksdb::ReadRangeDelAggregatorV2 range_del_agg_v2( + rocksdb::ReadRangeDelAggregator range_del_agg( &icmp, rocksdb::kMaxSequenceNumber /* upper_bound */); std::vector > @@ -223,17 +214,10 @@ int main(int argc, char** argv) { fragmented_range_tombstone_lists.back().get(), icmp, rocksdb::kMaxSequenceNumber)); - if (FLAGS_use_v2_aggregator) { - rocksdb::StopWatchNano stop_watch_add_tombstones( - rocksdb::Env::Default(), true /* auto_start */); - range_del_agg_v2.AddTombstones(std::move(fragmented_range_del_iter)); - stats.time_add_tombstones += stop_watch_add_tombstones.ElapsedNanos(); - } else { - rocksdb::StopWatchNano stop_watch_add_tombstones( - rocksdb::Env::Default(), true /* auto_start */); - range_del_agg.AddTombstones(std::move(range_del_iter)); - stats.time_add_tombstones += stop_watch_add_tombstones.ElapsedNanos(); - } + rocksdb::StopWatchNano stop_watch_add_tombstones(rocksdb::Env::Default(), + true /* auto_start */); + range_del_agg.AddTombstones(std::move(fragmented_range_del_iter)); + stats.time_add_tombstones += stop_watch_add_tombstones.ElapsedNanos(); } rocksdb::ParsedInternalKey parsed_key; @@ -247,18 +231,10 @@ int main(int argc, char** argv) { std::string key_string = rocksdb::Key(first_key + j); parsed_key.user_key = key_string; - uint64_t call_time; - if (FLAGS_use_v2_aggregator) { - rocksdb::StopWatchNano stop_watch_should_delete(rocksdb::Env::Default(), - true /* auto_start */); - range_del_agg_v2.ShouldDelete(parsed_key, mode); - call_time = stop_watch_should_delete.ElapsedNanos(); - } else { - rocksdb::StopWatchNano stop_watch_should_delete(rocksdb::Env::Default(), - true /* auto_start */); - range_del_agg.ShouldDelete(parsed_key, mode); - 
call_time = stop_watch_should_delete.ElapsedNanos(); - } + rocksdb::StopWatchNano stop_watch_should_delete(rocksdb::Env::Default(), + true /* auto_start */); + range_del_agg.ShouldDelete(parsed_key, mode); + uint64_t call_time = stop_watch_should_delete.ElapsedNanos(); if (j == 0) { stats.time_first_should_delete += call_time; diff --git a/db/range_del_aggregator_test.cc b/db/range_del_aggregator_test.cc index 2cfc6540e..28c8129ec 100644 --- a/db/range_del_aggregator_test.cc +++ b/db/range_del_aggregator_test.cc @@ -1,13 +1,17 @@ -// Copyright (c) 2016-present, Facebook, Inc. All rights reserved. +// Copyright (c) 2018-present, Facebook, Inc. All rights reserved. // This source code is licensed under both the GPLv2 (found in the // COPYING file in the root directory) and Apache 2.0 License // (found in the LICENSE.Apache file in the root directory). -#include +#include "db/range_del_aggregator.h" + +#include +#include +#include #include "db/db_test_util.h" -#include "db/range_del_aggregator.h" -#include "rocksdb/comparator.h" +#include "db/dbformat.h" +#include "db/range_tombstone_fragmenter.h" #include "util/testutil.h" namespace rocksdb { @@ -16,452 +20,685 @@ class RangeDelAggregatorTest : public testing::Test {}; namespace { -struct ExpectedPoint { - Slice begin; - SequenceNumber seq; - bool expectAlive; -}; - -enum Direction { - kForward, - kReverse, -}; - -struct AddTombstonesArgs { - const std::vector tombstones; - const InternalKey* smallest; - const InternalKey* largest; -}; - static auto bytewise_icmp = InternalKeyComparator(BytewiseComparator()); -void AddTombstones(RangeDelAggregator* range_del_agg, - const std::vector& range_dels, - const InternalKey* smallest = nullptr, - const InternalKey* largest = nullptr) { +std::unique_ptr MakeRangeDelIter( + const std::vector& range_dels) { std::vector keys, values; for (const auto& range_del : range_dels) { auto key_and_value = range_del.Serialize(); keys.push_back(key_and_value.first.Encode().ToString()); 
values.push_back(key_and_value.second.ToString()); } - std::unique_ptr range_del_iter( + return std::unique_ptr( new test::VectorIterator(keys, values)); - range_del_agg->AddTombstones(std::move(range_del_iter), smallest, largest); } -void VerifyTombstonesEq(const RangeTombstone& a, const RangeTombstone& b) { - ASSERT_EQ(a.seq_, b.seq_); - ASSERT_EQ(a.start_key_, b.start_key_); - ASSERT_EQ(a.end_key_, b.end_key_); -} - -void VerifyRangeDelIter( - RangeDelIterator* range_del_iter, - const std::vector& expected_range_dels) { - size_t i = 0; - for (; range_del_iter->Valid(); range_del_iter->Next(), i++) { - VerifyTombstonesEq(expected_range_dels[i], range_del_iter->Tombstone()); +std::vector> +MakeFragmentedTombstoneLists( + const std::vector>& range_dels_list) { + std::vector> fragment_lists; + for (const auto& range_dels : range_dels_list) { + auto range_del_iter = MakeRangeDelIter(range_dels); + fragment_lists.emplace_back(new FragmentedRangeTombstoneList( + std::move(range_del_iter), bytewise_icmp)); } - ASSERT_EQ(expected_range_dels.size(), i); - ASSERT_FALSE(range_del_iter->Valid()); + return fragment_lists; } -void VerifyRangeDels( - const std::vector& all_args, - const std::vector& expected_points, - const std::vector& expected_collapsed_range_dels, - const InternalKeyComparator& icmp = bytewise_icmp) { - // Test same result regardless of which order the range deletions are added - // and regardless of collapsed mode. 
- for (bool collapsed : {false, true}) { - for (Direction dir : {kForward, kReverse}) { - RangeDelAggregator range_del_agg(icmp, {} /* snapshots */, collapsed); - std::vector all_range_dels; - - for (const auto& args : all_args) { - std::vector range_dels = args.tombstones; - if (dir == kReverse) { - std::reverse(range_dels.begin(), range_dels.end()); - } - all_range_dels.insert(all_range_dels.end(), range_dels.begin(), - range_dels.end()); - AddTombstones(&range_del_agg, range_dels, args.smallest, args.largest); - } - - auto mode = RangeDelPositioningMode::kFullScan; - if (collapsed) { - mode = RangeDelPositioningMode::kForwardTraversal; - } - - for (const auto expected_point : expected_points) { - ParsedInternalKey parsed_key; - parsed_key.user_key = expected_point.begin; - parsed_key.sequence = expected_point.seq; - parsed_key.type = kTypeValue; - std::string ikey; - AppendInternalKey(&ikey, parsed_key); - ASSERT_FALSE(range_del_agg.ShouldDelete(ikey, mode)); - if (parsed_key.sequence > 0) { - --parsed_key.sequence; - ikey.clear(); - AppendInternalKey(&ikey, parsed_key); - if (expected_point.expectAlive) { - ASSERT_FALSE(range_del_agg.ShouldDelete(ikey, mode)); - } else { - ASSERT_TRUE(range_del_agg.ShouldDelete(ikey, mode)); - } - } - } - - if (collapsed) { - all_range_dels = expected_collapsed_range_dels; - VerifyRangeDelIter(range_del_agg.NewIterator().get(), all_range_dels); - } else if (all_args.size() == 1 && all_args[0].smallest == nullptr && - all_args[0].largest == nullptr) { - // Tombstones in an uncollapsed map are presented in start key - // order. Tombstones with the same start key are presented in - // insertion order. We don't handle tombstone truncation here, so the - // verification is only performed if no truncation was requested. 
- std::stable_sort(all_range_dels.begin(), all_range_dels.end(), - [&](const RangeTombstone& a, const RangeTombstone& b) { - return icmp.user_comparator()->Compare( - a.start_key_, b.start_key_) < 0; - }); - VerifyRangeDelIter(range_del_agg.NewIterator().get(), all_range_dels); - } - } - } - - RangeDelAggregator range_del_agg(icmp, {} /* snapshots */, - false /* collapse_deletions */); - for (const auto& args : all_args) { - AddTombstones(&range_del_agg, args.tombstones, args.smallest, args.largest); - } - for (size_t i = 1; i < expected_points.size(); ++i) { - bool overlapped = range_del_agg.IsRangeOverlapped( - expected_points[i - 1].begin, expected_points[i].begin); - if (expected_points[i - 1].seq > 0 || expected_points[i].seq > 0) { - ASSERT_TRUE(overlapped); - } else { - ASSERT_FALSE(overlapped); - } - } -} - -} // anonymous namespace - -TEST_F(RangeDelAggregatorTest, Empty) { VerifyRangeDels({}, {{"a", 0}}, {}); } - -TEST_F(RangeDelAggregatorTest, SameStartAndEnd) { - VerifyRangeDels({{{{"a", "a", 5}}}}, {{" ", 0}, {"a", 0}, {"b", 0}}, {}); -} - -TEST_F(RangeDelAggregatorTest, Single) { - VerifyRangeDels({{{{"a", "b", 10}}}}, {{" ", 0}, {"a", 10}, {"b", 0}}, - {{"a", "b", 10}}); -} - -TEST_F(RangeDelAggregatorTest, OverlapAboveLeft) { - VerifyRangeDels({{{{"a", "c", 10}, {"b", "d", 5}}}}, - {{" ", 0}, {"a", 10}, {"c", 5}, {"d", 0}}, - {{"a", "c", 10}, {"c", "d", 5}}); -} - -TEST_F(RangeDelAggregatorTest, OverlapAboveRight) { - VerifyRangeDels({{{{"a", "c", 5}, {"b", "d", 10}}}}, - {{" ", 0}, {"a", 5}, {"b", 10}, {"d", 0}}, - {{"a", "b", 5}, {"b", "d", 10}}); -} - -TEST_F(RangeDelAggregatorTest, OverlapAboveMiddle) { - VerifyRangeDels({{{{"a", "d", 5}, {"b", "c", 10}}}}, - {{" ", 0}, {"a", 5}, {"b", 10}, {"c", 5}, {"d", 0}}, - {{"a", "b", 5}, {"b", "c", 10}, {"c", "d", 5}}); -} - -TEST_F(RangeDelAggregatorTest, OverlapAboveMiddleReverse) { - VerifyRangeDels({{{{"d", "a", 5}, {"c", "b", 10}}}}, - {{"z", 0}, {"d", 5}, {"c", 10}, {"b", 5}, {"a", 0}}, - {{"d", 
"c", 5}, {"c", "b", 10}, {"b", "a", 5}}, - InternalKeyComparator(ReverseBytewiseComparator())); -} - -TEST_F(RangeDelAggregatorTest, OverlapFully) { - VerifyRangeDels({{{{"a", "d", 10}, {"b", "c", 5}}}}, - {{" ", 0}, {"a", 10}, {"d", 0}}, {{"a", "d", 10}}); -} +struct TruncatedIterScanTestCase { + ParsedInternalKey start; + ParsedInternalKey end; + SequenceNumber seq; +}; -TEST_F(RangeDelAggregatorTest, OverlapPoint) { - VerifyRangeDels({{{{"a", "b", 5}, {"b", "c", 10}}}}, - {{" ", 0}, {"a", 5}, {"b", 10}, {"c", 0}}, - {{"a", "b", 5}, {"b", "c", 10}}); -} +struct TruncatedIterSeekTestCase { + Slice target; + ParsedInternalKey start; + ParsedInternalKey end; + SequenceNumber seq; + bool invalid; +}; -TEST_F(RangeDelAggregatorTest, SameStartKey) { - VerifyRangeDels({{{{"a", "c", 5}, {"a", "b", 10}}}}, - {{" ", 0}, {"a", 10}, {"b", 5}, {"c", 0}}, - {{"a", "b", 10}, {"b", "c", 5}}); -} +struct ShouldDeleteTestCase { + ParsedInternalKey lookup_key; + bool result; +}; -TEST_F(RangeDelAggregatorTest, SameEndKey) { - VerifyRangeDels({{{{"a", "d", 5}, {"b", "d", 10}}}}, - {{" ", 0}, {"a", 5}, {"b", 10}, {"d", 0}}, - {{"a", "b", 5}, {"b", "d", 10}}); -} +struct IsRangeOverlappedTestCase { + Slice start; + Slice end; + bool result; +}; -TEST_F(RangeDelAggregatorTest, GapsBetweenRanges) { - VerifyRangeDels({{{{"a", "b", 5}, {"c", "d", 10}, {"e", "f", 15}}}}, - {{" ", 0}, - {"a", 5}, - {"b", 0}, - {"c", 10}, - {"d", 0}, - {"da", 0}, - {"e", 15}, - {"f", 0}}, - {{"a", "b", 5}, {"c", "d", 10}, {"e", "f", 15}}); +ParsedInternalKey UncutEndpoint(const Slice& s) { + return ParsedInternalKey(s, kMaxSequenceNumber, kTypeRangeDeletion); } -TEST_F(RangeDelAggregatorTest, IdenticalSameSeqNo) { - VerifyRangeDels({{{{"a", "b", 5}, {"a", "b", 5}}}}, - {{" ", 0}, {"a", 5}, {"b", 0}}, - {{"a", "b", 5}}); +ParsedInternalKey InternalValue(const Slice& key, SequenceNumber seq) { + return ParsedInternalKey(key, seq, kTypeValue); } -TEST_F(RangeDelAggregatorTest, ContiguousSameSeqNo) { - 
VerifyRangeDels({{{{"a", "b", 5}, {"b", "c", 5}}}}, - {{" ", 0}, {"a", 5}, {"b", 5}, {"c", 0}}, - {{"a", "c", 5}}); +void VerifyIterator( + TruncatedRangeDelIterator* iter, const InternalKeyComparator& icmp, + const std::vector& expected_range_dels) { + // Test forward iteration. + iter->SeekToFirst(); + for (size_t i = 0; i < expected_range_dels.size(); i++, iter->Next()) { + ASSERT_TRUE(iter->Valid()); + EXPECT_EQ(0, icmp.Compare(iter->start_key(), expected_range_dels[i].start)); + EXPECT_EQ(0, icmp.Compare(iter->end_key(), expected_range_dels[i].end)); + EXPECT_EQ(expected_range_dels[i].seq, iter->seq()); + } + EXPECT_FALSE(iter->Valid()); + + // Test reverse iteration. + iter->SeekToLast(); + std::vector reverse_expected_range_dels( + expected_range_dels.rbegin(), expected_range_dels.rend()); + for (size_t i = 0; i < reverse_expected_range_dels.size(); + i++, iter->Prev()) { + ASSERT_TRUE(iter->Valid()); + EXPECT_EQ(0, icmp.Compare(iter->start_key(), + reverse_expected_range_dels[i].start)); + EXPECT_EQ( + 0, icmp.Compare(iter->end_key(), reverse_expected_range_dels[i].end)); + EXPECT_EQ(reverse_expected_range_dels[i].seq, iter->seq()); + } + EXPECT_FALSE(iter->Valid()); } -TEST_F(RangeDelAggregatorTest, OverlappingSameSeqNo) { - VerifyRangeDels({{{{"a", "c", 5}, {"b", "d", 5}}}}, - {{" ", 0}, {"a", 5}, {"b", 5}, {"c", 5}, {"d", 0}}, - {{"a", "d", 5}}); +void VerifySeek(TruncatedRangeDelIterator* iter, + const InternalKeyComparator& icmp, + const std::vector& test_cases) { + for (const auto& test_case : test_cases) { + iter->Seek(test_case.target); + if (test_case.invalid) { + ASSERT_FALSE(iter->Valid()); + } else { + ASSERT_TRUE(iter->Valid()); + EXPECT_EQ(0, icmp.Compare(iter->start_key(), test_case.start)); + EXPECT_EQ(0, icmp.Compare(iter->end_key(), test_case.end)); + EXPECT_EQ(test_case.seq, iter->seq()); + } + } } -TEST_F(RangeDelAggregatorTest, CoverSameSeqNo) { - VerifyRangeDels({{{{"a", "d", 5}, {"b", "c", 5}}}}, - {{" ", 0}, {"a", 5}, {"b", 5}, {"c", 
5}, {"d", 0}}, - {{"a", "d", 5}}); +void VerifySeekForPrev( + TruncatedRangeDelIterator* iter, const InternalKeyComparator& icmp, + const std::vector& test_cases) { + for (const auto& test_case : test_cases) { + iter->SeekForPrev(test_case.target); + if (test_case.invalid) { + ASSERT_FALSE(iter->Valid()); + } else { + ASSERT_TRUE(iter->Valid()); + EXPECT_EQ(0, icmp.Compare(iter->start_key(), test_case.start)); + EXPECT_EQ(0, icmp.Compare(iter->end_key(), test_case.end)); + EXPECT_EQ(test_case.seq, iter->seq()); + } + } } -// Note the Cover* tests also test cases where tombstones are inserted under a -// larger one when VerifyRangeDels() runs them in reverse -TEST_F(RangeDelAggregatorTest, CoverMultipleFromLeft) { - VerifyRangeDels( - {{{{"b", "d", 5}, {"c", "f", 10}, {"e", "g", 15}, {"a", "f", 20}}}}, - {{" ", 0}, {"a", 20}, {"f", 15}, {"g", 0}}, - {{"a", "f", 20}, {"f", "g", 15}}); +void VerifyShouldDelete(RangeDelAggregator* range_del_agg, + const std::vector& test_cases) { + for (const auto& test_case : test_cases) { + EXPECT_EQ( + test_case.result, + range_del_agg->ShouldDelete( + test_case.lookup_key, RangeDelPositioningMode::kForwardTraversal)); + } + for (auto it = test_cases.rbegin(); it != test_cases.rend(); ++it) { + const auto& test_case = *it; + EXPECT_EQ( + test_case.result, + range_del_agg->ShouldDelete( + test_case.lookup_key, RangeDelPositioningMode::kBackwardTraversal)); + } } -TEST_F(RangeDelAggregatorTest, CoverMultipleFromRight) { - VerifyRangeDels( - {{{{"b", "d", 5}, {"c", "f", 10}, {"e", "g", 15}, {"c", "h", 20}}}}, - {{" ", 0}, {"b", 5}, {"c", 20}, {"h", 0}}, - {{"b", "c", 5}, {"c", "h", 20}}); +void VerifyIsRangeOverlapped( + ReadRangeDelAggregator* range_del_agg, + const std::vector& test_cases) { + for (const auto& test_case : test_cases) { + EXPECT_EQ(test_case.result, + range_del_agg->IsRangeOverlapped(test_case.start, test_case.end)); + } } -TEST_F(RangeDelAggregatorTest, CoverMultipleFully) { - VerifyRangeDels( - {{{{"b", "d", 5}, 
{"c", "f", 10}, {"e", "g", 15}, {"a", "h", 20}}}}, - {{" ", 0}, {"a", 20}, {"h", 0}}, {{"a", "h", 20}}); -} +void CheckIterPosition(const RangeTombstone& tombstone, + const FragmentedRangeTombstoneIterator* iter) { + // Test InternalIterator interface. + EXPECT_EQ(tombstone.start_key_, ExtractUserKey(iter->key())); + EXPECT_EQ(tombstone.end_key_, iter->value()); + EXPECT_EQ(tombstone.seq_, iter->seq()); -TEST_F(RangeDelAggregatorTest, AlternateMultipleAboveBelow) { - VerifyRangeDels( - {{{{"b", "d", 15}, {"c", "f", 10}, {"e", "g", 20}, {"a", "h", 5}}}}, - {{" ", 0}, {"a", 5}, {"b", 15}, {"d", 10}, {"e", 20}, {"g", 5}, {"h", 0}}, - {{"a", "b", 5}, - {"b", "d", 15}, - {"d", "e", 10}, - {"e", "g", 20}, - {"g", "h", 5}}); + // Test FragmentedRangeTombstoneIterator interface. + EXPECT_EQ(tombstone.start_key_, iter->start_key()); + EXPECT_EQ(tombstone.end_key_, iter->end_key()); + EXPECT_EQ(tombstone.seq_, GetInternalKeySeqno(iter->key())); } -TEST_F(RangeDelAggregatorTest, MergingIteratorAllEmptyStripes) { - for (bool collapsed : {true, false}) { - RangeDelAggregator range_del_agg(bytewise_icmp, {1, 2}, collapsed); - VerifyRangeDelIter(range_del_agg.NewIterator().get(), {}); +void VerifyFragmentedRangeDels( + FragmentedRangeTombstoneIterator* iter, + const std::vector& expected_tombstones) { + iter->SeekToFirst(); + for (size_t i = 0; i < expected_tombstones.size(); i++, iter->Next()) { + ASSERT_TRUE(iter->Valid()); + CheckIterPosition(expected_tombstones[i], iter); } -} - -TEST_F(RangeDelAggregatorTest, MergingIteratorOverlappingStripes) { - for (bool collapsed : {true, false}) { - RangeDelAggregator range_del_agg(bytewise_icmp, {5, 15, 25, 35}, collapsed); - AddTombstones( - &range_del_agg, - {{"d", "e", 10}, {"aa", "b", 20}, {"c", "d", 30}, {"a", "b", 10}}); - VerifyRangeDelIter( - range_del_agg.NewIterator().get(), - {{"a", "b", 10}, {"aa", "b", 20}, {"c", "d", 30}, {"d", "e", 10}}); + EXPECT_FALSE(iter->Valid()); +} + +} // namespace + 
+TEST_F(RangeDelAggregatorTest, EmptyTruncatedIter) { + auto range_del_iter = MakeRangeDelIter({}); + FragmentedRangeTombstoneList fragment_list(std::move(range_del_iter), + bytewise_icmp); + std::unique_ptr input_iter( + new FragmentedRangeTombstoneIterator(&fragment_list, bytewise_icmp, + kMaxSequenceNumber)); + + TruncatedRangeDelIterator iter(std::move(input_iter), &bytewise_icmp, nullptr, + nullptr); + + iter.SeekToFirst(); + ASSERT_FALSE(iter.Valid()); + + iter.SeekToLast(); + ASSERT_FALSE(iter.Valid()); +} + +TEST_F(RangeDelAggregatorTest, UntruncatedIter) { + auto range_del_iter = + MakeRangeDelIter({{"a", "e", 10}, {"e", "g", 8}, {"j", "n", 4}}); + FragmentedRangeTombstoneList fragment_list(std::move(range_del_iter), + bytewise_icmp); + std::unique_ptr input_iter( + new FragmentedRangeTombstoneIterator(&fragment_list, bytewise_icmp, + kMaxSequenceNumber)); + + TruncatedRangeDelIterator iter(std::move(input_iter), &bytewise_icmp, nullptr, + nullptr); + + VerifyIterator(&iter, bytewise_icmp, + {{UncutEndpoint("a"), UncutEndpoint("e"), 10}, + {UncutEndpoint("e"), UncutEndpoint("g"), 8}, + {UncutEndpoint("j"), UncutEndpoint("n"), 4}}); + + VerifySeek( + &iter, bytewise_icmp, + {{"d", UncutEndpoint("a"), UncutEndpoint("e"), 10}, + {"e", UncutEndpoint("e"), UncutEndpoint("g"), 8}, + {"ia", UncutEndpoint("j"), UncutEndpoint("n"), 4}, + {"n", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */}, + {"", UncutEndpoint("a"), UncutEndpoint("e"), 10}}); + + VerifySeekForPrev( + &iter, bytewise_icmp, + {{"d", UncutEndpoint("a"), UncutEndpoint("e"), 10}, + {"e", UncutEndpoint("e"), UncutEndpoint("g"), 8}, + {"ia", UncutEndpoint("e"), UncutEndpoint("g"), 8}, + {"n", UncutEndpoint("j"), UncutEndpoint("n"), 4}, + {"", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */}}); +} + +TEST_F(RangeDelAggregatorTest, UntruncatedIterWithSnapshot) { + auto range_del_iter = + MakeRangeDelIter({{"a", "e", 10}, {"e", "g", 8}, {"j", "n", 4}}); + 
FragmentedRangeTombstoneList fragment_list(std::move(range_del_iter), + bytewise_icmp); + std::unique_ptr input_iter( + new FragmentedRangeTombstoneIterator(&fragment_list, bytewise_icmp, + 9 /* snapshot */)); + + TruncatedRangeDelIterator iter(std::move(input_iter), &bytewise_icmp, nullptr, + nullptr); + + VerifyIterator(&iter, bytewise_icmp, + {{UncutEndpoint("e"), UncutEndpoint("g"), 8}, + {UncutEndpoint("j"), UncutEndpoint("n"), 4}}); + + VerifySeek( + &iter, bytewise_icmp, + {{"d", UncutEndpoint("e"), UncutEndpoint("g"), 8}, + {"e", UncutEndpoint("e"), UncutEndpoint("g"), 8}, + {"ia", UncutEndpoint("j"), UncutEndpoint("n"), 4}, + {"n", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */}, + {"", UncutEndpoint("e"), UncutEndpoint("g"), 8}}); + + VerifySeekForPrev( + &iter, bytewise_icmp, + {{"d", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */}, + {"e", UncutEndpoint("e"), UncutEndpoint("g"), 8}, + {"ia", UncutEndpoint("e"), UncutEndpoint("g"), 8}, + {"n", UncutEndpoint("j"), UncutEndpoint("n"), 4}, + {"", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */}}); +} + +TEST_F(RangeDelAggregatorTest, TruncatedIterPartiallyCutTombstones) { + auto range_del_iter = + MakeRangeDelIter({{"a", "e", 10}, {"e", "g", 8}, {"j", "n", 4}}); + FragmentedRangeTombstoneList fragment_list(std::move(range_del_iter), + bytewise_icmp); + std::unique_ptr input_iter( + new FragmentedRangeTombstoneIterator(&fragment_list, bytewise_icmp, + kMaxSequenceNumber)); + + InternalKey smallest("d", 7, kTypeValue); + InternalKey largest("m", 9, kTypeValue); + TruncatedRangeDelIterator iter(std::move(input_iter), &bytewise_icmp, + &smallest, &largest); + + VerifyIterator(&iter, bytewise_icmp, + {{InternalValue("d", 7), UncutEndpoint("e"), 10}, + {UncutEndpoint("e"), UncutEndpoint("g"), 8}, + {UncutEndpoint("j"), InternalValue("m", 8), 4}}); + + VerifySeek( + &iter, bytewise_icmp, + {{"d", InternalValue("d", 7), UncutEndpoint("e"), 10}, + {"e", UncutEndpoint("e"), 
UncutEndpoint("g"), 8}, + {"ia", UncutEndpoint("j"), InternalValue("m", 8), 4}, + {"n", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */}, + {"", InternalValue("d", 7), UncutEndpoint("e"), 10}}); + + VerifySeekForPrev( + &iter, bytewise_icmp, + {{"d", InternalValue("d", 7), UncutEndpoint("e"), 10}, + {"e", UncutEndpoint("e"), UncutEndpoint("g"), 8}, + {"ia", UncutEndpoint("e"), UncutEndpoint("g"), 8}, + {"n", UncutEndpoint("j"), InternalValue("m", 8), 4}, + {"", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */}}); +} + +TEST_F(RangeDelAggregatorTest, TruncatedIterFullyCutTombstones) { + auto range_del_iter = + MakeRangeDelIter({{"a", "e", 10}, {"e", "g", 8}, {"j", "n", 4}}); + FragmentedRangeTombstoneList fragment_list(std::move(range_del_iter), + bytewise_icmp); + std::unique_ptr input_iter( + new FragmentedRangeTombstoneIterator(&fragment_list, bytewise_icmp, + kMaxSequenceNumber)); + + InternalKey smallest("f", 7, kTypeValue); + InternalKey largest("i", 9, kTypeValue); + TruncatedRangeDelIterator iter(std::move(input_iter), &bytewise_icmp, + &smallest, &largest); + + VerifyIterator(&iter, bytewise_icmp, + {{InternalValue("f", 7), UncutEndpoint("g"), 8}}); + + VerifySeek( + &iter, bytewise_icmp, + {{"d", InternalValue("f", 7), UncutEndpoint("g"), 8}, + {"f", InternalValue("f", 7), UncutEndpoint("g"), 8}, + {"j", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */}}); + + VerifySeekForPrev( + &iter, bytewise_icmp, + {{"d", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */}, + {"f", InternalValue("f", 7), UncutEndpoint("g"), 8}, + {"j", InternalValue("f", 7), UncutEndpoint("g"), 8}}); +} + +TEST_F(RangeDelAggregatorTest, SingleIterInAggregator) { + auto range_del_iter = MakeRangeDelIter({{"a", "e", 10}, {"c", "g", 8}}); + FragmentedRangeTombstoneList fragment_list(std::move(range_del_iter), + bytewise_icmp); + std::unique_ptr input_iter( + new FragmentedRangeTombstoneIterator(&fragment_list, bytewise_icmp, + 
kMaxSequenceNumber)); + + ReadRangeDelAggregator range_del_agg(&bytewise_icmp, kMaxSequenceNumber); + range_del_agg.AddTombstones(std::move(input_iter)); + + VerifyShouldDelete(&range_del_agg, {{InternalValue("a", 19), false}, + {InternalValue("b", 9), true}, + {InternalValue("d", 9), true}, + {InternalValue("e", 7), true}, + {InternalValue("g", 7), false}}); + + VerifyIsRangeOverlapped(&range_del_agg, {{"", "_", false}, + {"_", "a", true}, + {"a", "c", true}, + {"d", "f", true}, + {"g", "l", false}}); +} + +TEST_F(RangeDelAggregatorTest, MultipleItersInAggregator) { + auto fragment_lists = MakeFragmentedTombstoneLists( + {{{"a", "e", 10}, {"c", "g", 8}}, + {{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}}); + + ReadRangeDelAggregator range_del_agg(&bytewise_icmp, kMaxSequenceNumber); + for (const auto& fragment_list : fragment_lists) { + std::unique_ptr input_iter( + new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp, + kMaxSequenceNumber)); + range_del_agg.AddTombstones(std::move(input_iter)); } -} -TEST_F(RangeDelAggregatorTest, MergingIteratorSeek) { - RangeDelAggregator range_del_agg(bytewise_icmp, {5, 15}, - true /* collapsed */); - AddTombstones(&range_del_agg, {{"a", "c", 10}, - {"b", "c", 11}, - {"f", "g", 10}, - {"c", "d", 20}, - {"e", "f", 20}}); - auto it = range_del_agg.NewIterator(); - - // Verify seek positioning. 
- it->Seek(""); - VerifyTombstonesEq(it->Tombstone(), {"a", "b", 10}); - it->Seek("a"); - VerifyTombstonesEq(it->Tombstone(), {"a", "b", 10}); - it->Seek("aa"); - VerifyTombstonesEq(it->Tombstone(), {"a", "b", 10}); - it->Seek("b"); - VerifyTombstonesEq(it->Tombstone(), {"b", "c", 11}); - it->Seek("c"); - VerifyTombstonesEq(it->Tombstone(), {"c", "d", 20}); - it->Seek("dd"); - VerifyTombstonesEq(it->Tombstone(), {"e", "f", 20}); - it->Seek("f"); - VerifyTombstonesEq(it->Tombstone(), {"f", "g", 10}); - it->Seek("g"); - ASSERT_EQ(it->Valid(), false); - it->Seek("h"); - ASSERT_EQ(it->Valid(), false); - - // Verify iteration after seek. - it->Seek("c"); - VerifyRangeDelIter(it.get(), - {{"c", "d", 20}, {"e", "f", 20}, {"f", "g", 10}}); -} + VerifyShouldDelete(&range_del_agg, {{InternalValue("a", 19), true}, + {InternalValue("b", 19), false}, + {InternalValue("b", 9), true}, + {InternalValue("d", 9), true}, + {InternalValue("e", 7), true}, + {InternalValue("g", 7), false}, + {InternalValue("h", 24), true}, + {InternalValue("i", 24), false}, + {InternalValue("ii", 14), true}, + {InternalValue("j", 14), false}}); + + VerifyIsRangeOverlapped(&range_del_agg, {{"", "_", false}, + {"_", "a", true}, + {"a", "c", true}, + {"d", "f", true}, + {"g", "l", true}, + {"x", "y", false}}); +} + +TEST_F(RangeDelAggregatorTest, MultipleItersInAggregatorWithUpperBound) { + auto fragment_lists = MakeFragmentedTombstoneLists( + {{{"a", "e", 10}, {"c", "g", 8}}, + {{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}}); + + ReadRangeDelAggregator range_del_agg(&bytewise_icmp, 19); + for (const auto& fragment_list : fragment_lists) { + std::unique_ptr input_iter( + new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp, + 19 /* snapshot */)); + range_del_agg.AddTombstones(std::move(input_iter)); + } -TEST_F(RangeDelAggregatorTest, TruncateTombstones) { - const InternalKey smallest("b", kMaxSequenceNumber, kTypeRangeDeletion); - const InternalKey largest("e", kMaxSequenceNumber, 
kTypeRangeDeletion); - VerifyRangeDels( - {{{{"a", "c", 10}, {"d", "f", 10}}, &smallest, &largest}}, - {{"a", 10, true}, // truncated - {"b", 10, false}, // not truncated - {"d", 10, false}, // not truncated - {"e", 10, true}}, // truncated - {{"b", "c", 10}, {"d", "e", 10}}); -} + VerifyShouldDelete(&range_del_agg, {{InternalValue("a", 19), false}, + {InternalValue("a", 9), true}, + {InternalValue("b", 9), true}, + {InternalValue("d", 9), true}, + {InternalValue("e", 7), true}, + {InternalValue("g", 7), false}, + {InternalValue("h", 24), false}, + {InternalValue("i", 24), false}, + {InternalValue("ii", 14), true}, + {InternalValue("j", 14), false}}); + + VerifyIsRangeOverlapped(&range_del_agg, {{"", "_", false}, + {"_", "a", true}, + {"a", "c", true}, + {"d", "f", true}, + {"g", "l", true}, + {"x", "y", false}}); +} + +TEST_F(RangeDelAggregatorTest, MultipleTruncatedItersInAggregator) { + auto fragment_lists = MakeFragmentedTombstoneLists( + {{{"a", "z", 10}}, {{"a", "z", 10}}, {{"a", "z", 10}}}); + std::vector> iter_bounds = { + {InternalKey("a", 4, kTypeValue), + InternalKey("m", kMaxSequenceNumber, kTypeRangeDeletion)}, + {InternalKey("m", 20, kTypeValue), + InternalKey("x", kMaxSequenceNumber, kTypeRangeDeletion)}, + {InternalKey("x", 5, kTypeValue), InternalKey("zz", 30, kTypeValue)}}; + + ReadRangeDelAggregator range_del_agg(&bytewise_icmp, 19); + for (size_t i = 0; i < fragment_lists.size(); i++) { + const auto& fragment_list = fragment_lists[i]; + const auto& bounds = iter_bounds[i]; + std::unique_ptr input_iter( + new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp, + 19 /* snapshot */)); + range_del_agg.AddTombstones(std::move(input_iter), &bounds.first, + &bounds.second); + } -TEST_F(RangeDelAggregatorTest, OverlappingLargestKeyTruncateBelowTombstone) { - const InternalKey smallest("b", kMaxSequenceNumber, kTypeRangeDeletion); - const InternalKey largest( - "e", 3, // could happen if "e" is in consecutive sstables - kTypeValue); - 
VerifyRangeDels( - {{{{"a", "c", 10}, {"d", "f", 10}}, &smallest, &largest}}, - {{"a", 10, true}, // truncated - {"b", 10, false}, // not truncated - {"d", 10, false}, // not truncated - {"e", 10, false}, // not truncated - {"e", 2, true}}, // truncated here - {{"b", "c", 10}, {"d", "e", 10}}); -} + VerifyShouldDelete(&range_del_agg, {{InternalValue("a", 10), false}, + {InternalValue("a", 9), false}, + {InternalValue("a", 4), true}, + {InternalValue("m", 10), false}, + {InternalValue("m", 9), true}, + {InternalValue("x", 10), false}, + {InternalValue("x", 9), false}, + {InternalValue("x", 5), true}, + {InternalValue("z", 9), false}}); + + VerifyIsRangeOverlapped(&range_del_agg, {{"", "_", false}, + {"_", "a", true}, + {"a", "n", true}, + {"l", "x", true}, + {"w", "z", true}, + {"zzz", "zz", false}, + {"zz", "zzz", false}}); +} + +TEST_F(RangeDelAggregatorTest, MultipleTruncatedItersInAggregatorSameLevel) { + auto fragment_lists = MakeFragmentedTombstoneLists( + {{{"a", "z", 10}}, {{"a", "z", 10}}, {{"a", "z", 10}}}); + std::vector> iter_bounds = { + {InternalKey("a", 4, kTypeValue), + InternalKey("m", kMaxSequenceNumber, kTypeRangeDeletion)}, + {InternalKey("m", 20, kTypeValue), + InternalKey("x", kMaxSequenceNumber, kTypeRangeDeletion)}, + {InternalKey("x", 5, kTypeValue), InternalKey("zz", 30, kTypeValue)}}; + + ReadRangeDelAggregator range_del_agg(&bytewise_icmp, 19); + + auto add_iter_to_agg = [&](size_t i) { + std::unique_ptr input_iter( + new FragmentedRangeTombstoneIterator(fragment_lists[i].get(), + bytewise_icmp, 19 /* snapshot */)); + range_del_agg.AddTombstones(std::move(input_iter), &iter_bounds[i].first, + &iter_bounds[i].second); + }; + + add_iter_to_agg(0); + VerifyShouldDelete(&range_del_agg, {{InternalValue("a", 10), false}, + {InternalValue("a", 9), false}, + {InternalValue("a", 4), true}}); + + add_iter_to_agg(1); + VerifyShouldDelete(&range_del_agg, {{InternalValue("m", 10), false}, + {InternalValue("m", 9), true}}); + + add_iter_to_agg(2); + 
VerifyShouldDelete(&range_del_agg, {{InternalValue("x", 10), false}, + {InternalValue("x", 9), false}, + {InternalValue("x", 5), true}, + {InternalValue("z", 9), false}}); + + VerifyIsRangeOverlapped(&range_del_agg, {{"", "_", false}, + {"_", "a", true}, + {"a", "n", true}, + {"l", "x", true}, + {"w", "z", true}, + {"zzz", "zz", false}, + {"zz", "zzz", false}}); +} + +TEST_F(RangeDelAggregatorTest, CompactionAggregatorNoSnapshots) { + auto fragment_lists = MakeFragmentedTombstoneLists( + {{{"a", "e", 10}, {"c", "g", 8}}, + {{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}}); + + std::vector snapshots; + CompactionRangeDelAggregator range_del_agg(&bytewise_icmp, snapshots); + for (const auto& fragment_list : fragment_lists) { + std::unique_ptr input_iter( + new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp, + kMaxSequenceNumber)); + range_del_agg.AddTombstones(std::move(input_iter)); + } -TEST_F(RangeDelAggregatorTest, OverlappingLargestKeyTruncateAboveTombstone) { - const InternalKey smallest("b", kMaxSequenceNumber, kTypeRangeDeletion); - const InternalKey largest( - "e", 15, // could happen if "e" is in consecutive sstables - kTypeValue); - VerifyRangeDels( - {{{{"a", "c", 10}, {"d", "f", 10}}, &smallest, &largest}}, - {{"a", 10, true}, // truncated - {"b", 10, false}, // not truncated - {"d", 10, false}, // not truncated - {"e", kMaxSequenceNumber, true}}, // truncated - {{"b", "c", 10}, {"d", "e", 10}}); -} + VerifyShouldDelete(&range_del_agg, {{InternalValue("a", 19), true}, + {InternalValue("b", 19), false}, + {InternalValue("b", 9), true}, + {InternalValue("d", 9), true}, + {InternalValue("e", 7), true}, + {InternalValue("g", 7), false}, + {InternalValue("h", 24), true}, + {InternalValue("i", 24), false}, + {InternalValue("ii", 14), true}, + {InternalValue("j", 14), false}}); + + auto range_del_compaction_iter = range_del_agg.NewIterator(); + VerifyFragmentedRangeDels(range_del_compaction_iter.get(), {{"a", "b", 20}, + {"b", "c", 10}, + 
{"c", "e", 10}, + {"e", "g", 8}, + {"h", "i", 25}, + {"ii", "j", 15}}); +} + +TEST_F(RangeDelAggregatorTest, CompactionAggregatorWithSnapshots) { + auto fragment_lists = MakeFragmentedTombstoneLists( + {{{"a", "e", 10}, {"c", "g", 8}}, + {{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}}); + + std::vector snapshots{9, 19}; + CompactionRangeDelAggregator range_del_agg(&bytewise_icmp, snapshots); + for (const auto& fragment_list : fragment_lists) { + std::unique_ptr input_iter( + new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp, + kMaxSequenceNumber)); + range_del_agg.AddTombstones(std::move(input_iter)); + } -TEST_F(RangeDelAggregatorTest, OverlappingSmallestKeyTruncateBelowTombstone) { - const InternalKey smallest("b", 5, kTypeValue); - const InternalKey largest("e", kMaxSequenceNumber, kTypeRangeDeletion); - VerifyRangeDels( - {{{{"a", "c", 10}, {"d", "f", 10}}, &smallest, &largest}}, - {{"a", 10, true}, // truncated - {"b", 10, true}, // truncated - {"b", 6, false}, // not truncated; start boundary moved - {"d", 10, false}, // not truncated - {"e", kMaxSequenceNumber, true}}, // truncated - {{"b", "c", 10}, {"d", "e", 10}}); -} + VerifyShouldDelete( + &range_del_agg, + { + {InternalValue("a", 19), false}, // [10, 19] + {InternalValue("a", 9), false}, // [0, 9] + {InternalValue("b", 9), false}, // [0, 9] + {InternalValue("d", 9), false}, // [0, 9] + {InternalValue("d", 7), true}, // [0, 9] + {InternalValue("e", 7), true}, // [0, 9] + {InternalValue("g", 7), false}, // [0, 9] + {InternalValue("h", 24), true}, // [20, kMaxSequenceNumber] + {InternalValue("i", 24), false}, // [20, kMaxSequenceNumber] + {InternalValue("ii", 14), true}, // [10, 19] + {InternalValue("j", 14), false} // [10, 19] + }); + + auto range_del_compaction_iter = range_del_agg.NewIterator(); + VerifyFragmentedRangeDels(range_del_compaction_iter.get(), {{"a", "b", 20}, + {"a", "b", 10}, + {"b", "c", 10}, + {"c", "e", 10}, + {"c", "e", 8}, + {"e", "g", 8}, + {"h", "i", 25}, + 
{"ii", "j", 15}}); +} + +TEST_F(RangeDelAggregatorTest, CompactionAggregatorEmptyIteratorLeft) { + auto fragment_lists = MakeFragmentedTombstoneLists( + {{{"a", "e", 10}, {"c", "g", 8}}, + {{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}}); + + std::vector snapshots{9, 19}; + CompactionRangeDelAggregator range_del_agg(&bytewise_icmp, snapshots); + for (const auto& fragment_list : fragment_lists) { + std::unique_ptr input_iter( + new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp, + kMaxSequenceNumber)); + range_del_agg.AddTombstones(std::move(input_iter)); + } -TEST_F(RangeDelAggregatorTest, OverlappingSmallestKeyTruncateAboveTombstone) { - const InternalKey smallest("b", 15, kTypeValue); - const InternalKey largest("e", kMaxSequenceNumber, kTypeRangeDeletion); - VerifyRangeDels( - {{{{"a", "c", 10}, {"d", "f", 10}}, &smallest, &largest}}, - {{"a", 10, true}, // truncated - {"b", 15, true}, // truncated - {"b", 10, false}, // not truncated - {"d", 10, false}, // not truncated - {"e", kMaxSequenceNumber, true}}, // truncated - {{"b", "c", 10}, {"d", "e", 10}}); + Slice start("_"); + Slice end("__"); } -TEST_F(RangeDelAggregatorTest, OverlappingBoundaryGapAboveTombstone) { - const InternalKey smallest1("b", kMaxSequenceNumber, kTypeRangeDeletion); - const InternalKey largest1("c", 20, kTypeValue); - const InternalKey smallest2("c", 10, kTypeValue); - const InternalKey largest2("e", kMaxSequenceNumber, kTypeRangeDeletion); - VerifyRangeDels( - {{{{"b", "d", 5}}, &smallest1, &largest1}, - {{{"b", "d", 5}}, &smallest2, &largest2}}, - {{"b", 5, false}, // not truncated - {"c", 5, false}}, // not truncated - {{"b", "c", 5}, {"c", "d", 5}}); // not collapsed due to boundaries -} +TEST_F(RangeDelAggregatorTest, CompactionAggregatorEmptyIteratorRight) { + auto fragment_lists = MakeFragmentedTombstoneLists( + {{{"a", "e", 10}, {"c", "g", 8}}, + {{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}}); -TEST_F(RangeDelAggregatorTest, 
OverlappingBoundaryGapBelowTombstone) { - const InternalKey smallest1("b", kMaxSequenceNumber, kTypeRangeDeletion); - const InternalKey largest1("c", 20, kTypeValue); - const InternalKey smallest2("c", 10, kTypeValue); - const InternalKey largest2("e", kMaxSequenceNumber, kTypeRangeDeletion); - VerifyRangeDels( - {{{{"b", "d", 30}}, &smallest1, &largest1}, - {{{"b", "d", 30}}, &smallest2, &largest2}}, - {{"b", 30, false}, // not truncated - {"c", 30, false}, // not truncated - {"c", 19, true}, // truncated here (keys in this range should not exist) - {"c", 11, false}}, // not truncated again - {{"b", "c", 30}, {"c", "d", 30}}); // not collapsed due to boundaries -} + std::vector snapshots{9, 19}; + CompactionRangeDelAggregator range_del_agg(&bytewise_icmp, snapshots); + for (const auto& fragment_list : fragment_lists) { + std::unique_ptr input_iter( + new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp, + kMaxSequenceNumber)); + range_del_agg.AddTombstones(std::move(input_iter)); + } -TEST_F(RangeDelAggregatorTest, OverlappingBoundaryGapContainsTombstone) { - const InternalKey smallest1("b", kMaxSequenceNumber, kTypeRangeDeletion); - const InternalKey largest1("c", 20, kTypeValue); - const InternalKey smallest2("c", 10, kTypeValue); - const InternalKey largest2("e", kMaxSequenceNumber, kTypeRangeDeletion); - VerifyRangeDels( - {{{{"b", "d", 15}}, &smallest1, &largest1}, - {{{"b", "d", 15}}, &smallest2, &largest2}}, - {{"b", 15, false}, // not truncated - {"c", 15, true}, // truncated (keys in this range should not exist) - {"c", 11, false}}, // not truncated here - {{"b", "c", 15}, {"c", "d", 15}}); // not collapsed due to boundaries -} + Slice start("p"); + Slice end("q"); + auto range_del_compaction_iter1 = + range_del_agg.NewIterator(&start, &end, false /* end_key_inclusive */); + VerifyFragmentedRangeDels(range_del_compaction_iter1.get(), {}); + + auto range_del_compaction_iter2 = + range_del_agg.NewIterator(&start, &end, true /* 
end_key_inclusive */); + VerifyFragmentedRangeDels(range_del_compaction_iter2.get(), {}); +} + +TEST_F(RangeDelAggregatorTest, CompactionAggregatorBoundedIterator) { + auto fragment_lists = MakeFragmentedTombstoneLists( + {{{"a", "e", 10}, {"c", "g", 8}}, + {{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}}); + + std::vector snapshots{9, 19}; + CompactionRangeDelAggregator range_del_agg(&bytewise_icmp, snapshots); + for (const auto& fragment_list : fragment_lists) { + std::unique_ptr input_iter( + new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp, + kMaxSequenceNumber)); + range_del_agg.AddTombstones(std::move(input_iter)); + } -TEST_F(RangeDelAggregatorTest, FileCoversOneKeyAndTombstoneAbove) { - const InternalKey smallest("a", kMaxSequenceNumber, kTypeRangeDeletion); - const InternalKey largest("a", 20, kTypeValue); - VerifyRangeDels( - {{{{"a", "b", 35}}, &smallest, &largest}}, - {{"a", 40, true}, // not truncated - {"a", 35, false}}, // not truncated - {{"a", "a", 35}}); // empty tombstone but can't occur during a compaction -} + Slice start("bb"); + Slice end("e"); + auto range_del_compaction_iter1 = + range_del_agg.NewIterator(&start, &end, false /* end_key_inclusive */); + VerifyFragmentedRangeDels(range_del_compaction_iter1.get(), + {{"a", "c", 10}, {"c", "e", 10}, {"c", "e", 8}}); + + auto range_del_compaction_iter2 = + range_del_agg.NewIterator(&start, &end, true /* end_key_inclusive */); + VerifyFragmentedRangeDels( + range_del_compaction_iter2.get(), + {{"a", "c", 10}, {"c", "e", 10}, {"c", "e", 8}, {"e", "g", 8}}); +} + +TEST_F(RangeDelAggregatorTest, + CompactionAggregatorBoundedIteratorExtraFragments) { + auto fragment_lists = MakeFragmentedTombstoneLists( + {{{"a", "d", 10}, {"c", "g", 8}}, + {{"b", "c", 20}, {"d", "f", 30}, {"h", "i", 25}, {"ii", "j", 15}}}); + + std::vector snapshots{9, 19}; + CompactionRangeDelAggregator range_del_agg(&bytewise_icmp, snapshots); + for (const auto& fragment_list : fragment_lists) { + 
std::unique_ptr input_iter( + new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp, + kMaxSequenceNumber)); + range_del_agg.AddTombstones(std::move(input_iter)); + } -TEST_F(RangeDelAggregatorTest, FileCoversOneKeyAndTombstoneBelow) { - const InternalKey smallest("a", kMaxSequenceNumber, kTypeRangeDeletion); - const InternalKey largest("a", 20, kTypeValue); - VerifyRangeDels( - {{{{"a", "b", 15}}, &smallest, &largest}}, - {{"a", 20, true}, // truncated here - {"a", 15, true}}, // truncated - {{"a", "a", 15}}); // empty tombstone but can't occur during a compaction + Slice start("bb"); + Slice end("e"); + auto range_del_compaction_iter1 = + range_del_agg.NewIterator(&start, &end, false /* end_key_inclusive */); + VerifyFragmentedRangeDels(range_del_compaction_iter1.get(), {{"a", "b", 10}, + {"b", "c", 20}, + {"b", "c", 10}, + {"c", "d", 10}, + {"c", "d", 8}, + {"d", "f", 30}, + {"d", "f", 8}, + {"f", "g", 8}}); + + auto range_del_compaction_iter2 = + range_del_agg.NewIterator(&start, &end, true /* end_key_inclusive */); + VerifyFragmentedRangeDels(range_del_compaction_iter2.get(), {{"a", "b", 10}, + {"b", "c", 20}, + {"b", "c", 10}, + {"c", "d", 10}, + {"c", "d", 8}, + {"d", "f", 30}, + {"d", "f", 8}, + {"f", "g", 8}}); } } // namespace rocksdb diff --git a/db/range_del_aggregator_v2.cc b/db/range_del_aggregator_v2.cc deleted file mode 100644 index b0667f6fd..000000000 --- a/db/range_del_aggregator_v2.cc +++ /dev/null @@ -1,492 +0,0 @@ -// Copyright (c) 2018-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). 
- -#include "db/range_del_aggregator_v2.h" - -#include "db/compaction_iteration_stats.h" -#include "db/dbformat.h" -#include "db/pinned_iterators_manager.h" -#include "db/range_del_aggregator.h" -#include "db/range_tombstone_fragmenter.h" -#include "db/version_edit.h" -#include "include/rocksdb/comparator.h" -#include "include/rocksdb/types.h" -#include "table/internal_iterator.h" -#include "table/scoped_arena_iterator.h" -#include "table/table_builder.h" -#include "util/heap.h" -#include "util/kv_map.h" -#include "util/vector_iterator.h" - -namespace rocksdb { - -TruncatedRangeDelIterator::TruncatedRangeDelIterator( - std::unique_ptr iter, - const InternalKeyComparator* icmp, const InternalKey* smallest, - const InternalKey* largest) - : iter_(std::move(iter)), - icmp_(icmp), - smallest_ikey_(smallest), - largest_ikey_(largest) { - if (smallest != nullptr) { - pinned_bounds_.emplace_back(); - auto& parsed_smallest = pinned_bounds_.back(); - if (!ParseInternalKey(smallest->Encode(), &parsed_smallest)) { - assert(false); - } - smallest_ = &parsed_smallest; - } - if (largest != nullptr) { - pinned_bounds_.emplace_back(); - auto& parsed_largest = pinned_bounds_.back(); - if (!ParseInternalKey(largest->Encode(), &parsed_largest)) { - assert(false); - } - if (parsed_largest.type == kTypeRangeDeletion && - parsed_largest.sequence == kMaxSequenceNumber) { - // The file boundary has been artificially extended by a range tombstone. - // We do not need to adjust largest to properly truncate range - // tombstones that extend past the boundary. - } else if (parsed_largest.sequence == 0) { - // The largest key in the sstable has a sequence number of 0. Since we - // guarantee that no internal keys with the same user key and sequence - // number can exist in a DB, we know that the largest key in this sstable - // cannot exist as the smallest key in the next sstable. 
This further - // implies that no range tombstone in this sstable covers largest; - // otherwise, the file boundary would have been artificially extended. - // - // Therefore, we will never truncate a range tombstone at largest, so we - // can leave it unchanged. - } else { - // The same user key may straddle two sstable boundaries. To ensure that - // the truncated end key can cover the largest key in this sstable, reduce - // its sequence number by 1. - parsed_largest.sequence -= 1; - } - largest_ = &parsed_largest; - } -} - -bool TruncatedRangeDelIterator::Valid() const { - return iter_->Valid() && - (smallest_ == nullptr || - icmp_->Compare(*smallest_, iter_->parsed_end_key()) < 0) && - (largest_ == nullptr || - icmp_->Compare(iter_->parsed_start_key(), *largest_) < 0); -} - -void TruncatedRangeDelIterator::Next() { iter_->TopNext(); } - -void TruncatedRangeDelIterator::Prev() { iter_->TopPrev(); } - -void TruncatedRangeDelIterator::InternalNext() { iter_->Next(); } - -// NOTE: target is a user key -void TruncatedRangeDelIterator::Seek(const Slice& target) { - if (largest_ != nullptr && - icmp_->Compare(*largest_, ParsedInternalKey(target, kMaxSequenceNumber, - kTypeRangeDeletion)) <= 0) { - iter_->Invalidate(); - return; - } - if (smallest_ != nullptr && - icmp_->user_comparator()->Compare(target, smallest_->user_key) < 0) { - iter_->Seek(smallest_->user_key); - return; - } - iter_->Seek(target); -} - -// NOTE: target is a user key -void TruncatedRangeDelIterator::SeekForPrev(const Slice& target) { - if (smallest_ != nullptr && - icmp_->Compare(ParsedInternalKey(target, 0, kTypeRangeDeletion), - *smallest_) < 0) { - iter_->Invalidate(); - return; - } - if (largest_ != nullptr && - icmp_->user_comparator()->Compare(largest_->user_key, target) < 0) { - iter_->SeekForPrev(largest_->user_key); - return; - } - iter_->SeekForPrev(target); -} - -void TruncatedRangeDelIterator::SeekToFirst() { - if (smallest_ != nullptr) { - iter_->Seek(smallest_->user_key); - return; 
- } - iter_->SeekToTopFirst(); -} - -void TruncatedRangeDelIterator::SeekToLast() { - if (largest_ != nullptr) { - iter_->SeekForPrev(largest_->user_key); - return; - } - iter_->SeekToTopLast(); -} - -std::map> -TruncatedRangeDelIterator::SplitBySnapshot( - const std::vector& snapshots) { - using FragmentedIterPair = - std::pair>; - - auto split_untruncated_iters = iter_->SplitBySnapshot(snapshots); - std::map> - split_truncated_iters; - std::for_each( - split_untruncated_iters.begin(), split_untruncated_iters.end(), - [&](FragmentedIterPair& iter_pair) { - std::unique_ptr truncated_iter( - new TruncatedRangeDelIterator(std::move(iter_pair.second), icmp_, - smallest_ikey_, largest_ikey_)); - split_truncated_iters.emplace(iter_pair.first, - std::move(truncated_iter)); - }); - return split_truncated_iters; -} - -ForwardRangeDelIterator::ForwardRangeDelIterator( - const InternalKeyComparator* icmp, - const std::vector>* iters) - : icmp_(icmp), - iters_(iters), - unused_idx_(0), - active_seqnums_(SeqMaxComparator()), - active_iters_(EndKeyMinComparator(icmp)), - inactive_iters_(StartKeyMinComparator(icmp)) {} - -bool ForwardRangeDelIterator::ShouldDelete(const ParsedInternalKey& parsed) { - assert(iters_ != nullptr); - // Move active iterators that end before parsed. - while (!active_iters_.empty() && - icmp_->Compare((*active_iters_.top())->end_key(), parsed) <= 0) { - TruncatedRangeDelIterator* iter = PopActiveIter(); - do { - iter->Next(); - } while (iter->Valid() && icmp_->Compare(iter->end_key(), parsed) <= 0); - PushIter(iter, parsed); - assert(active_iters_.size() == active_seqnums_.size()); - } - - // Move inactive iterators that start before parsed. 
- while (!inactive_iters_.empty() && - icmp_->Compare(inactive_iters_.top()->start_key(), parsed) <= 0) { - TruncatedRangeDelIterator* iter = PopInactiveIter(); - while (iter->Valid() && icmp_->Compare(iter->end_key(), parsed) <= 0) { - iter->Next(); - } - PushIter(iter, parsed); - assert(active_iters_.size() == active_seqnums_.size()); - } - - return active_seqnums_.empty() - ? false - : (*active_seqnums_.begin())->seq() > parsed.sequence; -} - -void ForwardRangeDelIterator::Invalidate() { - unused_idx_ = 0; - active_iters_.clear(); - active_seqnums_.clear(); - inactive_iters_.clear(); -} - -ReverseRangeDelIterator::ReverseRangeDelIterator( - const InternalKeyComparator* icmp, - const std::vector>* iters) - : icmp_(icmp), - iters_(iters), - unused_idx_(0), - active_seqnums_(SeqMaxComparator()), - active_iters_(StartKeyMaxComparator(icmp)), - inactive_iters_(EndKeyMaxComparator(icmp)) {} - -bool ReverseRangeDelIterator::ShouldDelete(const ParsedInternalKey& parsed) { - assert(iters_ != nullptr); - // Move active iterators that start after parsed. - while (!active_iters_.empty() && - icmp_->Compare(parsed, (*active_iters_.top())->start_key()) < 0) { - TruncatedRangeDelIterator* iter = PopActiveIter(); - do { - iter->Prev(); - } while (iter->Valid() && icmp_->Compare(parsed, iter->start_key()) < 0); - PushIter(iter, parsed); - assert(active_iters_.size() == active_seqnums_.size()); - } - - // Move inactive iterators that end after parsed. - while (!inactive_iters_.empty() && - icmp_->Compare(parsed, inactive_iters_.top()->end_key()) < 0) { - TruncatedRangeDelIterator* iter = PopInactiveIter(); - while (iter->Valid() && icmp_->Compare(parsed, iter->start_key()) < 0) { - iter->Prev(); - } - PushIter(iter, parsed); - assert(active_iters_.size() == active_seqnums_.size()); - } - - return active_seqnums_.empty() - ? 
false - : (*active_seqnums_.begin())->seq() > parsed.sequence; -} - -void ReverseRangeDelIterator::Invalidate() { - unused_idx_ = 0; - active_iters_.clear(); - active_seqnums_.clear(); - inactive_iters_.clear(); -} - -bool RangeDelAggregatorV2::StripeRep::ShouldDelete( - const ParsedInternalKey& parsed, RangeDelPositioningMode mode) { - if (!InStripe(parsed.sequence) || IsEmpty()) { - return false; - } - switch (mode) { - case RangeDelPositioningMode::kForwardTraversal: - InvalidateReverseIter(); - - // Pick up previously unseen iterators. - for (auto it = std::next(iters_.begin(), forward_iter_.UnusedIdx()); - it != iters_.end(); ++it, forward_iter_.IncUnusedIdx()) { - auto& iter = *it; - forward_iter_.AddNewIter(iter.get(), parsed); - } - - return forward_iter_.ShouldDelete(parsed); - case RangeDelPositioningMode::kBackwardTraversal: - InvalidateForwardIter(); - - // Pick up previously unseen iterators. - for (auto it = std::next(iters_.begin(), reverse_iter_.UnusedIdx()); - it != iters_.end(); ++it, reverse_iter_.IncUnusedIdx()) { - auto& iter = *it; - reverse_iter_.AddNewIter(iter.get(), parsed); - } - - return reverse_iter_.ShouldDelete(parsed); - default: - assert(false); - return false; - } -} - -bool RangeDelAggregatorV2::StripeRep::IsRangeOverlapped(const Slice& start, - const Slice& end) { - Invalidate(); - - // Set the internal start/end keys so that: - // - if start_ikey has the same user key and sequence number as the - // current end key, start_ikey will be considered greater; and - // - if end_ikey has the same user key and sequence number as the current - // start key, end_ikey will be considered greater. 
- ParsedInternalKey start_ikey(start, kMaxSequenceNumber, - static_cast(0)); - ParsedInternalKey end_ikey(end, 0, static_cast(0)); - for (auto& iter : iters_) { - bool checked_candidate_tombstones = false; - for (iter->SeekForPrev(start); - iter->Valid() && icmp_->Compare(iter->start_key(), end_ikey) <= 0; - iter->Next()) { - checked_candidate_tombstones = true; - if (icmp_->Compare(start_ikey, iter->end_key()) < 0 && - icmp_->Compare(iter->start_key(), end_ikey) <= 0) { - return true; - } - } - - if (!checked_candidate_tombstones) { - // Do an additional check for when the end of the range is the begin - // key of a tombstone, which we missed earlier since SeekForPrev'ing - // to the start was invalid. - iter->SeekForPrev(end); - if (iter->Valid() && icmp_->Compare(start_ikey, iter->end_key()) < 0 && - icmp_->Compare(iter->start_key(), end_ikey) <= 0) { - return true; - } - } - } - return false; -} - -void ReadRangeDelAggregatorV2::AddTombstones( - std::unique_ptr input_iter, - const InternalKey* smallest, const InternalKey* largest) { - if (input_iter == nullptr || input_iter->empty()) { - return; - } - rep_.AddTombstones( - std::unique_ptr(new TruncatedRangeDelIterator( - std::move(input_iter), icmp_, smallest, largest))); -} - -bool ReadRangeDelAggregatorV2::ShouldDelete(const ParsedInternalKey& parsed, - RangeDelPositioningMode mode) { - return rep_.ShouldDelete(parsed, mode); -} - -bool ReadRangeDelAggregatorV2::IsRangeOverlapped(const Slice& start, - const Slice& end) { - InvalidateRangeDelMapPositions(); - return rep_.IsRangeOverlapped(start, end); -} - -void CompactionRangeDelAggregatorV2::AddTombstones( - std::unique_ptr input_iter, - const InternalKey* smallest, const InternalKey* largest) { - if (input_iter == nullptr || input_iter->empty()) { - return; - } - assert(input_iter->lower_bound() == 0); - assert(input_iter->upper_bound() == kMaxSequenceNumber); - parent_iters_.emplace_back(new TruncatedRangeDelIterator( - std::move(input_iter), icmp_, 
smallest, largest)); - - auto split_iters = parent_iters_.back()->SplitBySnapshot(*snapshots_); - for (auto& split_iter : split_iters) { - auto it = reps_.find(split_iter.first); - if (it == reps_.end()) { - bool inserted; - SequenceNumber upper_bound = split_iter.second->upper_bound(); - SequenceNumber lower_bound = split_iter.second->lower_bound(); - std::tie(it, inserted) = reps_.emplace( - split_iter.first, StripeRep(icmp_, upper_bound, lower_bound)); - assert(inserted); - } - assert(it != reps_.end()); - it->second.AddTombstones(std::move(split_iter.second)); - } -} - -bool CompactionRangeDelAggregatorV2::ShouldDelete( - const ParsedInternalKey& parsed, RangeDelPositioningMode mode) { - auto it = reps_.lower_bound(parsed.sequence); - if (it == reps_.end()) { - return false; - } - return it->second.ShouldDelete(parsed, mode); -} - -namespace { - -class TruncatedRangeDelMergingIter : public InternalIterator { - public: - TruncatedRangeDelMergingIter( - const InternalKeyComparator* icmp, const Slice* lower_bound, - const Slice* upper_bound, bool upper_bound_inclusive, - const std::vector>& children) - : icmp_(icmp), - lower_bound_(lower_bound), - upper_bound_(upper_bound), - upper_bound_inclusive_(upper_bound_inclusive), - heap_(StartKeyMinComparator(icmp)) { - for (auto& child : children) { - if (child != nullptr) { - assert(child->lower_bound() == 0); - assert(child->upper_bound() == kMaxSequenceNumber); - children_.push_back(child.get()); - } - } - } - - bool Valid() const override { - return !heap_.empty() && BeforeEndKey(heap_.top()); - } - Status status() const override { return Status::OK(); } - - void SeekToFirst() override { - heap_.clear(); - for (auto& child : children_) { - if (lower_bound_ != nullptr) { - child->Seek(*lower_bound_); - } else { - child->SeekToFirst(); - } - if (child->Valid()) { - heap_.push(child); - } - } - } - - void Next() override { - auto* top = heap_.top(); - top->InternalNext(); - if (top->Valid()) { - heap_.replace_top(top); 
- } else { - heap_.pop(); - } - } - - Slice key() const override { - auto* top = heap_.top(); - cur_start_key_.Set(top->start_key().user_key, top->seq(), - kTypeRangeDeletion); - return cur_start_key_.Encode(); - } - - Slice value() const override { - auto* top = heap_.top(); - assert(top->end_key().sequence == kMaxSequenceNumber); - return top->end_key().user_key; - } - - // Unused InternalIterator methods - void Prev() override { assert(false); } - void Seek(const Slice& /* target */) override { assert(false); } - void SeekForPrev(const Slice& /* target */) override { assert(false); } - void SeekToLast() override { assert(false); } - - private: - bool BeforeEndKey(const TruncatedRangeDelIterator* iter) const { - if (upper_bound_ == nullptr) { - return true; - } - int cmp = icmp_->user_comparator()->Compare(iter->start_key().user_key, - *upper_bound_); - return upper_bound_inclusive_ ? cmp <= 0 : cmp < 0; - } - - const InternalKeyComparator* icmp_; - const Slice* lower_bound_; - const Slice* upper_bound_; - bool upper_bound_inclusive_; - BinaryHeap heap_; - std::vector children_; - - mutable InternalKey cur_start_key_; -}; - -} // namespace - -std::unique_ptr -CompactionRangeDelAggregatorV2::NewIterator(const Slice* lower_bound, - const Slice* upper_bound, - bool upper_bound_inclusive) { - InvalidateRangeDelMapPositions(); - std::unique_ptr merging_iter( - new TruncatedRangeDelMergingIter(icmp_, lower_bound, upper_bound, - upper_bound_inclusive, parent_iters_)); - - // TODO: add tests where tombstone fragments can be outside of upper and lower - // bound range - auto fragmented_tombstone_list = - std::make_shared( - std::move(merging_iter), *icmp_, true /* for_compaction */, - *snapshots_); - - return std::unique_ptr( - new FragmentedRangeTombstoneIterator( - fragmented_tombstone_list, *icmp_, - kMaxSequenceNumber /* upper_bound */)); -} - -} // namespace rocksdb diff --git a/db/range_del_aggregator_v2.h b/db/range_del_aggregator_v2.h deleted file mode 100644 
index 306dbf249..000000000 --- a/db/range_del_aggregator_v2.h +++ /dev/null @@ -1,436 +0,0 @@ -// Copyright (c) 2018-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#pragma once - -#include -#include -#include -#include -#include -#include -#include - -#include "db/compaction_iteration_stats.h" -#include "db/dbformat.h" -#include "db/pinned_iterators_manager.h" -#include "db/range_del_aggregator.h" -#include "db/range_tombstone_fragmenter.h" -#include "db/version_edit.h" -#include "include/rocksdb/comparator.h" -#include "include/rocksdb/types.h" -#include "table/internal_iterator.h" -#include "table/scoped_arena_iterator.h" -#include "table/table_builder.h" -#include "util/heap.h" -#include "util/kv_map.h" - -namespace rocksdb { - -class TruncatedRangeDelIterator { - public: - TruncatedRangeDelIterator( - std::unique_ptr iter, - const InternalKeyComparator* icmp, const InternalKey* smallest, - const InternalKey* largest); - - bool Valid() const; - - void Next(); - void Prev(); - - void InternalNext(); - - // Seeks to the tombstone with the highest viisble sequence number that covers - // target (a user key). If no such tombstone exists, the position will be at - // the earliest tombstone that ends after target. - void Seek(const Slice& target); - - // Seeks to the tombstone with the highest viisble sequence number that covers - // target (a user key). If no such tombstone exists, the position will be at - // the latest tombstone that starts before target. - void SeekForPrev(const Slice& target); - - void SeekToFirst(); - void SeekToLast(); - - ParsedInternalKey start_key() const { - return (smallest_ == nullptr || - icmp_->Compare(*smallest_, iter_->parsed_start_key()) <= 0) - ? 
iter_->parsed_start_key() - : *smallest_; - } - - ParsedInternalKey end_key() const { - return (largest_ == nullptr || - icmp_->Compare(iter_->parsed_end_key(), *largest_) <= 0) - ? iter_->parsed_end_key() - : *largest_; - } - - SequenceNumber seq() const { return iter_->seq(); } - - std::map> - SplitBySnapshot(const std::vector& snapshots); - - SequenceNumber upper_bound() const { return iter_->upper_bound(); } - - SequenceNumber lower_bound() const { return iter_->lower_bound(); } - - private: - std::unique_ptr iter_; - const InternalKeyComparator* icmp_; - const ParsedInternalKey* smallest_ = nullptr; - const ParsedInternalKey* largest_ = nullptr; - std::list pinned_bounds_; - - const InternalKey* smallest_ikey_; - const InternalKey* largest_ikey_; -}; - -struct SeqMaxComparator { - bool operator()(const TruncatedRangeDelIterator* a, - const TruncatedRangeDelIterator* b) const { - return a->seq() > b->seq(); - } -}; - -struct StartKeyMinComparator { - explicit StartKeyMinComparator(const InternalKeyComparator* c) : icmp(c) {} - - bool operator()(const TruncatedRangeDelIterator* a, - const TruncatedRangeDelIterator* b) const { - return icmp->Compare(a->start_key(), b->start_key()) > 0; - } - - const InternalKeyComparator* icmp; -}; - -class ForwardRangeDelIterator { - public: - ForwardRangeDelIterator( - const InternalKeyComparator* icmp, - const std::vector>* iters); - - bool ShouldDelete(const ParsedInternalKey& parsed); - void Invalidate(); - - void AddNewIter(TruncatedRangeDelIterator* iter, - const ParsedInternalKey& parsed) { - iter->Seek(parsed.user_key); - PushIter(iter, parsed); - assert(active_iters_.size() == active_seqnums_.size()); - } - - size_t UnusedIdx() const { return unused_idx_; } - void IncUnusedIdx() { unused_idx_++; } - - private: - using ActiveSeqSet = - std::multiset; - - struct EndKeyMinComparator { - explicit EndKeyMinComparator(const InternalKeyComparator* c) : icmp(c) {} - - bool operator()(const ActiveSeqSet::const_iterator& a, - 
const ActiveSeqSet::const_iterator& b) const { - return icmp->Compare((*a)->end_key(), (*b)->end_key()) > 0; - } - - const InternalKeyComparator* icmp; - }; - - void PushIter(TruncatedRangeDelIterator* iter, - const ParsedInternalKey& parsed) { - if (!iter->Valid()) { - // The iterator has been fully consumed, so we don't need to add it to - // either of the heaps. - return; - } - int cmp = icmp_->Compare(parsed, iter->start_key()); - if (cmp < 0) { - PushInactiveIter(iter); - } else { - PushActiveIter(iter); - } - } - - void PushActiveIter(TruncatedRangeDelIterator* iter) { - auto seq_pos = active_seqnums_.insert(iter); - active_iters_.push(seq_pos); - } - - TruncatedRangeDelIterator* PopActiveIter() { - auto active_top = active_iters_.top(); - auto iter = *active_top; - active_iters_.pop(); - active_seqnums_.erase(active_top); - return iter; - } - - void PushInactiveIter(TruncatedRangeDelIterator* iter) { - inactive_iters_.push(iter); - } - - TruncatedRangeDelIterator* PopInactiveIter() { - auto* iter = inactive_iters_.top(); - inactive_iters_.pop(); - return iter; - } - - const InternalKeyComparator* icmp_; - const std::vector>* iters_; - size_t unused_idx_; - ActiveSeqSet active_seqnums_; - BinaryHeap active_iters_; - BinaryHeap inactive_iters_; -}; - -class ReverseRangeDelIterator { - public: - ReverseRangeDelIterator( - const InternalKeyComparator* icmp, - const std::vector>* iters); - - bool ShouldDelete(const ParsedInternalKey& parsed); - void Invalidate(); - - void AddNewIter(TruncatedRangeDelIterator* iter, - const ParsedInternalKey& parsed) { - iter->SeekForPrev(parsed.user_key); - PushIter(iter, parsed); - assert(active_iters_.size() == active_seqnums_.size()); - } - - size_t UnusedIdx() const { return unused_idx_; } - void IncUnusedIdx() { unused_idx_++; } - - private: - using ActiveSeqSet = - std::multiset; - - struct EndKeyMaxComparator { - explicit EndKeyMaxComparator(const InternalKeyComparator* c) : icmp(c) {} - - bool operator()(const 
TruncatedRangeDelIterator* a, - const TruncatedRangeDelIterator* b) const { - return icmp->Compare(a->end_key(), b->end_key()) < 0; - } - - const InternalKeyComparator* icmp; - }; - struct StartKeyMaxComparator { - explicit StartKeyMaxComparator(const InternalKeyComparator* c) : icmp(c) {} - - bool operator()(const ActiveSeqSet::const_iterator& a, - const ActiveSeqSet::const_iterator& b) const { - return icmp->Compare((*a)->start_key(), (*b)->start_key()) < 0; - } - - const InternalKeyComparator* icmp; - }; - - void PushIter(TruncatedRangeDelIterator* iter, - const ParsedInternalKey& parsed) { - if (!iter->Valid()) { - // The iterator has been fully consumed, so we don't need to add it to - // either of the heaps. - } else if (icmp_->Compare(iter->end_key(), parsed) <= 0) { - PushInactiveIter(iter); - } else { - PushActiveIter(iter); - } - } - - void PushActiveIter(TruncatedRangeDelIterator* iter) { - auto seq_pos = active_seqnums_.insert(iter); - active_iters_.push(seq_pos); - } - - TruncatedRangeDelIterator* PopActiveIter() { - auto active_top = active_iters_.top(); - auto iter = *active_top; - active_iters_.pop(); - active_seqnums_.erase(active_top); - return iter; - } - - void PushInactiveIter(TruncatedRangeDelIterator* iter) { - inactive_iters_.push(iter); - } - - TruncatedRangeDelIterator* PopInactiveIter() { - auto* iter = inactive_iters_.top(); - inactive_iters_.pop(); - return iter; - } - - const InternalKeyComparator* icmp_; - const std::vector>* iters_; - size_t unused_idx_; - ActiveSeqSet active_seqnums_; - BinaryHeap active_iters_; - BinaryHeap inactive_iters_; -}; - -class RangeDelAggregatorV2 { - public: - explicit RangeDelAggregatorV2(const InternalKeyComparator* icmp) - : icmp_(icmp) {} - virtual ~RangeDelAggregatorV2() {} - - virtual void AddTombstones( - std::unique_ptr input_iter, - const InternalKey* smallest = nullptr, - const InternalKey* largest = nullptr) = 0; - - bool ShouldDelete(const Slice& key, RangeDelPositioningMode mode) { - 
ParsedInternalKey parsed; - if (!ParseInternalKey(key, &parsed)) { - return false; - } - return ShouldDelete(parsed, mode); - } - virtual bool ShouldDelete(const ParsedInternalKey& parsed, - RangeDelPositioningMode mode) = 0; - - virtual void InvalidateRangeDelMapPositions() = 0; - - virtual bool IsEmpty() const = 0; - - bool AddFile(uint64_t file_number) { - return files_seen_.insert(file_number).second; - } - - protected: - class StripeRep { - public: - StripeRep(const InternalKeyComparator* icmp, SequenceNumber upper_bound, - SequenceNumber lower_bound) - : icmp_(icmp), - forward_iter_(icmp, &iters_), - reverse_iter_(icmp, &iters_), - upper_bound_(upper_bound), - lower_bound_(lower_bound) {} - - void AddTombstones(std::unique_ptr input_iter) { - iters_.push_back(std::move(input_iter)); - } - - bool IsEmpty() const { return iters_.empty(); } - - bool ShouldDelete(const ParsedInternalKey& parsed, - RangeDelPositioningMode mode); - - void Invalidate() { - InvalidateForwardIter(); - InvalidateReverseIter(); - } - - bool IsRangeOverlapped(const Slice& start, const Slice& end); - - private: - bool InStripe(SequenceNumber seq) const { - return lower_bound_ <= seq && seq <= upper_bound_; - } - - void InvalidateForwardIter() { forward_iter_.Invalidate(); } - - void InvalidateReverseIter() { reverse_iter_.Invalidate(); } - - const InternalKeyComparator* icmp_; - std::vector> iters_; - ForwardRangeDelIterator forward_iter_; - ReverseRangeDelIterator reverse_iter_; - SequenceNumber upper_bound_; - SequenceNumber lower_bound_; - }; - - const InternalKeyComparator* icmp_; - - private: - std::set files_seen_; -}; - -class ReadRangeDelAggregatorV2 : public RangeDelAggregatorV2 { - public: - ReadRangeDelAggregatorV2(const InternalKeyComparator* icmp, - SequenceNumber upper_bound) - : RangeDelAggregatorV2(icmp), - rep_(icmp, upper_bound, 0 /* lower_bound */) {} - ~ReadRangeDelAggregatorV2() override {} - - using RangeDelAggregatorV2::ShouldDelete; - void AddTombstones( - 
std::unique_ptr input_iter, - const InternalKey* smallest = nullptr, - const InternalKey* largest = nullptr) override; - - bool ShouldDelete(const ParsedInternalKey& parsed, - RangeDelPositioningMode mode) override; - - bool IsRangeOverlapped(const Slice& start, const Slice& end); - - void InvalidateRangeDelMapPositions() override { rep_.Invalidate(); } - - bool IsEmpty() const override { return rep_.IsEmpty(); } - - private: - StripeRep rep_; -}; - -class CompactionRangeDelAggregatorV2 : public RangeDelAggregatorV2 { - public: - CompactionRangeDelAggregatorV2(const InternalKeyComparator* icmp, - const std::vector& snapshots) - : RangeDelAggregatorV2(icmp), snapshots_(&snapshots) {} - ~CompactionRangeDelAggregatorV2() override {} - - void AddTombstones( - std::unique_ptr input_iter, - const InternalKey* smallest = nullptr, - const InternalKey* largest = nullptr) override; - - using RangeDelAggregatorV2::ShouldDelete; - bool ShouldDelete(const ParsedInternalKey& parsed, - RangeDelPositioningMode mode) override; - - bool IsRangeOverlapped(const Slice& start, const Slice& end); - - void InvalidateRangeDelMapPositions() override { - for (auto& rep : reps_) { - rep.second.Invalidate(); - } - } - - bool IsEmpty() const override { - for (const auto& rep : reps_) { - if (!rep.second.IsEmpty()) { - return false; - } - } - return true; - } - - // Creates an iterator over all the range tombstones in the aggregator, for - // use in compaction. Nullptr arguments indicate that the iterator range is - // unbounded. - // NOTE: the boundaries are used for optimization purposes to reduce the - // number of tombstones that are passed to the fragmenter; they do not - // guarantee that the resulting iterator only contains range tombstones that - // cover keys in the provided range. If required, these bounds must be - // enforced during iteration. 
- std::unique_ptr NewIterator( - const Slice* lower_bound = nullptr, const Slice* upper_bound = nullptr, - bool upper_bound_inclusive = false); - - private: - std::vector> parent_iters_; - std::map reps_; - - const std::vector* snapshots_; -}; - -} // namespace rocksdb diff --git a/db/range_del_aggregator_v2_test.cc b/db/range_del_aggregator_v2_test.cc deleted file mode 100644 index 64f8ed079..000000000 --- a/db/range_del_aggregator_v2_test.cc +++ /dev/null @@ -1,709 +0,0 @@ -// Copyright (c) 2018-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#include "db/range_del_aggregator_v2.h" - -#include -#include -#include - -#include "db/db_test_util.h" -#include "db/dbformat.h" -#include "db/range_tombstone_fragmenter.h" -#include "util/testutil.h" - -namespace rocksdb { - -class RangeDelAggregatorV2Test : public testing::Test {}; - -namespace { - -static auto bytewise_icmp = InternalKeyComparator(BytewiseComparator()); - -std::unique_ptr MakeRangeDelIter( - const std::vector& range_dels) { - std::vector keys, values; - for (const auto& range_del : range_dels) { - auto key_and_value = range_del.Serialize(); - keys.push_back(key_and_value.first.Encode().ToString()); - values.push_back(key_and_value.second.ToString()); - } - return std::unique_ptr( - new test::VectorIterator(keys, values)); -} - -std::vector> -MakeFragmentedTombstoneLists( - const std::vector>& range_dels_list) { - std::vector> fragment_lists; - for (const auto& range_dels : range_dels_list) { - auto range_del_iter = MakeRangeDelIter(range_dels); - fragment_lists.emplace_back(new FragmentedRangeTombstoneList( - std::move(range_del_iter), bytewise_icmp)); - } - return fragment_lists; -} - -struct TruncatedIterScanTestCase { - ParsedInternalKey start; - ParsedInternalKey end; - SequenceNumber seq; -}; - -struct 
TruncatedIterSeekTestCase { - Slice target; - ParsedInternalKey start; - ParsedInternalKey end; - SequenceNumber seq; - bool invalid; -}; - -struct ShouldDeleteTestCase { - ParsedInternalKey lookup_key; - bool result; -}; - -struct IsRangeOverlappedTestCase { - Slice start; - Slice end; - bool result; -}; - -ParsedInternalKey UncutEndpoint(const Slice& s) { - return ParsedInternalKey(s, kMaxSequenceNumber, kTypeRangeDeletion); -} - -ParsedInternalKey InternalValue(const Slice& key, SequenceNumber seq) { - return ParsedInternalKey(key, seq, kTypeValue); -} - -void VerifyIterator( - TruncatedRangeDelIterator* iter, const InternalKeyComparator& icmp, - const std::vector& expected_range_dels) { - // Test forward iteration. - iter->SeekToFirst(); - for (size_t i = 0; i < expected_range_dels.size(); i++, iter->Next()) { - ASSERT_TRUE(iter->Valid()); - EXPECT_EQ(0, icmp.Compare(iter->start_key(), expected_range_dels[i].start)); - EXPECT_EQ(0, icmp.Compare(iter->end_key(), expected_range_dels[i].end)); - EXPECT_EQ(expected_range_dels[i].seq, iter->seq()); - } - EXPECT_FALSE(iter->Valid()); - - // Test reverse iteration. 
- iter->SeekToLast(); - std::vector reverse_expected_range_dels( - expected_range_dels.rbegin(), expected_range_dels.rend()); - for (size_t i = 0; i < reverse_expected_range_dels.size(); - i++, iter->Prev()) { - ASSERT_TRUE(iter->Valid()); - EXPECT_EQ(0, icmp.Compare(iter->start_key(), - reverse_expected_range_dels[i].start)); - EXPECT_EQ( - 0, icmp.Compare(iter->end_key(), reverse_expected_range_dels[i].end)); - EXPECT_EQ(reverse_expected_range_dels[i].seq, iter->seq()); - } - EXPECT_FALSE(iter->Valid()); -} - -void VerifySeek(TruncatedRangeDelIterator* iter, - const InternalKeyComparator& icmp, - const std::vector& test_cases) { - for (const auto& test_case : test_cases) { - iter->Seek(test_case.target); - if (test_case.invalid) { - ASSERT_FALSE(iter->Valid()); - } else { - ASSERT_TRUE(iter->Valid()); - EXPECT_EQ(0, icmp.Compare(iter->start_key(), test_case.start)); - EXPECT_EQ(0, icmp.Compare(iter->end_key(), test_case.end)); - EXPECT_EQ(test_case.seq, iter->seq()); - } - } -} - -void VerifySeekForPrev( - TruncatedRangeDelIterator* iter, const InternalKeyComparator& icmp, - const std::vector& test_cases) { - for (const auto& test_case : test_cases) { - iter->SeekForPrev(test_case.target); - if (test_case.invalid) { - ASSERT_FALSE(iter->Valid()); - } else { - ASSERT_TRUE(iter->Valid()); - EXPECT_EQ(0, icmp.Compare(iter->start_key(), test_case.start)); - EXPECT_EQ(0, icmp.Compare(iter->end_key(), test_case.end)); - EXPECT_EQ(test_case.seq, iter->seq()); - } - } -} - -void VerifyShouldDelete(RangeDelAggregatorV2* range_del_agg, - const std::vector& test_cases) { - for (const auto& test_case : test_cases) { - EXPECT_EQ( - test_case.result, - range_del_agg->ShouldDelete( - test_case.lookup_key, RangeDelPositioningMode::kForwardTraversal)); - } - for (auto it = test_cases.rbegin(); it != test_cases.rend(); ++it) { - const auto& test_case = *it; - EXPECT_EQ( - test_case.result, - range_del_agg->ShouldDelete( - test_case.lookup_key, 
RangeDelPositioningMode::kBackwardTraversal)); - } -} - -void VerifyIsRangeOverlapped( - ReadRangeDelAggregatorV2* range_del_agg, - const std::vector& test_cases) { - for (const auto& test_case : test_cases) { - EXPECT_EQ(test_case.result, - range_del_agg->IsRangeOverlapped(test_case.start, test_case.end)); - } -} - -void CheckIterPosition(const RangeTombstone& tombstone, - const FragmentedRangeTombstoneIterator* iter) { - // Test InternalIterator interface. - EXPECT_EQ(tombstone.start_key_, ExtractUserKey(iter->key())); - EXPECT_EQ(tombstone.end_key_, iter->value()); - EXPECT_EQ(tombstone.seq_, iter->seq()); - - // Test FragmentedRangeTombstoneIterator interface. - EXPECT_EQ(tombstone.start_key_, iter->start_key()); - EXPECT_EQ(tombstone.end_key_, iter->end_key()); - EXPECT_EQ(tombstone.seq_, GetInternalKeySeqno(iter->key())); -} - -void VerifyFragmentedRangeDels( - FragmentedRangeTombstoneIterator* iter, - const std::vector& expected_tombstones) { - iter->SeekToFirst(); - for (size_t i = 0; i < expected_tombstones.size(); i++, iter->Next()) { - ASSERT_TRUE(iter->Valid()); - CheckIterPosition(expected_tombstones[i], iter); - } - EXPECT_FALSE(iter->Valid()); -} - -} // namespace - -TEST_F(RangeDelAggregatorV2Test, EmptyTruncatedIter) { - auto range_del_iter = MakeRangeDelIter({}); - FragmentedRangeTombstoneList fragment_list(std::move(range_del_iter), - bytewise_icmp); - std::unique_ptr input_iter( - new FragmentedRangeTombstoneIterator(&fragment_list, bytewise_icmp, - kMaxSequenceNumber)); - - TruncatedRangeDelIterator iter(std::move(input_iter), &bytewise_icmp, nullptr, - nullptr); - - iter.SeekToFirst(); - ASSERT_FALSE(iter.Valid()); - - iter.SeekToLast(); - ASSERT_FALSE(iter.Valid()); -} - -TEST_F(RangeDelAggregatorV2Test, UntruncatedIter) { - auto range_del_iter = - MakeRangeDelIter({{"a", "e", 10}, {"e", "g", 8}, {"j", "n", 4}}); - FragmentedRangeTombstoneList fragment_list(std::move(range_del_iter), - bytewise_icmp); - std::unique_ptr input_iter( - new 
FragmentedRangeTombstoneIterator(&fragment_list, bytewise_icmp, - kMaxSequenceNumber)); - - TruncatedRangeDelIterator iter(std::move(input_iter), &bytewise_icmp, nullptr, - nullptr); - - VerifyIterator(&iter, bytewise_icmp, - {{UncutEndpoint("a"), UncutEndpoint("e"), 10}, - {UncutEndpoint("e"), UncutEndpoint("g"), 8}, - {UncutEndpoint("j"), UncutEndpoint("n"), 4}}); - - VerifySeek( - &iter, bytewise_icmp, - {{"d", UncutEndpoint("a"), UncutEndpoint("e"), 10}, - {"e", UncutEndpoint("e"), UncutEndpoint("g"), 8}, - {"ia", UncutEndpoint("j"), UncutEndpoint("n"), 4}, - {"n", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */}, - {"", UncutEndpoint("a"), UncutEndpoint("e"), 10}}); - - VerifySeekForPrev( - &iter, bytewise_icmp, - {{"d", UncutEndpoint("a"), UncutEndpoint("e"), 10}, - {"e", UncutEndpoint("e"), UncutEndpoint("g"), 8}, - {"ia", UncutEndpoint("e"), UncutEndpoint("g"), 8}, - {"n", UncutEndpoint("j"), UncutEndpoint("n"), 4}, - {"", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */}}); -} - -TEST_F(RangeDelAggregatorV2Test, UntruncatedIterWithSnapshot) { - auto range_del_iter = - MakeRangeDelIter({{"a", "e", 10}, {"e", "g", 8}, {"j", "n", 4}}); - FragmentedRangeTombstoneList fragment_list(std::move(range_del_iter), - bytewise_icmp); - std::unique_ptr input_iter( - new FragmentedRangeTombstoneIterator(&fragment_list, bytewise_icmp, - 9 /* snapshot */)); - - TruncatedRangeDelIterator iter(std::move(input_iter), &bytewise_icmp, nullptr, - nullptr); - - VerifyIterator(&iter, bytewise_icmp, - {{UncutEndpoint("e"), UncutEndpoint("g"), 8}, - {UncutEndpoint("j"), UncutEndpoint("n"), 4}}); - - VerifySeek( - &iter, bytewise_icmp, - {{"d", UncutEndpoint("e"), UncutEndpoint("g"), 8}, - {"e", UncutEndpoint("e"), UncutEndpoint("g"), 8}, - {"ia", UncutEndpoint("j"), UncutEndpoint("n"), 4}, - {"n", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */}, - {"", UncutEndpoint("e"), UncutEndpoint("g"), 8}}); - - VerifySeekForPrev( - &iter, bytewise_icmp, - 
{{"d", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */}, - {"e", UncutEndpoint("e"), UncutEndpoint("g"), 8}, - {"ia", UncutEndpoint("e"), UncutEndpoint("g"), 8}, - {"n", UncutEndpoint("j"), UncutEndpoint("n"), 4}, - {"", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */}}); -} - -TEST_F(RangeDelAggregatorV2Test, TruncatedIterPartiallyCutTombstones) { - auto range_del_iter = - MakeRangeDelIter({{"a", "e", 10}, {"e", "g", 8}, {"j", "n", 4}}); - FragmentedRangeTombstoneList fragment_list(std::move(range_del_iter), - bytewise_icmp); - std::unique_ptr input_iter( - new FragmentedRangeTombstoneIterator(&fragment_list, bytewise_icmp, - kMaxSequenceNumber)); - - InternalKey smallest("d", 7, kTypeValue); - InternalKey largest("m", 9, kTypeValue); - TruncatedRangeDelIterator iter(std::move(input_iter), &bytewise_icmp, - &smallest, &largest); - - VerifyIterator(&iter, bytewise_icmp, - {{InternalValue("d", 7), UncutEndpoint("e"), 10}, - {UncutEndpoint("e"), UncutEndpoint("g"), 8}, - {UncutEndpoint("j"), InternalValue("m", 8), 4}}); - - VerifySeek( - &iter, bytewise_icmp, - {{"d", InternalValue("d", 7), UncutEndpoint("e"), 10}, - {"e", UncutEndpoint("e"), UncutEndpoint("g"), 8}, - {"ia", UncutEndpoint("j"), InternalValue("m", 8), 4}, - {"n", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */}, - {"", InternalValue("d", 7), UncutEndpoint("e"), 10}}); - - VerifySeekForPrev( - &iter, bytewise_icmp, - {{"d", InternalValue("d", 7), UncutEndpoint("e"), 10}, - {"e", UncutEndpoint("e"), UncutEndpoint("g"), 8}, - {"ia", UncutEndpoint("e"), UncutEndpoint("g"), 8}, - {"n", UncutEndpoint("j"), InternalValue("m", 8), 4}, - {"", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */}}); -} - -TEST_F(RangeDelAggregatorV2Test, TruncatedIterFullyCutTombstones) { - auto range_del_iter = - MakeRangeDelIter({{"a", "e", 10}, {"e", "g", 8}, {"j", "n", 4}}); - FragmentedRangeTombstoneList fragment_list(std::move(range_del_iter), - bytewise_icmp); - std::unique_ptr 
input_iter( - new FragmentedRangeTombstoneIterator(&fragment_list, bytewise_icmp, - kMaxSequenceNumber)); - - InternalKey smallest("f", 7, kTypeValue); - InternalKey largest("i", 9, kTypeValue); - TruncatedRangeDelIterator iter(std::move(input_iter), &bytewise_icmp, - &smallest, &largest); - - VerifyIterator(&iter, bytewise_icmp, - {{InternalValue("f", 7), UncutEndpoint("g"), 8}}); - - VerifySeek( - &iter, bytewise_icmp, - {{"d", InternalValue("f", 7), UncutEndpoint("g"), 8}, - {"f", InternalValue("f", 7), UncutEndpoint("g"), 8}, - {"j", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */}}); - - VerifySeekForPrev( - &iter, bytewise_icmp, - {{"d", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */}, - {"f", InternalValue("f", 7), UncutEndpoint("g"), 8}, - {"j", InternalValue("f", 7), UncutEndpoint("g"), 8}}); -} - -TEST_F(RangeDelAggregatorV2Test, SingleIterInAggregator) { - auto range_del_iter = MakeRangeDelIter({{"a", "e", 10}, {"c", "g", 8}}); - FragmentedRangeTombstoneList fragment_list(std::move(range_del_iter), - bytewise_icmp); - std::unique_ptr input_iter( - new FragmentedRangeTombstoneIterator(&fragment_list, bytewise_icmp, - kMaxSequenceNumber)); - - ReadRangeDelAggregatorV2 range_del_agg(&bytewise_icmp, kMaxSequenceNumber); - range_del_agg.AddTombstones(std::move(input_iter)); - - VerifyShouldDelete(&range_del_agg, {{InternalValue("a", 19), false}, - {InternalValue("b", 9), true}, - {InternalValue("d", 9), true}, - {InternalValue("e", 7), true}, - {InternalValue("g", 7), false}}); - - VerifyIsRangeOverlapped(&range_del_agg, {{"", "_", false}, - {"_", "a", true}, - {"a", "c", true}, - {"d", "f", true}, - {"g", "l", false}}); -} - -TEST_F(RangeDelAggregatorV2Test, MultipleItersInAggregator) { - auto fragment_lists = MakeFragmentedTombstoneLists( - {{{"a", "e", 10}, {"c", "g", 8}}, - {{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}}); - - ReadRangeDelAggregatorV2 range_del_agg(&bytewise_icmp, kMaxSequenceNumber); - for (const auto& 
fragment_list : fragment_lists) { - std::unique_ptr input_iter( - new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp, - kMaxSequenceNumber)); - range_del_agg.AddTombstones(std::move(input_iter)); - } - - VerifyShouldDelete(&range_del_agg, {{InternalValue("a", 19), true}, - {InternalValue("b", 19), false}, - {InternalValue("b", 9), true}, - {InternalValue("d", 9), true}, - {InternalValue("e", 7), true}, - {InternalValue("g", 7), false}, - {InternalValue("h", 24), true}, - {InternalValue("i", 24), false}, - {InternalValue("ii", 14), true}, - {InternalValue("j", 14), false}}); - - VerifyIsRangeOverlapped(&range_del_agg, {{"", "_", false}, - {"_", "a", true}, - {"a", "c", true}, - {"d", "f", true}, - {"g", "l", true}, - {"x", "y", false}}); -} - -TEST_F(RangeDelAggregatorV2Test, MultipleItersInAggregatorWithUpperBound) { - auto fragment_lists = MakeFragmentedTombstoneLists( - {{{"a", "e", 10}, {"c", "g", 8}}, - {{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}}); - - ReadRangeDelAggregatorV2 range_del_agg(&bytewise_icmp, 19); - for (const auto& fragment_list : fragment_lists) { - std::unique_ptr input_iter( - new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp, - 19 /* snapshot */)); - range_del_agg.AddTombstones(std::move(input_iter)); - } - - VerifyShouldDelete(&range_del_agg, {{InternalValue("a", 19), false}, - {InternalValue("a", 9), true}, - {InternalValue("b", 9), true}, - {InternalValue("d", 9), true}, - {InternalValue("e", 7), true}, - {InternalValue("g", 7), false}, - {InternalValue("h", 24), false}, - {InternalValue("i", 24), false}, - {InternalValue("ii", 14), true}, - {InternalValue("j", 14), false}}); - - VerifyIsRangeOverlapped(&range_del_agg, {{"", "_", false}, - {"_", "a", true}, - {"a", "c", true}, - {"d", "f", true}, - {"g", "l", true}, - {"x", "y", false}}); -} - -TEST_F(RangeDelAggregatorV2Test, MultipleTruncatedItersInAggregator) { - auto fragment_lists = MakeFragmentedTombstoneLists( - {{{"a", "z", 10}}, 
{{"a", "z", 10}}, {{"a", "z", 10}}}); - std::vector> iter_bounds = { - {InternalKey("a", 4, kTypeValue), - InternalKey("m", kMaxSequenceNumber, kTypeRangeDeletion)}, - {InternalKey("m", 20, kTypeValue), - InternalKey("x", kMaxSequenceNumber, kTypeRangeDeletion)}, - {InternalKey("x", 5, kTypeValue), InternalKey("zz", 30, kTypeValue)}}; - - ReadRangeDelAggregatorV2 range_del_agg(&bytewise_icmp, 19); - for (size_t i = 0; i < fragment_lists.size(); i++) { - const auto& fragment_list = fragment_lists[i]; - const auto& bounds = iter_bounds[i]; - std::unique_ptr input_iter( - new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp, - 19 /* snapshot */)); - range_del_agg.AddTombstones(std::move(input_iter), &bounds.first, - &bounds.second); - } - - VerifyShouldDelete(&range_del_agg, {{InternalValue("a", 10), false}, - {InternalValue("a", 9), false}, - {InternalValue("a", 4), true}, - {InternalValue("m", 10), false}, - {InternalValue("m", 9), true}, - {InternalValue("x", 10), false}, - {InternalValue("x", 9), false}, - {InternalValue("x", 5), true}, - {InternalValue("z", 9), false}}); - - VerifyIsRangeOverlapped(&range_del_agg, {{"", "_", false}, - {"_", "a", true}, - {"a", "n", true}, - {"l", "x", true}, - {"w", "z", true}, - {"zzz", "zz", false}, - {"zz", "zzz", false}}); -} - -TEST_F(RangeDelAggregatorV2Test, MultipleTruncatedItersInAggregatorSameLevel) { - auto fragment_lists = MakeFragmentedTombstoneLists( - {{{"a", "z", 10}}, {{"a", "z", 10}}, {{"a", "z", 10}}}); - std::vector> iter_bounds = { - {InternalKey("a", 4, kTypeValue), - InternalKey("m", kMaxSequenceNumber, kTypeRangeDeletion)}, - {InternalKey("m", 20, kTypeValue), - InternalKey("x", kMaxSequenceNumber, kTypeRangeDeletion)}, - {InternalKey("x", 5, kTypeValue), InternalKey("zz", 30, kTypeValue)}}; - - ReadRangeDelAggregatorV2 range_del_agg(&bytewise_icmp, 19); - - auto add_iter_to_agg = [&](size_t i) { - std::unique_ptr input_iter( - new 
FragmentedRangeTombstoneIterator(fragment_lists[i].get(), - bytewise_icmp, 19 /* snapshot */)); - range_del_agg.AddTombstones(std::move(input_iter), &iter_bounds[i].first, - &iter_bounds[i].second); - }; - - add_iter_to_agg(0); - VerifyShouldDelete(&range_del_agg, {{InternalValue("a", 10), false}, - {InternalValue("a", 9), false}, - {InternalValue("a", 4), true}}); - - add_iter_to_agg(1); - VerifyShouldDelete(&range_del_agg, {{InternalValue("m", 10), false}, - {InternalValue("m", 9), true}}); - - add_iter_to_agg(2); - VerifyShouldDelete(&range_del_agg, {{InternalValue("x", 10), false}, - {InternalValue("x", 9), false}, - {InternalValue("x", 5), true}, - {InternalValue("z", 9), false}}); - - VerifyIsRangeOverlapped(&range_del_agg, {{"", "_", false}, - {"_", "a", true}, - {"a", "n", true}, - {"l", "x", true}, - {"w", "z", true}, - {"zzz", "zz", false}, - {"zz", "zzz", false}}); -} - -TEST_F(RangeDelAggregatorV2Test, CompactionAggregatorNoSnapshots) { - auto fragment_lists = MakeFragmentedTombstoneLists( - {{{"a", "e", 10}, {"c", "g", 8}}, - {{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}}); - - std::vector snapshots; - CompactionRangeDelAggregatorV2 range_del_agg(&bytewise_icmp, snapshots); - for (const auto& fragment_list : fragment_lists) { - std::unique_ptr input_iter( - new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp, - kMaxSequenceNumber)); - range_del_agg.AddTombstones(std::move(input_iter)); - } - - VerifyShouldDelete(&range_del_agg, {{InternalValue("a", 19), true}, - {InternalValue("b", 19), false}, - {InternalValue("b", 9), true}, - {InternalValue("d", 9), true}, - {InternalValue("e", 7), true}, - {InternalValue("g", 7), false}, - {InternalValue("h", 24), true}, - {InternalValue("i", 24), false}, - {InternalValue("ii", 14), true}, - {InternalValue("j", 14), false}}); - - auto range_del_compaction_iter = range_del_agg.NewIterator(); - VerifyFragmentedRangeDels(range_del_compaction_iter.get(), {{"a", "b", 20}, - {"b", "c", 10}, - 
{"c", "e", 10}, - {"e", "g", 8}, - {"h", "i", 25}, - {"ii", "j", 15}}); -} - -TEST_F(RangeDelAggregatorV2Test, CompactionAggregatorWithSnapshots) { - auto fragment_lists = MakeFragmentedTombstoneLists( - {{{"a", "e", 10}, {"c", "g", 8}}, - {{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}}); - - std::vector snapshots{9, 19}; - CompactionRangeDelAggregatorV2 range_del_agg(&bytewise_icmp, snapshots); - for (const auto& fragment_list : fragment_lists) { - std::unique_ptr input_iter( - new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp, - kMaxSequenceNumber)); - range_del_agg.AddTombstones(std::move(input_iter)); - } - - VerifyShouldDelete( - &range_del_agg, - { - {InternalValue("a", 19), false}, // [10, 19] - {InternalValue("a", 9), false}, // [0, 9] - {InternalValue("b", 9), false}, // [0, 9] - {InternalValue("d", 9), false}, // [0, 9] - {InternalValue("d", 7), true}, // [0, 9] - {InternalValue("e", 7), true}, // [0, 9] - {InternalValue("g", 7), false}, // [0, 9] - {InternalValue("h", 24), true}, // [20, kMaxSequenceNumber] - {InternalValue("i", 24), false}, // [20, kMaxSequenceNumber] - {InternalValue("ii", 14), true}, // [10, 19] - {InternalValue("j", 14), false} // [10, 19] - }); - - auto range_del_compaction_iter = range_del_agg.NewIterator(); - VerifyFragmentedRangeDels(range_del_compaction_iter.get(), {{"a", "b", 20}, - {"a", "b", 10}, - {"b", "c", 10}, - {"c", "e", 10}, - {"c", "e", 8}, - {"e", "g", 8}, - {"h", "i", 25}, - {"ii", "j", 15}}); -} - -TEST_F(RangeDelAggregatorV2Test, CompactionAggregatorEmptyIteratorLeft) { - auto fragment_lists = MakeFragmentedTombstoneLists( - {{{"a", "e", 10}, {"c", "g", 8}}, - {{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}}); - - std::vector snapshots{9, 19}; - CompactionRangeDelAggregatorV2 range_del_agg(&bytewise_icmp, snapshots); - for (const auto& fragment_list : fragment_lists) { - std::unique_ptr input_iter( - new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp, - 
kMaxSequenceNumber)); - range_del_agg.AddTombstones(std::move(input_iter)); - } - - Slice start("_"); - Slice end("__"); -} - -TEST_F(RangeDelAggregatorV2Test, CompactionAggregatorEmptyIteratorRight) { - auto fragment_lists = MakeFragmentedTombstoneLists( - {{{"a", "e", 10}, {"c", "g", 8}}, - {{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}}); - - std::vector snapshots{9, 19}; - CompactionRangeDelAggregatorV2 range_del_agg(&bytewise_icmp, snapshots); - for (const auto& fragment_list : fragment_lists) { - std::unique_ptr input_iter( - new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp, - kMaxSequenceNumber)); - range_del_agg.AddTombstones(std::move(input_iter)); - } - - Slice start("p"); - Slice end("q"); - auto range_del_compaction_iter1 = - range_del_agg.NewIterator(&start, &end, false /* end_key_inclusive */); - VerifyFragmentedRangeDels(range_del_compaction_iter1.get(), {}); - - auto range_del_compaction_iter2 = - range_del_agg.NewIterator(&start, &end, true /* end_key_inclusive */); - VerifyFragmentedRangeDels(range_del_compaction_iter2.get(), {}); -} - -TEST_F(RangeDelAggregatorV2Test, CompactionAggregatorBoundedIterator) { - auto fragment_lists = MakeFragmentedTombstoneLists( - {{{"a", "e", 10}, {"c", "g", 8}}, - {{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}}); - - std::vector snapshots{9, 19}; - CompactionRangeDelAggregatorV2 range_del_agg(&bytewise_icmp, snapshots); - for (const auto& fragment_list : fragment_lists) { - std::unique_ptr input_iter( - new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp, - kMaxSequenceNumber)); - range_del_agg.AddTombstones(std::move(input_iter)); - } - - Slice start("bb"); - Slice end("e"); - auto range_del_compaction_iter1 = - range_del_agg.NewIterator(&start, &end, false /* end_key_inclusive */); - VerifyFragmentedRangeDels(range_del_compaction_iter1.get(), - {{"a", "c", 10}, {"c", "e", 10}, {"c", "e", 8}}); - - auto range_del_compaction_iter2 = - 
range_del_agg.NewIterator(&start, &end, true /* end_key_inclusive */); - VerifyFragmentedRangeDels( - range_del_compaction_iter2.get(), - {{"a", "c", 10}, {"c", "e", 10}, {"c", "e", 8}, {"e", "g", 8}}); -} - -TEST_F(RangeDelAggregatorV2Test, - CompactionAggregatorBoundedIteratorExtraFragments) { - auto fragment_lists = MakeFragmentedTombstoneLists( - {{{"a", "d", 10}, {"c", "g", 8}}, - {{"b", "c", 20}, {"d", "f", 30}, {"h", "i", 25}, {"ii", "j", 15}}}); - - std::vector snapshots{9, 19}; - CompactionRangeDelAggregatorV2 range_del_agg(&bytewise_icmp, snapshots); - for (const auto& fragment_list : fragment_lists) { - std::unique_ptr input_iter( - new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp, - kMaxSequenceNumber)); - range_del_agg.AddTombstones(std::move(input_iter)); - } - - Slice start("bb"); - Slice end("e"); - auto range_del_compaction_iter1 = - range_del_agg.NewIterator(&start, &end, false /* end_key_inclusive */); - VerifyFragmentedRangeDels(range_del_compaction_iter1.get(), {{"a", "b", 10}, - {"b", "c", 20}, - {"b", "c", 10}, - {"c", "d", 10}, - {"c", "d", 8}, - {"d", "f", 30}, - {"d", "f", 8}, - {"f", "g", 8}}); - - auto range_del_compaction_iter2 = - range_del_agg.NewIterator(&start, &end, true /* end_key_inclusive */); - VerifyFragmentedRangeDels(range_del_compaction_iter2.get(), {{"a", "b", 10}, - {"b", "c", 20}, - {"b", "c", 10}, - {"c", "d", 10}, - {"c", "d", 8}, - {"d", "f", 30}, - {"d", "f", 8}, - {"f", "g", 8}}); -} - -} // namespace rocksdb - -int main(int argc, char** argv) { - ::testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/db/table_cache.cc b/db/table_cache.cc index 829f5b21f..5c0f95716 100644 --- a/db/table_cache.cc +++ b/db/table_cache.cc @@ -185,7 +185,7 @@ Status TableCache::FindTable(const EnvOptions& env_options, InternalIterator* TableCache::NewIterator( const ReadOptions& options, const EnvOptions& env_options, const InternalKeyComparator& icomparator, const FileMetaData& 
file_meta, - RangeDelAggregatorV2* range_del_agg, const SliceTransform* prefix_extractor, + RangeDelAggregator* range_del_agg, const SliceTransform* prefix_extractor, TableReader** table_reader_ptr, HistogramImpl* file_read_hist, bool for_compaction, Arena* arena, bool skip_filters, int level, const InternalKey* smallest_compaction_key, diff --git a/db/table_cache.h b/db/table_cache.h index 04485c4dc..e3936ab44 100644 --- a/db/table_cache.h +++ b/db/table_cache.h @@ -15,7 +15,7 @@ #include #include "db/dbformat.h" -#include "db/range_del_aggregator_v2.h" +#include "db/range_del_aggregator.h" #include "options/cf_options.h" #include "port/port.h" #include "rocksdb/cache.h" @@ -52,7 +52,7 @@ class TableCache { InternalIterator* NewIterator( const ReadOptions& options, const EnvOptions& toptions, const InternalKeyComparator& internal_comparator, - const FileMetaData& file_meta, RangeDelAggregatorV2* range_del_agg, + const FileMetaData& file_meta, RangeDelAggregator* range_del_agg, const SliceTransform* prefix_extractor = nullptr, TableReader** table_reader_ptr = nullptr, HistogramImpl* file_read_hist = nullptr, bool for_compaction = false, diff --git a/db/version_set.cc b/db/version_set.cc index eed67fd5c..2d6997601 100644 --- a/db/version_set.cc +++ b/db/version_set.cc @@ -459,7 +459,7 @@ class LevelIterator final : public InternalIterator { const EnvOptions& env_options, const InternalKeyComparator& icomparator, const LevelFilesBrief* flevel, const SliceTransform* prefix_extractor, bool should_sample, HistogramImpl* file_read_hist, bool for_compaction, - bool skip_filters, int level, RangeDelAggregatorV2* range_del_agg, + bool skip_filters, int level, RangeDelAggregator* range_del_agg, const std::vector* compaction_boundaries = nullptr) : table_cache_(table_cache), @@ -571,7 +571,7 @@ class LevelIterator final : public InternalIterator { bool skip_filters_; size_t file_index_; int level_; - RangeDelAggregatorV2* range_del_agg_; + RangeDelAggregator* range_del_agg_; 
IteratorWrapper file_iter_; // May be nullptr PinnedIteratorsManager* pinned_iters_mgr_; @@ -985,7 +985,7 @@ double VersionStorageInfo::GetEstimatedCompressionRatioAtLevel( void Version::AddIterators(const ReadOptions& read_options, const EnvOptions& soptions, MergeIteratorBuilder* merge_iter_builder, - RangeDelAggregatorV2* range_del_agg) { + RangeDelAggregator* range_del_agg) { assert(storage_info_.finalized_); for (int level = 0; level < storage_info_.num_non_empty_levels(); level++) { @@ -998,7 +998,7 @@ void Version::AddIteratorsForLevel(const ReadOptions& read_options, const EnvOptions& soptions, MergeIteratorBuilder* merge_iter_builder, int level, - RangeDelAggregatorV2* range_del_agg) { + RangeDelAggregator* range_del_agg) { assert(storage_info_.finalized_); if (level >= storage_info_.num_non_empty_levels()) { // This is an empty level @@ -1057,8 +1057,8 @@ Status Version::OverlapWithLevelIterator(const ReadOptions& read_options, Arena arena; Status status; - ReadRangeDelAggregatorV2 range_del_agg(&icmp, - kMaxSequenceNumber /* upper_bound */); + ReadRangeDelAggregator range_del_agg(&icmp, + kMaxSequenceNumber /* upper_bound */); *overlap = false; @@ -4328,7 +4328,7 @@ void VersionSet::AddLiveFiles(std::vector* live_list) { } InternalIterator* VersionSet::MakeInputIterator( - const Compaction* c, RangeDelAggregatorV2* range_del_agg, + const Compaction* c, RangeDelAggregator* range_del_agg, const EnvOptions& env_options_compactions) { auto cfd = c->column_family_data(); ReadOptions read_options; diff --git a/db/version_set.h b/db/version_set.h index ec9084beb..b50f653ba 100644 --- a/db/version_set.h +++ b/db/version_set.h @@ -34,7 +34,7 @@ #include "db/dbformat.h" #include "db/file_indexer.h" #include "db/log_reader.h" -#include "db/range_del_aggregator_v2.h" +#include "db/range_del_aggregator.h" #include "db/read_callback.h" #include "db/table_cache.h" #include "db/version_builder.h" @@ -538,11 +538,11 @@ class Version { // REQUIRES: This version has been 
saved (see VersionSet::SaveTo) void AddIterators(const ReadOptions&, const EnvOptions& soptions, MergeIteratorBuilder* merger_iter_builder, - RangeDelAggregatorV2* range_del_agg); + RangeDelAggregator* range_del_agg); void AddIteratorsForLevel(const ReadOptions&, const EnvOptions& soptions, MergeIteratorBuilder* merger_iter_builder, - int level, RangeDelAggregatorV2* range_del_agg); + int level, RangeDelAggregator* range_del_agg); Status OverlapWithLevelIterator(const ReadOptions&, const EnvOptions&, const Slice& smallest_user_key, @@ -935,7 +935,7 @@ class VersionSet { // Create an iterator that reads over the compaction inputs for "*c". // The caller should delete the iterator when no longer needed. InternalIterator* MakeInputIterator( - const Compaction* c, RangeDelAggregatorV2* range_del_agg, + const Compaction* c, RangeDelAggregator* range_del_agg, const EnvOptions& env_options_compactions); // Add all files listed in any live version to *live. diff --git a/src.mk b/src.mk index 0926fc12c..8b3ab68d8 100644 --- a/src.mk +++ b/src.mk @@ -44,7 +44,6 @@ LIB_SOURCES = \ db/merge_helper.cc \ db/merge_operator.cc \ db/range_del_aggregator.cc \ - db/range_del_aggregator_v2.cc \ db/range_tombstone_fragmenter.cc \ db/repair.cc \ db/snapshot_impl.cc \ @@ -335,7 +334,6 @@ MAIN_SOURCES = \ db/repair_test.cc \ db/range_del_aggregator_test.cc \ db/range_del_aggregator_bench.cc \ - db/range_del_aggregator_v2_test.cc \ db/range_tombstone_fragmenter_test.cc \ db/table_properties_collector_test.cc \ db/util_merge_operators_test.cc \ diff --git a/utilities/debug.cc b/utilities/debug.cc index 3dfde980e..72fcbf0f5 100644 --- a/utilities/debug.cc +++ b/utilities/debug.cc @@ -19,8 +19,8 @@ Status GetAllKeyVersions(DB* db, Slice begin_key, Slice end_key, DBImpl* idb = static_cast(db->GetRootDB()); auto icmp = InternalKeyComparator(idb->GetOptions().comparator); - ReadRangeDelAggregatorV2 range_del_agg(&icmp, - kMaxSequenceNumber /* upper_bound */); + ReadRangeDelAggregator 
range_del_agg(&icmp, + kMaxSequenceNumber /* upper_bound */); Arena arena; ScopedArenaIterator iter( idb->NewInternalIterator(&arena, &range_del_agg, kMaxSequenceNumber));