diff --git a/HISTORY.md b/HISTORY.md
index e791cfab2..87fa2f84a 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -1,7 +1,9 @@
 # Rocksdb Change Log
 ## Unreleased
-### Buf Fixes
+### Bug Fixes
 * Fix a bug that can cause unnecessary bg thread to be scheduled(#6104).
+* Fix a bug in which a snapshot read could be affected by a DeleteRange after the snapshot (#6062).
+
 ## 6.6.0 (11/25/2019)
 ### Bug Fixes
 * Fix data corruption caused by output of intra-L0 compaction on ingested file not being placed in correct order in L0.
diff --git a/db/db_range_del_test.cc b/db/db_range_del_test.cc
index ec448b731..f2953a746 100644
--- a/db/db_range_del_test.cc
+++ b/db/db_range_del_test.cc
@@ -1427,6 +1427,47 @@ TEST_F(DBRangeDelTest, SnapshotPreventsDroppedKeys) {
   db_->ReleaseSnapshot(snapshot);
 }
 
+TEST_F(DBRangeDelTest, SnapshotPreventsDroppedKeysInImmMemTables) {
+  const int kFileBytes = 1 << 20;
+
+  Options options = CurrentOptions();
+  options.compression = kNoCompression;
+  options.disable_auto_compactions = true;
+  options.target_file_size_base = kFileBytes;
+  Reopen(options);
+
+  // block flush thread -> pin immtables in memory
+  SyncPoint::GetInstance()->DisableProcessing();
+  SyncPoint::GetInstance()->LoadDependency({
+      {"SnapshotPreventsDroppedKeysInImmMemTables:AfterNewIterator",
+       "DBImpl::BGWorkFlush"},
+  });
+  SyncPoint::GetInstance()->EnableProcessing();
+
+  ASSERT_OK(Put(Key(0), "a"));
+  std::unique_ptr<const Snapshot, std::function<void(const Snapshot*)>>
+      snapshot(db_->GetSnapshot(),
+               [this](const Snapshot* s) { db_->ReleaseSnapshot(s); });
+
+  ASSERT_OK(db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), Key(0),
+                             Key(10)));
+
+  ASSERT_OK(dbfull()->TEST_SwitchMemtable());
+
+  ReadOptions read_opts;
+  read_opts.snapshot = snapshot.get();
+  std::unique_ptr<Iterator> iter(db_->NewIterator(read_opts));
+
+  TEST_SYNC_POINT("SnapshotPreventsDroppedKeysInImmMemTables:AfterNewIterator");
+
+  iter->SeekToFirst();
+  ASSERT_TRUE(iter->Valid());
+  ASSERT_EQ(Key(0), iter->key());
+
+  iter->Next();
+  ASSERT_FALSE(iter->Valid());
+}
+
 TEST_F(DBRangeDelTest, RangeTombstoneWrittenToMinimalSsts) {
   // Adapted from
   // https://github.com/cockroachdb/cockroach/blob/de8b3ea603dd1592d9dc26443c2cc92c356fbc2f/pkg/storage/engine/rocksdb_test.go#L1267-L1398.
diff --git a/db/memtable_list.cc b/db/memtable_list.cc
index d9159b793..93ab2e77c 100644
--- a/db/memtable_list.cc
+++ b/db/memtable_list.cc
@@ -186,11 +186,14 @@ Status MemTableListVersion::AddRangeTombstoneIterators(
     const ReadOptions& read_opts, Arena* /*arena*/,
     RangeDelAggregator* range_del_agg) {
   assert(range_del_agg != nullptr);
+  // Except for snapshot read, using kMaxSequenceNumber is OK because these
+  // are immutable memtables.
+  SequenceNumber read_seq = read_opts.snapshot != nullptr
+                                ? read_opts.snapshot->GetSequenceNumber()
+                                : kMaxSequenceNumber;
   for (auto& m : memlist_) {
-    // Using kMaxSequenceNumber is OK because these are immutable memtables.
     std::unique_ptr<FragmentedRangeTombstoneIterator> range_del_iter(
-        m->NewRangeTombstoneIterator(read_opts,
-                                     kMaxSequenceNumber /* read_seq */));
+        m->NewRangeTombstoneIterator(read_opts, read_seq));
     range_del_agg->AddTombstones(std::move(range_del_iter));
   }
   return Status::OK();
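
For reference, below is a minimal standalone sketch of the guarantee the new test pins down, written against the public API only. The scratch path "/tmp/rocksdb_snapshot_demo" is made up for illustration, and without the internal sync points and dbfull()->TEST_SwitchMemtable() the range tombstone stays in the active memtable, so this demonstrates the snapshot-consistent behavior the fix restores rather than reproducing the immutable-memtable race itself: an iterator pinned to a snapshot taken before a DeleteRange must keep seeing the covered keys.

// Hypothetical standalone demo, not part of this change.
#include <cassert>
#include <memory>
#include <string>

#include "rocksdb/db.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  rocksdb::DB* raw_db = nullptr;
  // Scratch path is an assumption for this sketch.
  rocksdb::Status s =
      rocksdb::DB::Open(options, "/tmp/rocksdb_snapshot_demo", &raw_db);
  assert(s.ok());
  std::unique_ptr<rocksdb::DB> db(raw_db);

  // Write a key, then pin the current version with a snapshot.
  assert(db->Put(rocksdb::WriteOptions(), "key0", "a").ok());
  const rocksdb::Snapshot* snapshot = db->GetSnapshot();

  // Issue a DeleteRange covering the key *after* the snapshot was taken.
  assert(db->DeleteRange(rocksdb::WriteOptions(), db->DefaultColumnFamily(),
                         "key0", "key9")
             .ok());

  // A read at the snapshot must still see "key0". With the bug, the newer
  // range tombstone could hide it once the tombstone sat in an immutable
  // memtable, because the tombstone iterator was built with
  // kMaxSequenceNumber instead of the snapshot's sequence number.
  rocksdb::ReadOptions read_opts;
  read_opts.snapshot = snapshot;
  std::unique_ptr<rocksdb::Iterator> iter(db->NewIterator(read_opts));
  iter->SeekToFirst();
  assert(iter->Valid() && iter->key().ToString() == "key0");
  iter->Next();
  assert(!iter->Valid());

  // Release the iterator before the snapshot, and the snapshot before the DB.
  iter.reset();
  db->ReleaseSnapshot(snapshot);
  return 0;
}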