diff --git a/HISTORY.md b/HISTORY.md
index 29f0f3f27..89e596756 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -8,6 +8,7 @@
 ### Bug Fixes
 * Fix wrong latencies in `rocksdb.db.get.micros`, `rocksdb.db.write.micros`, and `rocksdb.sst.read.micros`.
 * Fix incorrect dropping of deletions during intra-L0 compaction.
+* Fix transient reappearance of keys covered by range deletions when memtable prefix bloom filter is enabled.
 
 ## 5.7.0 (07/13/2017)
 ### Public API Change
diff --git a/db/db_range_del_test.cc b/db/db_range_del_test.cc
index 0288f80bd..d80c9d144 100644
--- a/db/db_range_del_test.cc
+++ b/db/db_range_del_test.cc
@@ -868,6 +868,32 @@ TEST_F(DBRangeDelTest, SubcompactionHasEmptyDedicatedRangeDelFile) {
   db_->ReleaseSnapshot(snapshot);
 }
 
+TEST_F(DBRangeDelTest, MemtableBloomFilter) {
+  // regression test for #2743. the range delete tombstones in memtable should
+  // be added even when Get() skips searching due to its prefix bloom filter
+  const int kMemtableSize = 1 << 20;              // 1MB
+  const int kMemtablePrefixFilterSize = 1 << 13;  // 8KB
+  const int kNumKeys = 1000;
+  const int kPrefixLen = 8;
+  Options options = CurrentOptions();
+  options.memtable_prefix_bloom_size_ratio =
+      static_cast<double>(kMemtablePrefixFilterSize) / kMemtableSize;
+  options.prefix_extractor.reset(rocksdb::NewFixedPrefixTransform(kPrefixLen));
+  options.write_buffer_size = kMemtableSize;
+  Reopen(options);
+
+  for (int i = 0; i < kNumKeys; ++i) {
+    ASSERT_OK(Put(Key(i), "val"));
+  }
+  Flush();
+  ASSERT_OK(db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), Key(0),
+                             Key(kNumKeys)));
+  for (int i = 0; i < kNumKeys; ++i) {
+    std::string value;
+    ASSERT_TRUE(db_->Get(ReadOptions(), Key(i), &value).IsNotFound());
+  }
+}
+
 #endif  // ROCKSDB_LITE
 
 }  // namespace rocksdb
diff --git a/db/memtable.cc b/db/memtable.cc
index efea6199a..a24989123 100644
--- a/db/memtable.cc
+++ b/db/memtable.cc
@@ -643,6 +643,14 @@ bool MemTable::Get(const LookupKey& key, std::string* value, Status* s,
   }
   PERF_TIMER_GUARD(get_from_memtable_time);
 
+  std::unique_ptr<InternalIterator> range_del_iter(
+      NewRangeTombstoneIterator(read_opts));
+  Status status = range_del_agg->AddTombstones(std::move(range_del_iter));
+  if (!status.ok()) {
+    *s = status;
+    return false;
+  }
+
   Slice user_key = key.user_key();
   bool found_final_value = false;
   bool merge_in_progress = s->IsMergeInProgress();
@@ -658,13 +666,6 @@ bool MemTable::Get(const LookupKey& key, std::string* value, Status* s,
     if (prefix_bloom_) {
       PERF_COUNTER_ADD(bloom_memtable_hit_count, 1);
     }
-    std::unique_ptr<InternalIterator> range_del_iter(
-        NewRangeTombstoneIterator(read_opts));
-    Status status = range_del_agg->AddTombstones(std::move(range_del_iter));
-    if (!status.ok()) {
-      *s = status;
-      return false;
-    }
     Saver saver;
     saver.status = s;
     saver.found_final_value = &found_final_value;
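
For context on the user-visible behavior this patch addresses, here is a minimal sketch against the public RocksDB API. The database path, key names, and prefix length are made up for illustration and are not part of the patch; the point is that with a fixed-prefix extractor and `memtable_prefix_bloom_size_ratio` set, a key covered by a `DeleteRange` that is still in the memtable should come back `NotFound`, which is what the new `MemtableBloomFilter` test asserts.

```cpp
// Illustrative sketch only (not part of the patch). Path and key names are
// hypothetical; error handling is abbreviated to asserts.
#include <cassert>
#include <string>

#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "rocksdb/slice_transform.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  // Enable the memtable prefix bloom filter, the configuration under which
  // the bug in #2743 could surface.
  options.prefix_extractor.reset(rocksdb::NewFixedPrefixTransform(8));
  options.memtable_prefix_bloom_size_ratio = 0.1;

  rocksdb::DB* db = nullptr;
  rocksdb::Status s =
      rocksdb::DB::Open(options, "/tmp/memtable_bloom_example", &db);
  assert(s.ok());

  // Write a key whose 8-byte prefix is "prefix01", then cover it with a
  // range tombstone that stays in the memtable (no flush).
  s = db->Put(rocksdb::WriteOptions(), "prefix01-key", "val");
  assert(s.ok());
  s = db->DeleteRange(rocksdb::WriteOptions(), db->DefaultColumnFamily(),
                      "prefix01", "prefix02");
  assert(s.ok());

  // With the fix, the memtable's range tombstones are registered even when
  // the prefix bloom filter lets the point lookup skip the memtable search,
  // so the deleted key is reported as NotFound.
  std::string value;
  s = db->Get(rocksdb::ReadOptions(), "prefix01-key", &value);
  assert(s.IsNotFound());

  delete db;
  return 0;
}
```

Before the memtable.cc change above, the same lookup could transiently return the stale value, because `AddTombstones` was only reached on a prefix bloom hit; the patch moves that call ahead of the bloom filter check so memtable range tombstones are always registered.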