Fix segfault in Iterator::Refresh() (#10739)

Summary:
When a new internal iterator is constructed during iterator refresh, the pointer to the previous memtable range tombstone iterator was not cleared. This could cause a segfault for future `Refresh()` calls when they try to free the memtable range tombstones. This PR fixes this issue.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/10739

Test Plan: added a unit test in db_range_del_test.cc to reproduce this issue.

Reviewed By: ajkr, riversand963

Differential Revision: D39825283

Pulled By: cbi42

fbshipit-source-id: 3b59a2b73865aed39e28cdd5c1b57eed7991b94c
main
Changyu Bi 2 years ago committed by Facebook GitHub Bot
parent aed30ddf21
commit df492791b6
  1. 1
      HISTORY.md
  2. 11
      db/arena_wrapped_db_iter.cc
  3. 1
      db/db_iter.cc
  4. 17
      db/db_range_del_test.cc

@ -4,6 +4,7 @@
* Fix a bug in io_uring_prep_cancel in AbortIO API for posix which expects sqe->addr to match with the read request submitted, and the wrong parameter was being passed.
* Fixed a regression in iterator performance when the entire DB is a single memtable introduced in #10449. The fix is in #10705 and #10716.
* Fixed an optimistic transaction validation bug caused by DBImpl::GetLatestSequenceForKey() returning non-latest seq for merge (#10724).
* Fixed a bug in iterator refresh which could segfault for DeleteRange users (#10739).
## 7.7.0 (09/18/2022)
### Bug Fixes

@ -45,6 +45,7 @@ void ArenaWrappedDBIter::Init(
sv_number_ = version_number;
read_options_ = read_options;
allow_refresh_ = allow_refresh;
memtable_range_tombstone_iter_ = nullptr;
}
Status ArenaWrappedDBIter::Refresh() {
@ -92,9 +93,15 @@ Status ArenaWrappedDBIter::Refresh() {
auto t = sv->mem->NewRangeTombstoneIterator(
read_options_, latest_seq, false /* immutable_memtable */);
if (!t || t->empty()) {
// If memtable_range_tombstone_iter_ points to a non-empty tombstone
// iterator, then it means sv->mem is not the memtable that
// memtable_range_tombstone_iter_ points to, so SV must have changed
// after the sv_number_ != cur_sv_number check above. We will fall
// back to re-init the InternalIterator, and the tombstone iterator
// will be freed during db_iter destruction there.
if (memtable_range_tombstone_iter_) {
delete *memtable_range_tombstone_iter_;
*memtable_range_tombstone_iter_ = nullptr;
assert(!*memtable_range_tombstone_iter_ ||
sv_number_ != cfd_->GetSuperVersionNumber());
}
delete t;
} else { // current mutable memtable has range tombstones

@ -89,6 +89,7 @@ DBIter::DBIter(Env* _env, const ReadOptions& read_options,
if (iter_.iter()) {
iter_.iter()->SetPinnedItersMgr(&pinned_iters_mgr_);
}
status_.PermitUncheckedError();
assert(timestamp_size_ == user_comparator_.timestamp_size());
}

@ -2687,6 +2687,23 @@ TEST_F(DBRangeDelTest, PrefixSentinelKey) {
delete iter;
}
TEST_F(DBRangeDelTest, RefreshMemtableIter) {
  // Regression test for #10739: after Refresh() rebuilds the internal
  // iterator, the stale memtable_range_tombstone_iter_ pointer must be
  // cleared, otherwise a later Refresh() double-frees it.
  Options opts = CurrentOptions();
  opts.disable_auto_compactions = true;
  DestroyAndReopen(opts);
  // Put a range tombstone in the mutable memtable so the iterator wires up
  // a memtable range tombstone iterator.
  ASSERT_OK(
      db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), "a", "z"));
  ReadOptions read_opts;
  read_opts.read_tier = kMemtableTier;
  std::unique_ptr<Iterator> it{db_->NewIterator(read_opts)};
  // Flushing bumps the super-version, forcing the next Refresh() to
  // re-initialize the internal iterator.
  ASSERT_OK(Flush());
  // The first Refresh() re-inits the iterator; with the bug, it left
  // memtable_range_tombstone_iter_ dangling and the second Refresh()
  // crashed freeing it.
  ASSERT_OK(it->Refresh());
  ASSERT_OK(it->Refresh());
}
#endif // ROCKSDB_LITE
} // namespace ROCKSDB_NAMESPACE

Loading…
Cancel
Save