Avoid per-key linear scan over snapshots in compaction (#4495)

Summary:
`CompactionIterator::snapshots_` is ordered by ascending seqnum, just like `DBImpl`'s linked list of snapshots from which it was copied. This PR exploits this ordering to make `findEarliestVisibleSnapshot` do binary search rather than linear scan. This can make flush/compaction significantly faster when many snapshots exist since that function is called on every single key.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/4495

Differential Revision: D10386470

Pulled By: ajkr

fbshipit-source-id: 29734991631227b6b7b677e156ac567690118a8b
main
Andrew Kryczka 6 years ago committed by Facebook Github Bot
parent 0f955f2aef
commit 32b4d4ad47
  1. 1
      HISTORY.md
  2. 29
      db/compaction_iterator.cc

@ -6,6 +6,7 @@
### Bug Fixes
* Fix corner case where a write group leader blocked due to write stall blocks other writers in queue with WriteOptions::no_slowdown set.
* Fix in-memory range tombstone truncation to avoid erroneously covering newer keys at a lower level, and include range tombstones in compacted files whose largest key is the range tombstone's start key.
* Fix slow flush/compaction when DB contains many snapshots. The problem became noticeable to us in DBs with 100,000+ snapshots, though it will affect others at different thresholds.
* Fix slow flush/compaction when DB contains many snapshots. The problem became noticeable to us in DBs with 100,000+ snapshots, though it will affect others at different thresholds.
## 5.17.0 (10/05/2018)
### Public API Change

@ -77,6 +77,12 @@ CompactionIterator::CompactionIterator(
earliest_snapshot_ = snapshots_->at(0); earliest_snapshot_ = snapshots_->at(0);
latest_snapshot_ = snapshots_->back(); latest_snapshot_ = snapshots_->back();
} }
#ifndef NDEBUG
// findEarliestVisibleSnapshot assumes this ordering.
for (size_t i = 1; i < snapshots_->size(); ++i) {
assert(snapshots_->at(i - 1) <= snapshots_->at(i));
}
#endif
if (compaction_filter_ != nullptr) { if (compaction_filter_ != nullptr) {
if (compaction_filter_->IgnoreSnapshots()) { if (compaction_filter_->IgnoreSnapshots()) {
ignore_snapshots_ = true; ignore_snapshots_ = true;
@ -628,18 +634,23 @@ void CompactionIterator::PrepareOutput() {
// Returns the earliest snapshot sequence number that can see `in`, or
// kMaxSequenceNumber if no snapshot in `snapshots_` sees it. On return,
// `*prev_snapshot` holds the largest snapshot seqnum strictly below the
// returned one (0 if there is none).
//
// `snapshots_` is sorted ascending (mirroring DBImpl's snapshot list), so a
// binary search replaces the previous per-key linear scan — important since
// this runs for every key during flush/compaction.
inline SequenceNumber CompactionIterator::findEarliestVisibleSnapshot(
    SequenceNumber in, SequenceNumber* prev_snapshot) {
  assert(snapshots_->size());
  // First snapshot with seqnum >= `in`; everything before it cannot see `in`.
  auto snapshots_iter =
      std::lower_bound(snapshots_->begin(), snapshots_->end(), in);
  if (snapshots_iter == snapshots_->begin()) {
    *prev_snapshot = 0;
  } else {
    *prev_snapshot = *std::prev(snapshots_iter);
    assert(*prev_snapshot < in);
  }
  // With a snapshot_checker_ (e.g. write-prepared txns), a snapshot whose
  // seqnum is >= `in` may still not see it, so scan forward until one does.
  for (; snapshots_iter != snapshots_->end(); ++snapshots_iter) {
    auto cur = *snapshots_iter;
    assert(in <= cur);
    if (snapshot_checker_ == nullptr ||
        snapshot_checker_->IsInSnapshot(in, cur)) {
      return cur;
    }
    *prev_snapshot = cur;
    assert(*prev_snapshot < kMaxSequenceNumber);
  }
  return kMaxSequenceNumber;
}

Loading…
Cancel
Save