rocksdb/db/blob/blob_counting_iterator.h

// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#pragma once

#include <cassert>

#include "db/blob/blob_garbage_meter.h"
#include "rocksdb/rocksdb_namespace.h"
#include "rocksdb/status.h"
#include "table/internal_iterator.h"
#include "test_util/sync_point.h"

namespace ROCKSDB_NAMESPACE {

// An internal iterator that passes each key-value encountered to
// BlobGarbageMeter as inflow in order to measure the total number and size of
// blobs in the compaction input on a per-blob file basis.
class BlobCountingIterator : public InternalIterator {
 public:
  BlobCountingIterator(InternalIterator* iter,
                       BlobGarbageMeter* blob_garbage_meter)
      : iter_(iter), blob_garbage_meter_(blob_garbage_meter) {
    assert(iter_);
    assert(blob_garbage_meter_);

    UpdateAndCountBlobIfNeeded();
  }

  bool Valid() const override { return iter_->Valid() && status_.ok(); }

  void SeekToFirst() override {
    iter_->SeekToFirst();
    UpdateAndCountBlobIfNeeded();
  }

  void SeekToLast() override {
    iter_->SeekToLast();
    UpdateAndCountBlobIfNeeded();
  }

  void Seek(const Slice& target) override {
    iter_->Seek(target);
    UpdateAndCountBlobIfNeeded();
  }

  void SeekForPrev(const Slice& target) override {
    iter_->SeekForPrev(target);
    UpdateAndCountBlobIfNeeded();
  }

  void Next() override {
    assert(Valid());
    iter_->Next();
    UpdateAndCountBlobIfNeeded();
  }

  bool NextAndGetResult(IterateResult* result) override {
    assert(Valid());
    const bool res = iter_->NextAndGetResult(result);
    UpdateAndCountBlobIfNeeded();
    return res;
  }

  void Prev() override {
    assert(Valid());
    iter_->Prev();
    UpdateAndCountBlobIfNeeded();
  }

  Slice key() const override {
    assert(Valid());
    return iter_->key();
  }

  Slice user_key() const override {
    assert(Valid());
    return iter_->user_key();
  }

  Slice value() const override {
    assert(Valid());
    return iter_->value();
  }

  Status status() const override { return status_; }

  bool PrepareValue() override {
    assert(Valid());
    return iter_->PrepareValue();
  }

  bool MayBeOutOfLowerBound() override {
    assert(Valid());
    return iter_->MayBeOutOfLowerBound();
  }

  IterBoundCheck UpperBoundCheckResult() override {
    assert(Valid());
    return iter_->UpperBoundCheckResult();
  }

  void SetPinnedItersMgr(PinnedIteratorsManager* pinned_iters_mgr) override {
    iter_->SetPinnedItersMgr(pinned_iters_mgr);
  }

  bool IsKeyPinned() const override {
    assert(Valid());
    return iter_->IsKeyPinned();
  }

  bool IsValuePinned() const override {
    assert(Valid());
    return iter_->IsValuePinned();
  }

  Status GetProperty(std::string prop_name, std::string* prop) override {
    return iter_->GetProperty(prop_name, prop);
  }

  bool IsDeleteRangeSentinelKey() const override {
    return iter_->IsDeleteRangeSentinelKey();
  }

 private:
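  // Passes the current key-value pair to the BlobGarbageMeter as inflow
  // whenever the underlying iterator is positioned on a valid, non-sentinel
  // entry; otherwise captures the underlying iterator's status.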
  void UpdateAndCountBlobIfNeeded() {
    assert(!iter_->Valid() || iter_->status().ok());

    if (!iter_->Valid()) {
      status_ = iter_->status();
      return;
    } else if (iter_->IsDeleteRangeSentinelKey()) {
      // CompactionMergingIterator emits range tombstones, and range tombstone
      // keys can be truncated at file boundaries. This means the range
      // tombstone keys can have op_type kTypeBlobIndex.
      // This could crash the ProcessInFlow() call below since
      // value is empty for these keys.
      return;
    }

    TEST_SYNC_POINT(
        "BlobCountingIterator::UpdateAndCountBlobIfNeeded:ProcessInFlow");

    status_ = blob_garbage_meter_->ProcessInFlow(key(), value());
  }

  InternalIterator* iter_;
  BlobGarbageMeter* blob_garbage_meter_;
  Status status_;
};
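
// Example usage (a minimal, hypothetical sketch; `input_iter` stands in for an
// InternalIterator over the compaction input, and the garbage meter is assumed
// to be owned by the caller for the duration of the iteration):
//
//   BlobGarbageMeter blob_garbage_meter;
//   BlobCountingIterator blob_counting_iter(input_iter, &blob_garbage_meter);
//
//   for (blob_counting_iter.SeekToFirst(); blob_counting_iter.Valid();
//        blob_counting_iter.Next()) {
//     // Each key-value pair visited here has also been passed to the meter
//     // as inflow via BlobGarbageMeter::ProcessInFlow().
//   }
//   assert(blob_counting_iter.status().ok());
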
} // namespace ROCKSDB_NAMESPACE