MergeHelper::FilterMerge() calling ElapsedNanosSafe() upon exit even … (#7867)

Summary:
…when unused.  Causes many calls to clock_gettime, impacting performance.

I was looking for something else with the Linux "perf" command when I spotted heavy usage of clock_gettime during a compaction. Our product makes heavy use of rocksdb::Options::merge_operator. MergeHelper::FilterMerge() properly tests whether timing is enabled/disabled upon entry, but not on exit. This patch fixes the exit path.

Note: the entry test also verifies that `stats_ != nullptr`. That check is redundant with the one already performed inside ShouldReportDetailedTime(), so I omitted it from my change.

merge_test.cc is updated with a test that fails before the merge_helper.cc change and passes after it.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/7867

Reviewed By: jay-zhuang

Differential Revision: D25960175

Pulled By: ajkr

fbshipit-source-id: 56e66d7eb6ae5eae89c8e0d5a262bd2905a226b6
main
Matthew Von-Maszewski 4 years ago committed by Facebook GitHub Bot
parent e18a4df62a
commit 12a8be1d44
  1. 3
      db/merge_helper.cc
  2. 40
      db/merge_test.cc

@ -183,7 +183,6 @@ Status MergeHelper::MergeUntil(InternalIterator* iter,
assert(IsValueType(ikey.type)); assert(IsValueType(ikey.type));
if (ikey.type != kTypeMerge) { if (ikey.type != kTypeMerge) {
// hit a put/delete/single delete // hit a put/delete/single delete
// => merge the put value or a nullptr with operands_ // => merge the put value or a nullptr with operands_
// => store result in operands_.back() (and update keys_.back()) // => store result in operands_.back() (and update keys_.back())
@ -414,7 +413,9 @@ CompactionFilter::Decision MergeHelper::FilterMerge(const Slice& user_key,
kValueTypeForSeek); kValueTypeForSeek);
} }
} }
if (stats_ != nullptr && ShouldReportDetailedTime(env_, stats_)) {
total_filter_time_ += filter_timer_.ElapsedNanosSafe(); total_filter_time_ += filter_timer_.ElapsedNanosSafe();
}
return ret; return ret;
} }

@ -49,11 +49,7 @@ class CountMergeOperator : public AssociativeMergeOperator {
return true; return true;
} }
return mergeOperator_->PartialMerge( return mergeOperator_->PartialMerge(key, *existing_value, value, new_value,
key,
*existing_value,
value,
new_value,
logger); logger);
} }
@ -73,6 +69,29 @@ class CountMergeOperator : public AssociativeMergeOperator {
std::shared_ptr<MergeOperator> mergeOperator_; std::shared_ptr<MergeOperator> mergeOperator_;
}; };
// Env wrapper that tallies every NowNanos() call so tests can assert that
// the clock is never consulted when detailed-time reporting is disabled.
class EnvMergeTest : public EnvWrapper {
 public:
  EnvMergeTest() : EnvWrapper(Env::Default()) {}

  // Forward to the wrapped Env, recording that the clock was queried.
  uint64_t NowNanos() override {
    ++now_nanos_count_;
    return target()->NowNanos();
  }

  // Lazily constructed process-wide instance shared by all tests.
  static EnvMergeTest* GetInstance() {
    if (!singleton_) {
      singleton_.reset(new EnvMergeTest);
    }
    return singleton_.get();
  }

  // Total NowNanos() calls observed across the process.
  static uint64_t now_nanos_count_;
  static std::unique_ptr<EnvMergeTest> singleton_;
};

uint64_t EnvMergeTest::now_nanos_count_{0};
std::unique_ptr<EnvMergeTest> EnvMergeTest::singleton_;
std::shared_ptr<DB> OpenDb(const std::string& dbname, const bool ttl = false, std::shared_ptr<DB> OpenDb(const std::string& dbname, const bool ttl = false,
const size_t max_successive_merges = 0) { const size_t max_successive_merges = 0) {
DB* db; DB* db;
@ -80,6 +99,7 @@ std::shared_ptr<DB> OpenDb(const std::string& dbname, const bool ttl = false,
options.create_if_missing = true; options.create_if_missing = true;
options.merge_operator = std::make_shared<CountMergeOperator>(); options.merge_operator = std::make_shared<CountMergeOperator>();
options.max_successive_merges = max_successive_merges; options.max_successive_merges = max_successive_merges;
options.env = EnvMergeTest::GetInstance();
EXPECT_OK(DestroyDB(dbname, Options())); EXPECT_OK(DestroyDB(dbname, Options()));
Status s; Status s;
// DBWithTTL is not supported in ROCKSDB_LITE // DBWithTTL is not supported in ROCKSDB_LITE
@ -106,7 +126,6 @@ std::shared_ptr<DB> OpenDb(const std::string& dbname, const bool ttl = false,
// set, add, get and remove // set, add, get and remove
// This is a quick implementation without a Merge operation. // This is a quick implementation without a Merge operation.
class Counters { class Counters {
protected: protected:
std::shared_ptr<DB> db_; std::shared_ptr<DB> db_;
@ -190,7 +209,6 @@ class Counters {
return get(key, &base) && set(key, base + value); return get(key, &base) && set(key, base + value);
} }
// convenience functions for testing // convenience functions for testing
void assert_set(const std::string& key, uint64_t value) { void assert_set(const std::string& key, uint64_t value) {
assert(set(key, value)); assert(set(key, value));
@ -220,9 +238,7 @@ class MergeBasedCounters : public Counters {
public: public:
explicit MergeBasedCounters(std::shared_ptr<DB> db, uint64_t defaultCount = 0) explicit MergeBasedCounters(std::shared_ptr<DB> db, uint64_t defaultCount = 0)
: Counters(db, defaultCount), : Counters(db, defaultCount), merge_option_() {}
merge_option_() {
}
// mapped to a rocksdb Merge operation // mapped to a rocksdb Merge operation
bool add(const std::string& key, uint64_t value) override { bool add(const std::string& key, uint64_t value) override {
@ -250,7 +266,6 @@ void dumpDb(DB* db) {
} }
void testCounters(Counters& counters, DB* db, bool test_compaction) { void testCounters(Counters& counters, DB* db, bool test_compaction) {
FlushOptions o; FlushOptions o;
o.wait = true; o.wait = true;
@ -392,7 +407,6 @@ void testCountersWithFlushAndCompaction(Counters& counters, DB* db) {
void testSuccessiveMerge(Counters& counters, size_t max_num_merges, void testSuccessiveMerge(Counters& counters, size_t max_num_merges,
size_t num_merges) { size_t num_merges) {
counters.assert_remove("z"); counters.assert_remove("z");
uint64_t sum = 0; uint64_t sum = 0;
@ -449,6 +463,7 @@ void testPartialMerge(Counters* counters, DB* db, size_t max_merge,
ASSERT_OK(db->CompactRange(CompactRangeOptions(), nullptr, nullptr)); ASSERT_OK(db->CompactRange(CompactRangeOptions(), nullptr, nullptr));
ASSERT_EQ(tmp_sum, counters->assert_get("c")); ASSERT_EQ(tmp_sum, counters->assert_get("c"));
ASSERT_EQ(num_partial_merge_calls, 0U); ASSERT_EQ(num_partial_merge_calls, 0U);
ASSERT_EQ(EnvMergeTest::now_nanos_count_, 0U);
} }
void testSingleBatchSuccessiveMerge(DB* db, size_t max_num_merges, void testSingleBatchSuccessiveMerge(DB* db, size_t max_num_merges,
@ -486,7 +501,6 @@ void testSingleBatchSuccessiveMerge(DB* db, size_t max_num_merges,
} }
void runTest(const std::string& dbname, const bool use_ttl = false) { void runTest(const std::string& dbname, const bool use_ttl = false) {
{ {
auto db = OpenDb(dbname, use_ttl); auto db = OpenDb(dbname, use_ttl);

Loading…
Cancel
Save