From 7b9e970042c8eb610cd355f5b7b038d5d4845b0c Mon Sep 17 00:00:00 2001 From: Changyu Bi Date: Tue, 23 Aug 2022 11:06:09 -0700 Subject: [PATCH] Optionally issue `DeleteRange` in `*whilewriting` benchmarks (#10552) Summary: Optionally issue DeleteRange in `*whilewriting` benchmarks. This happens in `BGWriter` and uses similar logic as in `DoWrite` to issue DeleteRange operations. I added this when I was benchmarking https://github.com/facebook/rocksdb/issues/10547, but this should be an independent PR. Pull Request resolved: https://github.com/facebook/rocksdb/pull/10552 Test Plan: ran some benchmarks with various delete range options, e.g. `./db_bench --benchmarks=readwhilewriting --writes_per_range_tombstone=100 --writes=200000 --reads=1000000 --disable_auto_compactions --max_num_range_tombstones=10000` Reviewed By: ajkr Differential Revision: D38927020 Pulled By: cbi42 fbshipit-source-id: 31ee20cb8127f7173f0816ea0cc2a204ec02aad6 --- tools/db_bench_tool.cc | 52 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/tools/db_bench_tool.cc b/tools/db_bench_tool.cc index ddee9c057..881e35c2a 100644 --- a/tools/db_bench_tool.cc +++ b/tools/db_bench_tool.cc @@ -6900,6 +6900,19 @@ class Benchmark { std::unique_ptr<const char[]> key_guard; Slice key = AllocateKey(&key_guard); std::unique_ptr<char[]> ts_guard; + std::unique_ptr<const char[]> begin_key_guard; + Slice begin_key = AllocateKey(&begin_key_guard); + std::unique_ptr<const char[]> end_key_guard; + Slice end_key = AllocateKey(&end_key_guard); + uint64_t num_range_deletions = 0; + std::vector<std::unique_ptr<const char[]>> expanded_key_guards; + std::vector<Slice> expanded_keys; + if (FLAGS_expand_range_tombstones) { + expanded_key_guards.resize(range_tombstone_width_); + for (auto& expanded_key_guard : expanded_key_guards) { + expanded_keys.emplace_back(AllocateKey(&expanded_key_guard)); + } + } if (user_timestamp_size_ > 0) { ts_guard.reset(new char[user_timestamp_size_]); } @@ -6962,6 +6975,45 @@ class Benchmark { key.size() + val.size(), Env::IO_HIGH, nullptr /* 
stats */, RateLimiter::OpType::kWrite); } + + if (writes_per_range_tombstone_ > 0 && + written > writes_before_delete_range_ && + (written - writes_before_delete_range_) / + writes_per_range_tombstone_ <= + max_num_range_tombstones_ && + (written - writes_before_delete_range_) % + writes_per_range_tombstone_ == + 0) { + num_range_deletions++; + int64_t begin_num = thread->rand.Next() % FLAGS_num; + if (FLAGS_expand_range_tombstones) { + for (int64_t offset = 0; offset < range_tombstone_width_; ++offset) { + GenerateKeyFromInt(begin_num + offset, FLAGS_num, + &expanded_keys[offset]); + if (!db->Delete(write_options_, expanded_keys[offset]).ok()) { + fprintf(stderr, "delete error: %s\n", s.ToString().c_str()); + exit(1); + } + } + } else { + GenerateKeyFromInt(begin_num, FLAGS_num, &begin_key); + GenerateKeyFromInt(begin_num + range_tombstone_width_, FLAGS_num, + &end_key); + if (!db->DeleteRange(write_options_, db->DefaultColumnFamily(), + begin_key, end_key) + .ok()) { + fprintf(stderr, "deleterange error: %s\n", s.ToString().c_str()); + exit(1); + } + } + thread->stats.FinishedOps(&db_, db_.db, 1, kWrite); + // TODO: DeleteRange is not included in calculation of bytes/rate + // limiter request + } + } + if (num_range_deletions > 0) { + std::cout << "Number of range deletions: " << num_range_deletions + << std::endl; } thread->stats.AddBytes(bytes); }