From 1a88c437512af914da88ba462a11872f67ca72a5 Mon Sep 17 00:00:00 2001 From: Andrew Kryczka Date: Fri, 31 Aug 2018 12:18:11 -0700 Subject: [PATCH] Reduce empty SST creation/deletion in compaction (#4336) Summary: This is a followup to #4311. Checking `!RangeDelAggregator::IsEmpty()` before opening a dedicated range tombstone SST did not properly prevent empty SSTs from being generated. That's because it relies on `CollapsedRangeDelMap::Size`, which had an underflow bug when the map was empty. This PR fixes that underflow bug. Also fixed an uninitialized variable in db_stress. Pull Request resolved: https://github.com/facebook/rocksdb/pull/4336 Differential Revision: D9600080 Pulled By: ajkr fbshipit-source-id: bc6980ca79d2cd01b825ebc9dbccd51c1a70cfc7 --- db/db_compaction_test.cc | 40 ++++++++++++++++++++++++++++++++++++++ db/range_del_aggregator.cc | 2 +- tools/db_stress.cc | 3 ++- 3 files changed, 43 insertions(+), 2 deletions(-) diff --git a/db/db_compaction_test.cc b/db/db_compaction_test.cc index 55c965c92..423480cd9 100644 --- a/db/db_compaction_test.cc +++ b/db/db_compaction_test.cc @@ -116,6 +116,22 @@ private: std::vector<std::atomic<int>> compaction_completed_; }; +class SstStatsCollector : public EventListener { + public: + SstStatsCollector() : num_ssts_creation_started_(0) {} + + void OnTableFileCreationStarted(const TableFileCreationBriefInfo& /* info */) override { + ++num_ssts_creation_started_; + } + + int num_ssts_creation_started() { + return num_ssts_creation_started_; + } + + private: + std::atomic<int> num_ssts_creation_started_; +}; + static const int kCDTValueSize = 1000; static const int kCDTKeysPerBuffer = 4; static const int kCDTNumLevels = 8; @@ -3822,6 +3838,30 @@ TEST_F(DBCompactionTest, CompactFilesOutputRangeConflict) { bg_thread.join(); } +TEST_F(DBCompactionTest, CompactionHasEmptyOutput) { + Options options = CurrentOptions(); + SstStatsCollector* collector = new SstStatsCollector(); + options.level0_file_num_compaction_trigger = 2; + options.listeners.emplace_back(collector); + Reopen(options); + + // Make sure the L0 files overlap to prevent trivial move. + ASSERT_OK(Put("a", "val")); + ASSERT_OK(Put("b", "val")); + ASSERT_OK(Flush()); + ASSERT_OK(Delete("a")); + ASSERT_OK(Delete("b")); + ASSERT_OK(Flush()); + + dbfull()->TEST_WaitForCompact(); + ASSERT_EQ(NumTableFilesAtLevel(0), 0); + ASSERT_EQ(NumTableFilesAtLevel(1), 0); + + // Expect one file creation to start for each flush, and zero for compaction + // since no keys are written. + ASSERT_EQ(2, collector->num_ssts_creation_started()); +} + INSTANTIATE_TEST_CASE_P(DBCompactionTestWithParam, DBCompactionTestWithParam, ::testing::Values(std::make_tuple(1, true), std::make_tuple(1, false), diff --git a/db/range_del_aggregator.cc b/db/range_del_aggregator.cc index c9a65af4a..e488d4f6e 100644 --- a/db/range_del_aggregator.cc +++ b/db/range_del_aggregator.cc @@ -346,7 +346,7 @@ class CollapsedRangeDelMap : public RangeDelMap { } } - size_t Size() const override { return rep_.size() - 1; } + size_t Size() const override { return rep_.empty() ? 0 : rep_.size() - 1; } void InvalidatePosition() override { iter_ = rep_.end(); } diff --git a/tools/db_stress.cc b/tools/db_stress.cc index dfd8d7f41..7b8dce95a 100644 --- a/tools/db_stress.cc +++ b/tools/db_stress.cc @@ -1238,7 +1238,8 @@ class DbStressListener : public EventListener { const std::vector<DbPath>& db_paths, const std::vector<ColumnFamilyDescriptor>& column_families) : db_name_(db_name), db_paths_(db_paths), - column_families_(column_families) {} + column_families_(column_families), + num_pending_file_creations_(0) {} virtual ~DbStressListener() { assert(num_pending_file_creations_ == 0); }