diff --git a/db/compaction/compaction_job.cc b/db/compaction/compaction_job.cc
index b16ca2ca6..f8fb5a910 100644
--- a/db/compaction/compaction_job.cc
+++ b/db/compaction/compaction_job.cc
@@ -260,7 +260,7 @@ void CompactionJob::Prepare() {
     StopWatch sw(db_options_.clock, stats_, SUBCOMPACTION_SETUP_TIME);
     GenSubcompactionBoundaries();
   }
-  if (boundaries_.size() > 1) {
+  if (boundaries_.size() >= 1) {
     for (size_t i = 0; i <= boundaries_.size(); i++) {
       compact_->sub_compact_states.emplace_back(
           c, (i != 0) ? std::optional<Slice>(boundaries_[i - 1]) : std::nullopt,
diff --git a/db/db_compaction_test.cc b/db/db_compaction_test.cc
index 1e9721949..46ea1fcae 100644
--- a/db/db_compaction_test.cc
+++ b/db/db_compaction_test.cc
@@ -9753,6 +9753,59 @@ TEST_F(DBCompactionTest,
   ASSERT_EQ(0, NumTableFilesAtLevel(5));
 }
 
+TEST_F(DBCompactionTest, NumberOfSubcompactions) {
+  // Tests that expected number of subcompactions are created.
+  class SubCompactionEventListener : public EventListener {
+   public:
+    void OnSubcompactionCompleted(const SubcompactionJobInfo&) override {
+      sub_compaction_finished_++;
+    }
+    void OnCompactionCompleted(DB*, const CompactionJobInfo&) override {
+      compaction_finished_++;
+    }
+    std::atomic<int> sub_compaction_finished_{0};
+    std::atomic<int> compaction_finished_{0};
+  };
+  Options options = CurrentOptions();
+  options.compaction_style = kCompactionStyleLevel;
+  options.compression = kNoCompression;
+  const int kFileSize = 100 << 10;  // 100KB
+  options.target_file_size_base = kFileSize;
+  const int kLevel0CompactTrigger = 2;
+  options.level0_file_num_compaction_trigger = kLevel0CompactTrigger;
+  Destroy(options);
+  Random rnd(301);
+
+  // Exposing internal implementation detail here where the
+  // number of subcompactions depends on the size of data
+  // being compacted. In particular, to enable x subcompactions,
+  // we need to compact at least x * target file size amount
+  // of data.
+  //
+  // Will write two files below to avoid trivial move.
+  // Size written in total: 500 * 1000 * 2 ~ 1MB ~ 10 * target file size.
+  const int kValueSize = 500;
+  const int kNumKeyPerFile = 1000;
+  for (int i = 1; i <= 8; ++i) {
+    options.max_subcompactions = i;
+    SubCompactionEventListener* listener = new SubCompactionEventListener();
+    options.listeners.clear();
+    options.listeners.emplace_back(listener);
+    TryReopen(options);
+
+    for (int file = 0; file < kLevel0CompactTrigger; ++file) {
+      for (int key = file; key < 2 * kNumKeyPerFile; key += 2) {
+        ASSERT_OK(Put(Key(key), rnd.RandomString(kValueSize)));
+      }
+      ASSERT_OK(Flush());
+    }
+    ASSERT_OK(dbfull()->TEST_WaitForCompact());
+    ASSERT_EQ(listener->compaction_finished_, 1);
+    EXPECT_EQ(listener->sub_compaction_finished_, i);
+    Destroy(options);
+  }
+}
+
 }  // namespace ROCKSDB_NAMESPACE
 
 int main(int argc, char** argv) {
diff --git a/unreleased_history/bug_fixes/subcompaction_2.md b/unreleased_history/bug_fixes/subcompaction_2.md
new file mode 100644
index 000000000..b56fc5dea
--- /dev/null
+++ b/unreleased_history/bug_fixes/subcompaction_2.md
@@ -0,0 +1 @@
+Fix a bug where compactions that are qualified to be run as 2 subcompactions were only run as one subcompaction.
\ No newline at end of file
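For context, here is a minimal standalone sketch (not RocksDB code; the helper name NumSubcompactions and the printed table are illustrative only) of the relationship the one-character change above fixes: with N subcompaction boundaries, the loop in CompactionJob::Prepare() creates N + 1 subcompaction states, so the old guard boundaries_.size() > 1 silently skipped the N == 1 case, where the compaction qualifies for exactly 2 subcompactions.

#include <cstddef>
#include <iostream>

// Illustrative only: mirrors the guard-plus-loop structure in
// CompactionJob::Prepare() without any RocksDB types. With N boundary keys
// the loop emits N + 1 key ranges (subcompactions); the guard decides
// whether the split happens at all.
size_t NumSubcompactions(size_t num_boundaries, bool old_guard) {
  const bool split =
      old_guard ? (num_boundaries > 1) : (num_boundaries >= 1);
  return split ? num_boundaries + 1 : 1;
}

int main() {
  for (size_t n = 0; n <= 3; ++n) {
    std::cout << "boundaries=" << n
              << "  old guard -> " << NumSubcompactions(n, true)
              << "  new guard -> " << NumSubcompactions(n, false) << "\n";
  }
  // n == 1 prints "old guard -> 1  new guard -> 2": the case this patch
  // fixes, matching the unreleased_history note about jobs qualified for
  // 2 subcompactions running as one.
  return 0;
}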