diff --git a/db/compaction_job_stats_test.cc b/db/compaction_job_stats_test.cc
index f867b338a..e481b4810 100644
--- a/db/compaction_job_stats_test.cc
+++ b/db/compaction_job_stats_test.cc
@@ -667,9 +667,17 @@ TEST_F(CompactionJobStatsTest, CompactionJobStatsTest) {
             1, num_keys_per_L0_file * 2,
             compression_ratio, num_keys_per_L0_file));
-    ASSERT_EQ(stats_checker->NumberOfUnverifiedStats(), 1U);
+    // In the second sub-compaction, we expect L1 compaction.
+    stats_checker->AddExpectedStats(
+        NewManualCompactionJobStats(
+            smallest_key, largest_key,
+            4, 0, num_keys_per_L0_file * 8,
+            kKeySize, kValueSize,
+            1, num_keys_per_L0_file * 8,
+            compression_ratio, 0));
+    ASSERT_EQ(stats_checker->NumberOfUnverifiedStats(), 2U);
     Compact(1, smallest_key, largest_key);
-    ASSERT_EQ("0,4", FilesPerLevel(1));
+    ASSERT_EQ("0,1", FilesPerLevel(1));
 
     options.compression = GetAnyCompression();
     if (options.compression == kNoCompression) {
       break;
diff --git a/db/db_impl.cc b/db/db_impl.cc
index 15db43f04..c1677eb9c 100644
--- a/db/db_impl.cc
+++ b/db/db_impl.cc
@@ -1373,16 +1373,8 @@ Status DBImpl::CompactRange(ColumnFamilyHandle* column_family,
     // level 0 can never be the bottommost level (i.e. if all files are in
     // level 0, we will compact to level 1)
     if (cfd->ioptions()->compaction_style == kCompactionStyleUniversal ||
-        cfd->ioptions()->compaction_style == kCompactionStyleFIFO) {
-      output_level = level;
-    } else if (level == max_level_with_files && level > 0) {
-      if (cfd->ioptions()->compaction_filter == nullptr &&
-          cfd->ioptions()->compaction_filter_factory == nullptr &&
-          cfd->ioptions()->compaction_filter_factory_v2 == nullptr) {
-        // If there is no compaction filter we can skip the compaction of
-        // the bottommost level
-        continue;
-      }
+        cfd->ioptions()->compaction_style == kCompactionStyleFIFO ||
+        (level == max_level_with_files && level > 0)) {
       output_level = level;
     } else {
       output_level = level + 1;
diff --git a/db/db_test.cc b/db/db_test.cc
index ae1a0be65..a8a6c4bdc 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -3810,59 +3810,6 @@ TEST_F(DBTest, TrivialMoveOneFile) {
   rocksdb::SyncPoint::GetInstance()->DisableProcessing();
 }
 
-TEST_F(DBTest, TrivialMoveToLastLevelWithFiles) {
-  int32_t trivial_move = 0;
-  int32_t non_trivial_move = 0;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:TrivialMove",
-      [&](void* arg) { trivial_move++; });
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:NonTrivial",
-      [&](void* arg) { non_trivial_move++; });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  Options options;
-  options.write_buffer_size = 100000000;
-  options = CurrentOptions(options);
-  DestroyAndReopen(options);
-
-  int32_t value_size = 10 * 1024;  // 10 KB
-
-  Random rnd(301);
-  std::vector<std::string> values;
-  // File with keys [ 0 => 99 ]
-  for (int i = 0; i < 100; i++) {
-    values.push_back(RandomString(&rnd, value_size));
-    ASSERT_OK(Put(Key(i), values[i]));
-  }
-  ASSERT_OK(Flush());
-  ASSERT_EQ("1", FilesPerLevel(0));
-  ASSERT_OK(db_->CompactRange(nullptr, nullptr, true, 3));
-  ASSERT_EQ("0,0,0,1", FilesPerLevel(0));
-  // Compaction did L0=>L1 (trivial move) then moved L1 files to L3
-  ASSERT_EQ(trivial_move, 1);
-  ASSERT_EQ(non_trivial_move, 0);
-
-  // File with keys [ 100 => 199]
-  for (int i = 100; i < 200; i++) {
-    values.push_back(RandomString(&rnd, value_size));
-    ASSERT_OK(Put(Key(i), values[i]));
-  }
-  ASSERT_OK(Flush());
-  ASSERT_EQ("1,0,0,1", FilesPerLevel(0));
-  ASSERT_OK(db_->CompactRange(nullptr, nullptr));
-  ASSERT_EQ("0,0,0,2", FilesPerLevel(0));
-  // Compaction did L0=>L1 L1=>L2 L2=>L3 (3 trivial moves)
-  ASSERT_EQ(trivial_move, 4);
-  ASSERT_EQ(non_trivial_move, 0);
-
-  for (int i = 0; i < 200; i++) {
-    ASSERT_EQ(Get(Key(i)), values[i]);
-  }
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
 TEST_F(DBTest, TrivialMoveNonOverlappingFiles) {
   int32_t trivial_move = 0;
   int32_t non_trivial_move = 0;