From b36ec37a4b8bc6e6b17ea386c6abb29abc76900b Mon Sep 17 00:00:00 2001
From: Jay Zhuang
Date: Wed, 26 Oct 2022 12:35:12 -0700
Subject: [PATCH] clang-format for db/compaction (#10882)

Summary:
Pull Request resolved: https://github.com/facebook/rocksdb/pull/10882

Reviewed By: riversand963

Differential Revision: D40724867

Pulled By: jay-zhuang

fbshipit-source-id: 7f387724f8cd07d8d2b90566a515a4e9078d21f1
---
 db/compaction/compaction_iterator.cc         |  16 +-
 db/compaction/compaction_iterator_test.cc    |   5 +-
 db/compaction/compaction_job.cc              |  15 +-
 db/compaction/compaction_job.h               |   6 +-
 db/compaction/compaction_job_stats_test.cc   | 271 +++++++------------
 db/compaction/compaction_job_test.cc         |   7 +-
 db/compaction/compaction_picker_universal.cc |   7 +-
 7 files changed, 134 insertions(+), 193 deletions(-)

diff --git a/db/compaction/compaction_iterator.cc b/db/compaction/compaction_iterator.cc
index b7238d857..ad4cac844 100644
--- a/db/compaction/compaction_iterator.cc
+++ b/db/compaction/compaction_iterator.cc
@@ -825,8 +825,8 @@ void CompactionIterator::NextFromInput() {
              cmp_with_history_ts_low_ < 0)) &&
             bottommost_level_) {
     // Handle the case where we have a delete key at the bottom most level
-    // We can skip outputting the key iff there are no subsequent puts for this
-    // key
+    // We can skip outputting the key iff there are no subsequent puts for
+    // this key
     assert(!compaction_ || compaction_->KeyNotExistsBeyondOutputLevel(
                                ikey_.user_key, &level_ptrs_));
     ParsedInternalKey next_ikey;
@@ -853,8 +853,8 @@ void CompactionIterator::NextFromInput() {
                 DefinitelyNotInSnapshot(next_ikey.sequence, prev_snapshot))) {
       AdvanceInputIter();
     }
-    // If you find you still need to output a row with this key, we need to output the
-    // delete too
+    // If you find you still need to output a row with this key, we need to
+    // output the delete too
     if (input_.Valid() &&
         (ParseInternalKey(input_.key(), &next_ikey, allow_data_in_errors_)
              .ok()) &&
@@ -1212,8 +1212,8 @@ inline SequenceNumber CompactionIterator::findEarliestVisibleSnapshot(
     ROCKS_LOG_FATAL(info_log_,
                     "No snapshot left in findEarliestVisibleSnapshot");
   }
-  auto snapshots_iter = std::lower_bound(
-      snapshots_->begin(), snapshots_->end(), in);
+  auto snapshots_iter =
+      std::lower_bound(snapshots_->begin(), snapshots_->end(), in);
   assert(prev_snapshot != nullptr);
   if (snapshots_iter == snapshots_->begin()) {
     *prev_snapshot = 0;
@@ -1228,8 +1228,8 @@ inline SequenceNumber CompactionIterator::findEarliestVisibleSnapshot(
     }
   }
   if (snapshot_checker_ == nullptr) {
-    return snapshots_iter != snapshots_->end()
-               ? *snapshots_iter : kMaxSequenceNumber;
+    return snapshots_iter != snapshots_->end() ? *snapshots_iter
+                                               : kMaxSequenceNumber;
   }
   bool has_released_snapshot = !released_snapshots_.empty();
  for (; snapshots_iter != snapshots_->end(); ++snapshots_iter) {
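The `findEarliestVisibleSnapshot` hunks above only reflow an existing binary search: `std::lower_bound` over the sorted snapshot list yields the earliest snapshot at or above the key's sequence number, the element before it becomes `prev_snapshot`, and the reformatted ternary falls back to `kMaxSequenceNumber` when no snapshot is new enough. A minimal standalone sketch of that lookup, with simplified types and a hypothetical `FindEarliestVisibleSnapshot` helper (this is not RocksDB's actual signature):

```cpp
#include <algorithm>
#include <cstdint>
#include <vector>

using SequenceNumber = uint64_t;
constexpr SequenceNumber kMaxSequenceNumber = UINT64_MAX;

// `snapshots` must be sorted ascending. Returns the earliest snapshot that
// can see a key written at `seq` (the first snapshot >= seq) and reports the
// newest snapshot strictly older than it through `prev_snapshot`.
SequenceNumber FindEarliestVisibleSnapshot(
    const std::vector<SequenceNumber>& snapshots, SequenceNumber seq,
    SequenceNumber* prev_snapshot) {
  auto it = std::lower_bound(snapshots.begin(), snapshots.end(), seq);
  *prev_snapshot = (it == snapshots.begin()) ? 0 : *(it - 1);
  return it != snapshots.end() ? *it : kMaxSequenceNumber;
}
```

With snapshots {10, 20, 30}, a key at sequence 15 resolves to snapshot 20 with `prev_snapshot` 10; at sequence 31 it resolves to `kMaxSequenceNumber`.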
diff --git a/db/compaction/compaction_iterator_test.cc b/db/compaction/compaction_iterator_test.cc
index 0bc3cd713..5e0768d15 100644
--- a/db/compaction/compaction_iterator_test.cc
+++ b/db/compaction/compaction_iterator_test.cc
@@ -203,7 +203,8 @@ class TestSnapshotChecker : public SnapshotChecker {
  public:
   explicit TestSnapshotChecker(
       SequenceNumber last_committed_sequence,
-      const std::unordered_map<SequenceNumber, SequenceNumber>& snapshots = {{}})
+      const std::unordered_map<SequenceNumber, SequenceNumber>& snapshots =
+          {{}})
       : last_committed_sequence_(last_committed_sequence),
         snapshots_(snapshots) {}
@@ -1038,7 +1039,7 @@ TEST_F(CompactionIteratorWithSnapshotCheckerTest,
 
 TEST_F(CompactionIteratorWithSnapshotCheckerTest,
        NotRemoveDeletionIfValuePresentToEarlierSnapshot) {
-  AddSnapshot(2,1);
+  AddSnapshot(2, 1);
   RunTest({test::KeyStr("a", 4, kTypeDeletion),
            test::KeyStr("a", 1, kTypeValue), test::KeyStr("b", 3, kTypeValue)},
           {"", "", ""},
diff --git a/db/compaction/compaction_job.cc b/db/compaction/compaction_job.cc
index 6a3ebc001..1da1bcda8 100644
--- a/db/compaction/compaction_job.cc
+++ b/db/compaction/compaction_job.cc
@@ -714,11 +714,12 @@ Status CompactionJob::Run() {
         break;
       }
       // Verify that the table is usable
-      // We set for_compaction to false and don't OptimizeForCompactionTableRead
-      // here because this is a special case after we finish the table building
-      // No matter whether use_direct_io_for_flush_and_compaction is true,
-      // we will regard this verification as user reads since the goal is
-      // to cache it here for further user reads
+      // We set for_compaction to false and don't
+      // OptimizeForCompactionTableRead here because this is a special case
+      // after we finish the table building No matter whether
+      // use_direct_io_for_flush_and_compaction is true, we will regard this
+      // verification as user reads since the goal is to cache it here for
+      // further user reads
       ReadOptions read_options;
       InternalIterator* iter = cfd->table_cache()->NewIterator(
           read_options, file_options_, cfd->internal_comparator(),
@@ -764,8 +765,8 @@ Status CompactionJob::Run() {
       }
     };
     for (size_t i = 1; i < compact_->sub_compact_states.size(); i++) {
-      thread_pool.emplace_back(verify_table,
-                               std::ref(compact_->sub_compact_states[i].status));
+      thread_pool.emplace_back(
+          verify_table, std::ref(compact_->sub_compact_states[i].status));
     }
     verify_table(compact_->sub_compact_states[0].status);
     for (auto& thread : thread_pool) {
diff --git a/db/compaction/compaction_job.h b/db/compaction/compaction_job.h
index dac7de56d..bfbce1011 100644
--- a/db/compaction/compaction_job.h
+++ b/db/compaction/compaction_job.h
@@ -261,12 +261,12 @@ class CompactionJob {
   Status OpenCompactionOutputFile(SubcompactionState* sub_compact,
                                   CompactionOutputs& outputs);
   void UpdateCompactionJobStats(
-    const InternalStats::CompactionStats& stats) const;
+      const InternalStats::CompactionStats& stats) const;
   void RecordDroppedKeys(const CompactionIterationStats& c_iter_stats,
                          CompactionJobStats* compaction_job_stats = nullptr);
 
-  void UpdateCompactionInputStatsHelper(
-      int* num_files, uint64_t* bytes_read, int input_level);
+  void UpdateCompactionInputStatsHelper(int* num_files, uint64_t* bytes_read,
+                                        int input_level);
 
  void NotifyOnSubcompactionBegin(SubcompactionState* sub_compact);
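In the `compaction_job.cc` hunk above, the reflowed `thread_pool.emplace_back` call is part of a fan-out: `verify_table` runs on a fresh thread for every subcompaction after the first, the first subcompaction is verified on the calling thread, and then all threads are joined. A minimal sketch of the same pattern under generic types (`VerifyAll` and the `std::string` statuses are stand-ins, not RocksDB code):

```cpp
#include <functional>
#include <string>
#include <thread>
#include <vector>

// Run `verify` once per status: one new thread per extra status, the first
// status on the calling thread (saving one thread), then join everything.
void VerifyAll(std::vector<std::string>& statuses,
               const std::function<void(std::string&)>& verify) {
  std::vector<std::thread> thread_pool;
  for (size_t i = 1; i < statuses.size(); i++) {
    thread_pool.emplace_back(verify, std::ref(statuses[i]));
  }
  verify(statuses[0]);  // reuse the current thread for the first unit of work
  for (auto& thread : thread_pool) {
    thread.join();
  }
}
```

Reusing the calling thread for element 0 means N units of work cost only N-1 thread spawns, which is why the loop in the diff starts at `i = 1`.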
diff --git a/db/compaction/compaction_job_stats_test.cc b/db/compaction/compaction_job_stats_test.cc
index b25191f22..930270778 100644
--- a/db/compaction/compaction_job_stats_test.cc
+++ b/db/compaction/compaction_job_stats_test.cc
@@ -155,9 +155,8 @@ class CompactionJobStatsTest : public testing::Test,
     ASSERT_OK(TryReopenWithColumnFamilies(cfs, options));
   }
 
-  Status TryReopenWithColumnFamilies(
-      const std::vector<std::string>& cfs,
-      const std::vector<Options>& options) {
+  Status TryReopenWithColumnFamilies(const std::vector<std::string>& cfs,
+                                     const std::vector<Options>& options) {
     Close();
     EXPECT_EQ(cfs.size(), options.size());
     std::vector<ColumnFamilyDescriptor> column_families;
@@ -175,9 +174,7 @@ class CompactionJobStatsTest : public testing::Test,
     return TryReopenWithColumnFamilies(cfs, v_opts);
   }
 
-  void Reopen(const Options& options) {
-    ASSERT_OK(TryReopen(options));
-  }
+  void Reopen(const Options& options) { ASSERT_OK(TryReopen(options)); }
 
   void Close() {
     for (auto h : handles_) {
@@ -226,9 +223,7 @@ class CompactionJobStatsTest : public testing::Test,
     return db_->Put(wo, handles_[cf], k, v);
   }
 
-  Status Delete(const std::string& k) {
-    return db_->Delete(WriteOptions(), k);
-  }
+  Status Delete(const std::string& k) { return db_->Delete(WriteOptions(), k); }
 
   Status Delete(int cf, const std::string& k) {
     return db_->Delete(WriteOptions(), handles_[cf], k);
@@ -338,21 +333,21 @@ class CompactionJobStatsTest : public testing::Test,
     }
   }
 
-  static void SetDeletionCompactionStats(
-      CompactionJobStats *stats, uint64_t input_deletions,
-      uint64_t expired_deletions, uint64_t records_replaced) {
+  static void SetDeletionCompactionStats(CompactionJobStats* stats,
+                                         uint64_t input_deletions,
+                                         uint64_t expired_deletions,
+                                         uint64_t records_replaced) {
     stats->num_input_deletion_records = input_deletions;
     stats->num_expired_deletion_records = expired_deletions;
     stats->num_records_replaced = records_replaced;
   }
 
-  void MakeTableWithKeyValues(
-      Random* rnd, uint64_t smallest, uint64_t largest,
-      int key_size, int value_size, uint64_t interval,
-      double ratio, int cf = 0) {
+  void MakeTableWithKeyValues(Random* rnd, uint64_t smallest, uint64_t largest,
+                              int key_size, int value_size, uint64_t interval,
+                              double ratio, int cf = 0) {
     for (auto key = smallest; key < largest; key += interval) {
       ASSERT_OK(Put(cf, Slice(Key(key, key_size)),
-                Slice(RandomString(rnd, value_size, ratio))));
+                    Slice(RandomString(rnd, value_size, ratio))));
     }
     ASSERT_OK(Flush(cf));
   }
@@ -361,9 +356,9 @@ class CompactionJobStatsTest : public testing::Test,
   // rounds of keys are inserted into the database, as per the behavior
   // of the DeletionStatsTest.
   void SelectivelyDeleteKeys(uint64_t smallest, uint64_t largest,
-      uint64_t interval, int deletion_interval, int key_size,
-      uint64_t cutoff_key_num, CompactionJobStats* stats, int cf = 0) {
-
+                             uint64_t interval, int deletion_interval,
+                             int key_size, uint64_t cutoff_key_num,
+                             CompactionJobStats* stats, int cf = 0) {
     // interval needs to be >= 2 so that deletion entries can be inserted
     // that are intended to not result in an actual key deletion by using
     // an offset of 1 from another existing key
@@ -387,20 +382,19 @@ class CompactionJobStatsTest : public testing::Test,
 
     // Insert some deletions for keys that don't exist that
     // are both in and out of the key range
-    ASSERT_OK(Delete(cf, Key(smallest+1, key_size)));
+    ASSERT_OK(Delete(cf, Key(smallest + 1, key_size)));
     deletions_made++;
 
-    ASSERT_OK(Delete(cf, Key(smallest-1, key_size)));
+    ASSERT_OK(Delete(cf, Key(smallest - 1, key_size)));
     deletions_made++;
     num_expired++;
 
-    ASSERT_OK(Delete(cf, Key(smallest-9, key_size)));
+    ASSERT_OK(Delete(cf, Key(smallest - 9, key_size)));
     deletions_made++;
     num_expired++;
 
     ASSERT_OK(Flush(cf));
-    SetDeletionCompactionStats(stats, deletions_made, num_expired,
-        num_deleted);
+    SetDeletionCompactionStats(stats, deletions_made, num_expired, num_deleted);
   }
 };
@@ -440,25 +434,20 @@ class CompactionJobStatsChecker : public EventListener {
   // use ASSERT_GE and ASSERT_LE with a reasonable bias ---
   // 10% in uncompressed case and 20% when compression is used.
   virtual void Verify(const CompactionJobStats& current_stats,
-      const CompactionJobStats& stats) {
+                      const CompactionJobStats& stats) {
     // time
     ASSERT_GT(current_stats.elapsed_micros, 0U);
 
-    ASSERT_EQ(current_stats.num_input_records,
-        stats.num_input_records);
-    ASSERT_EQ(current_stats.num_input_files,
-        stats.num_input_files);
+    ASSERT_EQ(current_stats.num_input_records, stats.num_input_records);
+    ASSERT_EQ(current_stats.num_input_files, stats.num_input_files);
     ASSERT_EQ(current_stats.num_input_files_at_output_level,
-        stats.num_input_files_at_output_level);
+              stats.num_input_files_at_output_level);
 
-    ASSERT_EQ(current_stats.num_output_records,
-        stats.num_output_records);
-    ASSERT_EQ(current_stats.num_output_files,
-        stats.num_output_files);
+    ASSERT_EQ(current_stats.num_output_records, stats.num_output_records);
+    ASSERT_EQ(current_stats.num_output_files, stats.num_output_files);
 
     ASSERT_EQ(current_stats.is_full_compaction, stats.is_full_compaction);
-    ASSERT_EQ(current_stats.is_manual_compaction,
-        stats.is_manual_compaction);
+    ASSERT_EQ(current_stats.is_manual_compaction, stats.is_manual_compaction);
 
     // file size
    double kFileSizeBias = compression_enabled_ ? 0.20 : 0.10;
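The comment preserved by the `Verify` hunk above ("use ASSERT_GE and ASSERT_LE with a reasonable bias") is the one piece of logic worth spelling out: size-dependent stats are only checked to within a relative tolerance, 10% uncompressed and 20% with compression. A hedged sketch of such a tolerance check (`CheckWithBias` is a hypothetical name; the test itself inlines ASSERT_GE/ASSERT_LE):

```cpp
#include <cassert>
#include <cstdint>

// Compare an observed byte count against an expected one, allowing a
// relative bias (e.g. 0.10 for 10%), mirroring the ASSERT_GE/ASSERT_LE
// pattern described in the comment above.
bool CheckWithBias(uint64_t actual, uint64_t expected, double bias) {
  const double lower = expected * (1.0 - bias);
  const double upper = expected * (1.0 + bias);
  return actual >= lower && actual <= upper;
}

int main() {
  assert(CheckWithBias(105, 100, 0.10));   // within 10%
  assert(!CheckWithBias(125, 100, 0.10));  // outside 10%
  assert(CheckWithBias(118, 100, 0.20));   // 20% bias when compressed
}
```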
@@ -475,18 +464,14 @@ class CompactionJobStatsChecker : public EventListener {
     ASSERT_EQ(current_stats.total_input_raw_value_bytes,
               stats.total_input_raw_value_bytes);
 
-    ASSERT_EQ(current_stats.num_records_replaced,
-        stats.num_records_replaced);
+    ASSERT_EQ(current_stats.num_records_replaced, stats.num_records_replaced);
 
-    ASSERT_EQ(current_stats.num_corrupt_keys,
-        stats.num_corrupt_keys);
+    ASSERT_EQ(current_stats.num_corrupt_keys, stats.num_corrupt_keys);
 
-    ASSERT_EQ(
-        std::string(current_stats.smallest_output_key_prefix),
-        std::string(stats.smallest_output_key_prefix));
-    ASSERT_EQ(
-        std::string(current_stats.largest_output_key_prefix),
-        std::string(stats.largest_output_key_prefix));
+    ASSERT_EQ(std::string(current_stats.smallest_output_key_prefix),
+              std::string(stats.smallest_output_key_prefix));
+    ASSERT_EQ(std::string(current_stats.largest_output_key_prefix),
+              std::string(stats.largest_output_key_prefix));
   }
 
   // Add an expected compaction stats, which will be used to
@@ -497,9 +482,7 @@ class CompactionJobStatsChecker : public EventListener {
     expected_stats_.push(stats);
   }
 
-  void EnableCompression(bool flag) {
-    compression_enabled_ = flag;
-  }
+  void EnableCompression(bool flag) { compression_enabled_ = flag; }
 
   bool verify_next_comp_io_stats() const { return verify_next_comp_io_stats_; }
 
@@ -517,45 +500,37 @@ class CompactionJobDeletionStatsChecker : public CompactionJobStatsChecker {
   // Verifies whether two CompactionJobStats match.
   void Verify(const CompactionJobStats& current_stats,
               const CompactionJobStats& stats) override {
-    ASSERT_EQ(
-        current_stats.num_input_deletion_records,
-        stats.num_input_deletion_records);
-    ASSERT_EQ(
-        current_stats.num_expired_deletion_records,
-        stats.num_expired_deletion_records);
-    ASSERT_EQ(
-        current_stats.num_records_replaced,
-        stats.num_records_replaced);
-
-    ASSERT_EQ(current_stats.num_corrupt_keys,
-        stats.num_corrupt_keys);
+    ASSERT_EQ(current_stats.num_input_deletion_records,
+              stats.num_input_deletion_records);
+    ASSERT_EQ(current_stats.num_expired_deletion_records,
+              stats.num_expired_deletion_records);
+    ASSERT_EQ(current_stats.num_records_replaced, stats.num_records_replaced);
+
+    ASSERT_EQ(current_stats.num_corrupt_keys, stats.num_corrupt_keys);
   }
 };
 
 namespace {
 
-uint64_t EstimatedFileSize(
-    uint64_t num_records, size_t key_size, size_t value_size,
-    double compression_ratio = 1.0,
-    size_t block_size = 4096,
-    int bloom_bits_per_key = 10) {
+uint64_t EstimatedFileSize(uint64_t num_records, size_t key_size,
+                           size_t value_size, double compression_ratio = 1.0,
+                           size_t block_size = 4096,
+                           int bloom_bits_per_key = 10) {
   const size_t kPerKeyOverhead = 8;
   const size_t kFooterSize = 512;
 
-  uint64_t data_size =
-      static_cast<uint64_t>(
-          num_records * (key_size + value_size * compression_ratio +
-              kPerKeyOverhead));
+  uint64_t data_size = static_cast<uint64_t>(
+      num_records *
+      (key_size + value_size * compression_ratio + kPerKeyOverhead));
 
-  return data_size + kFooterSize
-      + num_records * bloom_bits_per_key / 8  // filter block
-      + data_size * (key_size + 8) / block_size;  // index block
+  return data_size + kFooterSize +
+         num_records * bloom_bits_per_key / 8  // filter block
+         + data_size * (key_size + 8) / block_size;  // index block
 }
 
 namespace {
 
-void CopyPrefix(
-    const Slice& src, size_t prefix_length, std::string* dst) {
+void CopyPrefix(const Slice& src, size_t prefix_length, std::string* dst) {
   assert(prefix_length > 0);
   size_t length = src.size() > prefix_length ? prefix_length : src.size();
  dst->assign(src.data(), length);
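`EstimatedFileSize` above sums four parts: data (records × (key + compressed value + 8 bytes of per-key overhead)), a fixed 512-byte footer, a bloom filter at `bloom_bits_per_key / 8` bytes per record, and an index of roughly one key-sized entry per data block. A self-contained copy with one worked example (the `main` and its numbers are illustrative only, not from the test):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Same estimator as in the hunk above, with the same defaults.
uint64_t EstimatedFileSize(uint64_t num_records, size_t key_size,
                           size_t value_size, double compression_ratio = 1.0,
                           size_t block_size = 4096,
                           int bloom_bits_per_key = 10) {
  const size_t kPerKeyOverhead = 8;
  const size_t kFooterSize = 512;
  uint64_t data_size = static_cast<uint64_t>(
      num_records *
      (key_size + value_size * compression_ratio + kPerKeyOverhead));
  return data_size + kFooterSize +
         num_records * bloom_bits_per_key / 8        // filter block
         + data_size * (key_size + 8) / block_size;  // index block
}

int main() {
  // 1000 records, 10-byte keys, 100-byte values, no compression:
  //   data   = 1000 * (10 + 100 + 8)     = 118000 bytes
  //   filter = 1000 * 10 / 8             =   1250 bytes
  //   index  = 118000 * (10 + 8) / 4096  =    518 bytes (integer division)
  //   total  = 118000 + 512 + 1250 + 518 = 120280 bytes
  std::printf("%llu\n",
              (unsigned long long)EstimatedFileSize(1000, 10, 100));
}
```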
@@ -581,28 +556,24 @@ CompactionJobStats NewManualCompactionJobStats(
   stats.num_output_files = num_output_files;
 
   stats.total_input_bytes =
-      EstimatedFileSize(
-          num_input_records / num_input_files,
-          key_size, value_size, compression_ratio) * num_input_files;
+      EstimatedFileSize(num_input_records / num_input_files, key_size,
+                        value_size, compression_ratio) *
+      num_input_files;
   stats.total_output_bytes =
-      EstimatedFileSize(
-          num_output_records / num_output_files,
-          key_size, value_size, compression_ratio) * num_output_files;
-  stats.total_input_raw_key_bytes =
-      num_input_records * (key_size + 8);
-  stats.total_input_raw_value_bytes =
-      num_input_records * value_size;
+      EstimatedFileSize(num_output_records / num_output_files, key_size,
+                        value_size, compression_ratio) *
+      num_output_files;
+  stats.total_input_raw_key_bytes = num_input_records * (key_size + 8);
+  stats.total_input_raw_value_bytes = num_input_records * value_size;
 
   stats.is_full_compaction = is_full;
   stats.is_manual_compaction = is_manual;
 
   stats.num_records_replaced = num_records_replaced;
 
-  CopyPrefix(smallest_key,
-             CompactionJobStats::kMaxPrefixLength,
+  CopyPrefix(smallest_key, CompactionJobStats::kMaxPrefixLength,
              &stats.smallest_output_key_prefix);
-  CopyPrefix(largest_key,
-             CompactionJobStats::kMaxPrefixLength,
+  CopyPrefix(largest_key, CompactionJobStats::kMaxPrefixLength,
              &stats.largest_output_key_prefix);
 
   return stats;
@@ -662,13 +633,11 @@ TEST_P(CompactionJobStatsTest, CompactionJobStatsTest) {
 
     // 1st Phase: generate "num_L0_files" L0 files.
     int num_L0_files = 0;
-    for (uint64_t start_key = key_base;
-         start_key <= key_base * kTestScale;
-         start_key += key_base) {
-      MakeTableWithKeyValues(
-          &rnd, start_key, start_key + key_base - 1,
-          kKeySize, kValueSize, key_interval,
-          compression_ratio, 1);
+    for (uint64_t start_key = key_base; start_key <= key_base * kTestScale;
+         start_key += key_base) {
+      MakeTableWithKeyValues(&rnd, start_key, start_key + key_base - 1,
+                             kKeySize, kValueSize, key_interval,
+                             compression_ratio, 1);
       snprintf(buf, kBufSize, "%d", ++num_L0_files);
       ASSERT_EQ(std::string(buf), FilesPerLevel(1));
     }
@@ -684,13 +653,9 @@ TEST_P(CompactionJobStatsTest, CompactionJobStatsTest) {
          start_key += key_base, count++) {
       smallest_key = Key(start_key, 10);
       largest_key = Key(start_key + key_base - key_interval, 10);
-      stats_checker->AddExpectedStats(
-          NewManualCompactionJobStats(
-              smallest_key, largest_key,
-              1, 0, num_keys_per_L0_file,
-              kKeySize, kValueSize,
-              1, num_keys_per_L0_file,
-              compression_ratio, 0));
+      stats_checker->AddExpectedStats(NewManualCompactionJobStats(
+          smallest_key, largest_key, 1, 0, num_keys_per_L0_file, kKeySize,
+          kValueSize, 1, num_keys_per_L0_file, compression_ratio, 0));
       ASSERT_EQ(stats_checker->NumberOfUnverifiedStats(), 1U);
       TEST_Compact(0, 1, smallest_key, largest_key);
       snprintf(buf, kBufSize, "%d,%d", num_L0_files - count, count);
@@ -701,14 +666,10 @@ TEST_P(CompactionJobStatsTest, CompactionJobStatsTest) {
     int num_remaining_L0 = num_L0_files - L0_compaction_count;
     smallest_key = Key(key_base * (L0_compaction_count + 1), 10);
     largest_key = Key(key_base * (kTestScale + 1) - key_interval, 10);
-    stats_checker->AddExpectedStats(
-        NewManualCompactionJobStats(
-            smallest_key, largest_key,
-            num_remaining_L0,
-            0, num_keys_per_L0_file * num_remaining_L0,
-            kKeySize, kValueSize,
-            1, num_keys_per_L0_file * num_remaining_L0,
-            compression_ratio, 0));
+    stats_checker->AddExpectedStats(NewManualCompactionJobStats(
+        smallest_key, largest_key, num_remaining_L0, 0,
+        num_keys_per_L0_file * num_remaining_L0, kKeySize, kValueSize, 1,
+        num_keys_per_L0_file * num_remaining_L0, compression_ratio, 0));
     ASSERT_EQ(stats_checker->NumberOfUnverifiedStats(), 1U);
     TEST_Compact(0, 1, smallest_key, largest_key);
@@ -719,13 +680,11 @@ TEST_P(CompactionJobStatsTest, CompactionJobStatsTest) {
 
     // 3rd Phase: generate sparse L0 files (wider key-range, same num of keys)
     int sparseness = 2;
-    for (uint64_t start_key = key_base;
-         start_key <= key_base * kTestScale;
-         start_key += key_base * sparseness) {
+    for (uint64_t start_key = key_base; start_key <= key_base * kTestScale;
+         start_key += key_base * sparseness) {
       MakeTableWithKeyValues(
-          &rnd, start_key, start_key + key_base * sparseness - 1,
-          kKeySize, kValueSize,
-          key_base * sparseness / num_keys_per_L0_file,
+          &rnd, start_key, start_key + key_base * sparseness - 1, kKeySize,
+          kValueSize, key_base * sparseness / num_keys_per_L0_file,
           compression_ratio, 1);
       snprintf(buf, kBufSize, "%d,%d", ++num_L0_files, num_L1_files);
       ASSERT_EQ(std::string(buf), FilesPerLevel(1));
@@ -737,21 +696,15 @@ TEST_P(CompactionJobStatsTest, CompactionJobStatsTest) {
     // output files without coordinating to see if the output could fit into
     // a smaller number of files like it does when it runs sequentially
     int num_output_files = options.max_subcompactions > 1 ? 2 : 1;
-    for (uint64_t start_key = key_base;
-         num_L0_files > 1;
+    for (uint64_t start_key = key_base; num_L0_files > 1;
          start_key += key_base * sparseness) {
       smallest_key = Key(start_key, 10);
-      largest_key =
-          Key(start_key + key_base * sparseness - key_interval, 10);
-      stats_checker->AddExpectedStats(
-          NewManualCompactionJobStats(
-              smallest_key, largest_key,
-              3, 2, num_keys_per_L0_file * 3,
-              kKeySize, kValueSize,
-              num_output_files,
-              num_keys_per_L0_file * 2,  // 1/3 of the data will be updated.
-              compression_ratio,
-              num_keys_per_L0_file));
+      largest_key = Key(start_key + key_base * sparseness - key_interval, 10);
+      stats_checker->AddExpectedStats(NewManualCompactionJobStats(
+          smallest_key, largest_key, 3, 2, num_keys_per_L0_file * 3, kKeySize,
+          kValueSize, num_output_files,
+          num_keys_per_L0_file * 2,  // 1/3 of the data will be updated.
+          compression_ratio, num_keys_per_L0_file));
       ASSERT_EQ(stats_checker->NumberOfUnverifiedStats(), 1U);
       Compact(1, smallest_key, largest_key);
       if (options.max_subcompactions == 1) {
@@ -766,14 +719,10 @@ TEST_P(CompactionJobStatsTest, CompactionJobStatsTest) {
 
    // In the first sub-compaction, we expect L0 compaction.
     smallest_key = Key(key_base, 10);
     largest_key = Key(key_base * (kTestScale + 1) - key_interval, 10);
-    stats_checker->AddExpectedStats(
-        NewManualCompactionJobStats(
-            Key(key_base * (kTestScale + 1 - sparseness), 10), largest_key,
-            2, 1, num_keys_per_L0_file * 3,
-            kKeySize, kValueSize,
-            1, num_keys_per_L0_file * 2,
-            compression_ratio,
-            num_keys_per_L0_file));
+    stats_checker->AddExpectedStats(NewManualCompactionJobStats(
+        Key(key_base * (kTestScale + 1 - sparseness), 10), largest_key, 2, 1,
+        num_keys_per_L0_file * 3, kKeySize, kValueSize, 1,
+        num_keys_per_L0_file * 2, compression_ratio, num_keys_per_L0_file));
     ASSERT_EQ(stats_checker->NumberOfUnverifiedStats(), 1U);
     Compact(1, smallest_key, largest_key);
@@ -869,7 +818,7 @@ TEST_P(CompactionJobStatsTest, DeletionStatsTest) {
   Options options;
   options.listeners.emplace_back(stats_checker);
   options.create_if_missing = true;
-  options.level0_file_num_compaction_trigger = kTestScale+1;
+  options.level0_file_num_compaction_trigger = kTestScale + 1;
   options.num_levels = 3;
   options.compression = kNoCompression;
   options.max_bytes_for_level_multiplier = 2;
@@ -881,13 +830,10 @@ TEST_P(CompactionJobStatsTest, DeletionStatsTest) {
   // Stage 1: Generate several L0 files and then send them to L2 by
   // using CompactRangeOptions and CompactRange(). These files will
   // have a strict subset of the keys from the full key-range
-  for (uint64_t start_key = key_base;
-       start_key <= key_base * kTestScale / 2;
-       start_key += key_base) {
-    MakeTableWithKeyValues(
-        &rnd, start_key, start_key + key_base - 1,
-        kKeySize, kValueSize, key_interval,
-        compression_ratio, 1);
+  for (uint64_t start_key = key_base; start_key <= key_base * kTestScale / 2;
+       start_key += key_base) {
+    MakeTableWithKeyValues(&rnd, start_key, start_key + key_base - 1, kKeySize,
+                           kValueSize, key_interval, compression_ratio, 1);
   }
 
   CompactRangeOptions cr_options;
@@ -897,13 +843,10 @@ TEST_P(CompactionJobStatsTest, DeletionStatsTest) {
   ASSERT_GT(NumTableFilesAtLevel(2, 1), 0);
 
   // Stage 2: Generate files including keys from the entire key range
-  for (uint64_t start_key = key_base;
-       start_key <= key_base * kTestScale;
-       start_key += key_base) {
-    MakeTableWithKeyValues(
-        &rnd, start_key, start_key + key_base - 1,
-        kKeySize, kValueSize, key_interval,
-        compression_ratio, 1);
+  for (uint64_t start_key = key_base; start_key <= key_base * kTestScale;
+       start_key += key_base) {
+    MakeTableWithKeyValues(&rnd, start_key, start_key + key_base - 1, kKeySize,
+                           kValueSize, key_interval, compression_ratio, 1);
   }
 
   // Send these L0 files to L1
@@ -912,16 +855,16 @@ TEST_P(CompactionJobStatsTest, DeletionStatsTest) {
 
   // Add a new record and flush so now there is a L0 file
   // with a value too (not just deletions from the next step)
-  ASSERT_OK(Put(1, Key(key_base-6, kKeySize), "test"));
+  ASSERT_OK(Put(1, Key(key_base - 6, kKeySize), "test"));
   ASSERT_OK(Flush(1));
 
   // Stage 3: Generate L0 files with some deletions so now
   // there are files with the same key range in L0, L1, and L2
   int deletion_interval = 3;
   CompactionJobStats first_compaction_stats;
-  SelectivelyDeleteKeys(key_base, largest_key_num,
-      key_interval, deletion_interval, kKeySize, cutoff_key_num,
-      &first_compaction_stats, 1);
+  SelectivelyDeleteKeys(key_base, largest_key_num, key_interval,
+                        deletion_interval, kKeySize, cutoff_key_num,
+                        &first_compaction_stats, 1);
 
   stats_checker->AddExpectedStats(first_compaction_stats);
 
@@ -932,8 +875,7 @@ TEST_P(CompactionJobStatsTest, DeletionStatsTest) {
 
 namespace {
 int GetUniversalCompactionInputUnits(uint32_t num_flushes) {
   uint32_t compaction_input_units;
-  for (compaction_input_units = 1;
-       num_flushes >= compaction_input_units;
+  for (compaction_input_units = 1; num_flushes >= compaction_input_units;
        compaction_input_units *= 2) {
     if ((num_flushes & compaction_input_units) != 0) {
      return compaction_input_units > 1 ? compaction_input_units : 0;
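`GetUniversalCompactionInputUnits` in the hunk above walks powers of two until it hits the lowest set bit of `num_flushes`, returning that power unless it is bit 0. The loop is equivalent to isolating the lowest set bit directly; a sketch under that reading (`LowestCompactionInputUnits` is a hypothetical rename, and the asserts show the mapping):

```cpp
#include <cassert>
#include <cstdint>

// n & (~n + 1) (i.e. n & -n in two's complement) isolates the lowest set bit
// of n. Returning 0 when that bit is bit 0 matches the loop's
// `compaction_input_units > 1 ? compaction_input_units : 0`.
int LowestCompactionInputUnits(uint32_t num_flushes) {
  uint32_t lowest = num_flushes & (~num_flushes + 1);
  return lowest > 1 ? static_cast<int>(lowest) : 0;
}

int main() {
  assert(LowestCompactionInputUnits(0) == 0);   // no flushes yet
  assert(LowestCompactionInputUnits(5) == 0);   // 0b101: lowest bit is 1
  assert(LowestCompactionInputUnits(4) == 4);   // 0b100
  assert(LowestCompactionInputUnits(12) == 4);  // 0b1100
}
```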
@@ -998,13 +940,10 @@ TEST_P(CompactionJobStatsTest, UniversalCompactionTest) {
   }
   ASSERT_EQ(stats_checker->NumberOfUnverifiedStats(), 3U);
 
-  for (uint64_t start_key = key_base;
-       start_key <= key_base * kTestScale;
-       start_key += key_base) {
-    MakeTableWithKeyValues(
-        &rnd, start_key, start_key + key_base - 1,
-        kKeySize, kValueSize, key_interval,
-        compression_ratio, 1);
+  for (uint64_t start_key = key_base; start_key <= key_base * kTestScale;
+       start_key += key_base) {
+    MakeTableWithKeyValues(&rnd, start_key, start_key + key_base - 1, kKeySize,
+                           kValueSize, key_interval, compression_ratio, 1);
     ASSERT_OK(static_cast_with_check<DBImpl>(db_)->TEST_WaitForCompact());
   }
   ASSERT_EQ(stats_checker->NumberOfUnverifiedStats(), 0U);
diff --git a/db/compaction/compaction_job_test.cc b/db/compaction/compaction_job_test.cc
index de80e1a4a..c87871100 100644
--- a/db/compaction/compaction_job_test.cc
+++ b/db/compaction/compaction_job_test.cc
@@ -40,7 +40,7 @@ namespace ROCKSDB_NAMESPACE {
 namespace {
 
 void VerifyInitializationOfCompactionJobStats(
-      const CompactionJobStats& compaction_job_stats) {
+    const CompactionJobStats& compaction_job_stats) {
 #if !defined(IOS_CROSS_COMPILE)
   ASSERT_EQ(compaction_job_stats.elapsed_micros, 0U);
 
@@ -499,8 +499,7 @@ class CompactionJobTestBase : public testing::Test {
 
           // This is how the key will look like once it's written in bottommost
           // file
-          InternalKey bottommost_internal_key(
-              key, 0, kTypeValue);
+          InternalKey bottommost_internal_key(key, 0, kTypeValue);
 
           if (corrupt_id(k)) {
             test::CorruptKeyType(&internal_key);
@@ -620,7 +619,7 @@ class CompactionJobTestBase : public testing::Test {
       CompactionInputFiles compaction_level;
       compaction_level.level = input_levels[i];
       compaction_level.files.insert(compaction_level.files.end(),
-          level_files.begin(), level_files.end());
+                                    level_files.begin(), level_files.end());
       compaction_input_files.push_back(compaction_level);
       num_input_files += level_files.size();
     }
diff --git a/db/compaction/compaction_picker_universal.cc b/db/compaction/compaction_picker_universal.cc
index e166f9a67..b36f7f69e 100644
--- a/db/compaction/compaction_picker_universal.cc
+++ b/db/compaction/compaction_picker_universal.cc
@@ -308,9 +308,10 @@ void UniversalCompactionBuilder::SortedRun::Dump(char* out_buf,
     if (file->fd.GetPathId() == 0 || !print_path) {
       snprintf(out_buf, out_buf_size, "file %" PRIu64, file->fd.GetNumber());
     } else {
-      snprintf(out_buf, out_buf_size, "file %" PRIu64
-               "(path "
-               "%" PRIu32 ")",
+      snprintf(out_buf, out_buf_size,
+               "file %" PRIu64
+               "(path "
+               "%" PRIu32 ")",
               file->fd.GetNumber(), file->fd.GetPathId());
    }
  } else {