From e8263dbdaad0546c54bddd01a8454c2e750a86c2 Mon Sep 17 00:00:00 2001
From: sdong
Date: Fri, 20 Sep 2019 12:00:55 -0700
Subject: [PATCH] Apply formatter to recent 200+ commits. (#5830)

Summary:
Further apply formatter to more recent commits.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/5830

Test Plan: Run all existing tests.

Differential Revision: D17488031

fbshipit-source-id: 137458fd94d56dd271b8b40c522b03036943a2ab
---
 cache/cache_bench.cc | 4 +-
 db/column_family.cc | 6 +-
 db/compaction/compaction_job.cc | 2 +-
 db/compaction/compaction_job_stats_test.cc | 2 +-
 db/compaction/compaction_job_test.cc | 2 +-
 db/db_basic_test.cc | 73 +++-----
 db/db_compaction_test.cc | 2 +-
 db/db_filesnapshot.cc | 3 +-
 db/db_impl/db_impl.cc | 11 +-
 db/db_impl/db_impl_compaction_flush.cc | 3 +-
 db/db_impl/db_impl_open.cc | 9 +-
 db/db_impl/db_impl_secondary.cc | 3 +-
 db/db_impl/db_secondary_test.cc | 9 +-
 db/db_info_dumper.cc | 4 +-
 db/db_test.cc | 5 +-
 db/db_test_util.cc | 4 +-
 db/db_universal_compaction_test.cc | 8 +-
 db/dbformat.cc | 2 +-
 db/external_sst_file_ingestion_job.cc | 27 ++-
 db/import_column_family_job.cc | 12 +-
 db/import_column_family_job.h | 10 +-
 db/import_column_family_test.cc | 6 +-
 db/internal_stats.cc | 2 +-
 db/plain_table_db_test.cc | 11 +-
 db/range_tombstone_fragmenter.cc | 2 +-
 db/table_cache.cc | 40 ++--
 db/table_cache.h | 3 +-
 db/version_builder.cc | 17 +-
 db/version_edit.h | 2 +-
 db/version_set.cc | 3 +-
 db/wal_manager.cc | 18 +-
 db/wal_manager_test.cc | 4 +-
 db/write_batch.cc | 4 +-
 env/env_test.cc | 8 +-
 examples/multi_processes_example.cc | 2 +-
 file/delete_scheduler_test.cc | 2 +-
 include/rocksdb/db.h | 3 +-
 include/rocksdb/utilities/ldb_cmd.h | 2 +-
 include/rocksdb/utilities/stackable_db.h | 3 +-
 .../AdvancedColumnFamilyOptionsInterface.java | 5 +-
 ...edMutableColumnFamilyOptionsInterface.java | 5 +-
 .../rocksdb/ColumnFamilyOptionsInterface.java | 6 +-
 .../java/org/rocksdb/DBOptionsInterface.java | 1 -
 .../MutableColumnFamilyOptionsInterface.java | 7 +-
 .../rocksdb/MutableDBOptionsInterface.java | 1 -
 .../java/org/rocksdb/util/Environment.java | 4 +-
 .../org/rocksdb/util/EnvironmentTest.java | 16 +-
 logging/auto_roll_logger.cc | 2 +-
 logging/env_logger.h | 2 +-
 logging/env_logger_test.cc | 6 +-
 logging/event_logger.cc | 2 +-
 monitoring/histogram.cc | 4 +-
 monitoring/perf_context_imp.h | 3 +-
 monitoring/statistics.cc | 6 +-
 options/cf_options.cc | 4 +-
 options/options_test.cc | 2 +-
 port/jemalloc_helper.h | 4 +-
 port/win/env_win.cc | 3 +-
 .../block_based/block_based_table_factory.cc | 2 +-
 table/block_based/block_based_table_reader.cc | 70 +++----
 table/block_based/filter_block.h | 4 +-
 table/block_based/full_filter_block.cc | 13 +-
 .../block_based/uncompression_dict_reader.cc | 6 +-
 table/cuckoo/cuckoo_table_reader.h | 3 +-
 table/cuckoo/cuckoo_table_reader_test.cc | 4 +-
 table/mock_table.cc | 3 +-
 table/mock_table.h | 2 +-
 table/plain/plain_table_bloom.cc | 16 +-
 table/plain/plain_table_bloom.h | 2 +-
 table/plain/plain_table_reader.h | 3 +-
 table/table_reader.h | 11 +-
 test_util/transaction_test_util.cc | 2 +-
 tools/db_bench_tool.cc | 2 +-
 tools/db_stress.cc | 2 +-
 tools/ldb_cmd.cc | 6 +-
 tools/trace_analyzer_tool.cc | 63 ++++---
 tools/write_stress.cc | 2 +-
 util/bloom_test.cc | 36 ++--
 util/crc32c_arm64.cc | 55 +++---
 util/crc32c_arm64.h | 2 +-
 util/dynamic_bloom.cc | 9 +-
 util/dynamic_bloom.h | 14 +-
 util/dynamic_bloom_test.cc | 20 +-
 util/hash_test.cc | 12 +-
 util/rate_limiter_test.cc | 2 +-
 util/threadpool_imp.cc | 35 ++--
 utilities/backupable/backupable_db.cc | 2 +-
 utilities/blob_db/blob_file.cc | 2 +-
 utilities/checkpoint/checkpoint_impl.cc | 2 +-
 utilities/checkpoint/checkpoint_test.cc | 175 +++++++++---------
 utilities/merge_operators/sortlist.cc | 2 +-
 .../persistent_cache/persistent_cache_tier.cc | 2 +-
 utilities/simulator_cache/sim_cache.cc | 4 +-
 .../optimistic_transaction_test.cc | 1 -
 utilities/transactions/transaction_base.cc | 3 +-
 utilities/transactions/transaction_test.h | 2 +-
 .../write_prepared_transaction_test.cc | 4 +-
 .../transactions/write_prepared_txn_db.cc | 2 +-
 .../transactions/write_prepared_txn_db.h | 2 +-
 .../transactions/write_unprepared_txn.cc | 5 +-
 .../transactions/write_unprepared_txn_db.cc | 2 +-
 utilities/ttl/db_ttl_impl.h | 4 +-
 utilities/ttl/ttl_test.cc | 21 +--
 .../write_batch_with_index.cc | 6 +-
 .../write_batch_with_index_test.cc | 1 -
 105 files changed, 515 insertions(+), 544 deletions(-)

diff --git a/cache/cache_bench.cc b/cache/cache_bench.cc
index 35deb2005..288662ad9 100644
--- a/cache/cache_bench.cc
+++ b/cache/cache_bench.cc
@@ -11,9 +11,9 @@ int main() {
 }
 #else

-#include
-#include
 #include
+#include
+#include

 #include "port/port.h"
 #include "rocksdb/cache.h"
diff --git a/db/column_family.cc b/db/column_family.cc
index 4c67b7d76..16688d6ce 100644
--- a/db/column_family.cc
+++ b/db/column_family.cc
@@ -9,11 +9,11 @@

 #include "db/column_family.h"

-#include
-#include
-#include
 #include
+#include
 #include
+#include
+#include

 #include "db/compaction/compaction_picker.h"
 #include "db/compaction/compaction_picker_fifo.h"
diff --git a/db/compaction/compaction_job.cc b/db/compaction/compaction_job.cc
index 5d2a6bfd8..7ec7e2b33 100644
--- a/db/compaction/compaction_job.cc
+++ b/db/compaction/compaction_job.cc
@@ -7,8 +7,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.

-#include
 #include
+#include
 #include
 #include
 #include
diff --git a/db/compaction/compaction_job_stats_test.cc b/db/compaction/compaction_job_stats_test.cc
index 221ee3eaa..f25a38ec6 100644
--- a/db/compaction/compaction_job_stats_test.cc
+++ b/db/compaction/compaction_job_stats_test.cc
@@ -7,8 +7,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
-#include
 #include
+#include
 #include
 #include
diff --git a/db/compaction/compaction_job_test.cc b/db/compaction/compaction_job_test.cc
index f2939f220..3f403e1e5 100644
--- a/db/compaction/compaction_job_test.cc
+++ b/db/compaction/compaction_job_test.cc
@@ -5,9 +5,9 @@

 #ifndef ROCKSDB_LITE

-#include
 #include
 #include
+#include
 #include
 #include
 #include
diff --git a/db/db_basic_test.cc b/db/db_basic_test.cc
index 907fd3b43..23f7d4451 100644
--- a/db/db_basic_test.cc
+++ b/db/db_basic_test.cc
@@ -1356,9 +1356,9 @@ TEST_F(DBBasicTest, MultiGetBatchedMultiLevel) {
 // Test class for batched MultiGet with prefix extractor
 // Param bool - If true, use partitioned filters
 //              If false, use full filter block
-class MultiGetPrefixExtractorTest
-    : public DBBasicTest,
-      public ::testing::WithParamInterface<bool> {};
+class MultiGetPrefixExtractorTest : public DBBasicTest,
+                                    public ::testing::WithParamInterface<bool> {
+};

 TEST_P(MultiGetPrefixExtractorTest, Batched) {
   Options options = CurrentOptions();
@@ -1396,14 +1396,12 @@ TEST_P(MultiGetPrefixExtractorTest, Batched) {
   ASSERT_EQ(get_perf_context()->bloom_sst_hit_count, 4);
 }

-INSTANTIATE_TEST_CASE_P(
-    MultiGetPrefix, MultiGetPrefixExtractorTest,
-    ::testing::Bool());
+INSTANTIATE_TEST_CASE_P(MultiGetPrefix, MultiGetPrefixExtractorTest,
+                        ::testing::Bool());

 #ifndef ROCKSDB_LITE
-class DBMultiGetRowCacheTest
-    : public DBBasicTest,
-      public ::testing::WithParamInterface<bool> {};
+class DBMultiGetRowCacheTest : public DBBasicTest,
+                               public ::testing::WithParamInterface<bool> {};

 TEST_P(DBMultiGetRowCacheTest, MultiGetBatched) {
   do {
@@ -1543,10 +1541,9 @@ TEST_F(DBBasicTest, GetAllKeyVersions) {

 class DBBasicTestWithParallelIO
     : public DBTestBase,
-    public testing::WithParamInterface<std::tuple<bool, bool, bool, bool>> {
+      public testing::WithParamInterface<std::tuple<bool, bool, bool, bool>> {
  public:
-  DBBasicTestWithParallelIO()
-      : DBTestBase("/db_basic_test_with_parallel_io") {
+  DBBasicTestWithParallelIO() : DBTestBase("/db_basic_test_with_parallel_io") {
     bool compressed_cache = std::get<0>(GetParam());
     bool uncompressed_cache = std::get<1>(GetParam());
     compression_enabled_ = std::get<2>(GetParam());
@@ -1570,7 +1567,7 @@ class DBBasicTestWithParallelIO
     table_options.block_cache = uncompressed_cache_;
     table_options.block_cache_compressed = compressed_cache_;
     table_options.flush_block_policy_factory.reset(
-          new MyFlushBlockPolicyFactory());
+        new MyFlushBlockPolicyFactory());
     options.table_factory.reset(new BlockBasedTableFactory(table_options));
     if (!compression_enabled_) {
       options.compression = kNoCompression;
@@ -1598,15 +1595,9 @@ class DBBasicTestWithParallelIO
   int num_found() { return uncompressed_cache_->num_found(); }
   int num_inserts() { return uncompressed_cache_->num_inserts(); }

-  int num_lookups_compressed() {
-    return compressed_cache_->num_lookups();
-  }
-  int num_found_compressed() {
-    return compressed_cache_->num_found();
-  }
-  int num_inserts_compressed() {
-    return compressed_cache_->num_inserts();
-  }
+  int num_lookups_compressed() { return compressed_cache_->num_lookups(); }
+  int num_found_compressed() { return compressed_cache_->num_found(); }
+  int num_inserts_compressed() { return compressed_cache_->num_inserts(); }

   bool fill_cache() { return fill_cache_; }

@@ -1614,8 +1605,7 @@ class DBBasicTestWithParallelIO
   static void TearDownTestCase() {}

  private:
-  class MyFlushBlockPolicyFactory
-      : public FlushBlockPolicyFactory {
+  class MyFlushBlockPolicyFactory : public FlushBlockPolicyFactory {
    public:
     MyFlushBlockPolicyFactory() {}

@@ -1630,11 +1620,10 @@ class DBBasicTestWithParallelIO
     }
   };

-  class MyFlushBlockPolicy
-      : public FlushBlockPolicy {
+  class MyFlushBlockPolicy : public FlushBlockPolicy {
    public:
     explicit MyFlushBlockPolicy(const BlockBuilder& data_block_builder)
-      : num_keys_(0), data_block_builder_(data_block_builder) {}
+        : num_keys_(0), data_block_builder_(data_block_builder) {}

     bool Update(const Slice& /*key*/, const Slice& /*value*/) override {
       if (data_block_builder_.empty()) {
@@ -1656,11 +1645,10 @@ class DBBasicTestWithParallelIO
     const BlockBuilder& data_block_builder_;
   };

-  class MyBlockCache
-      : public Cache {
+  class MyBlockCache : public Cache {
    public:
     explicit MyBlockCache(std::shared_ptr<Cache>& target)
-      : target_(target), num_lookups_(0), num_found_(0), num_inserts_(0) {}
+        : target_(target), num_lookups_(0), num_found_(0), num_inserts_(0) {}

     virtual const char* Name() const override { return "MyBlockCache"; }

@@ -1682,9 +1670,7 @@ class DBBasicTestWithParallelIO
       return handle;
     }

-    virtual bool Ref(Handle* handle) override {
-      return target_->Ref(handle);
-    }
+    virtual bool Ref(Handle* handle) override { return target_->Ref(handle); }

     virtual bool Release(Handle* handle, bool force_erase = false) override {
       return target_->Release(handle, force_erase);
@@ -1694,12 +1680,8 @@ class DBBasicTestWithParallelIO
       return target_->Value(handle);
     }

-    virtual void Erase(const Slice& key) override {
-      target_->Erase(key);
-    }
-    virtual uint64_t NewId() override {
-      return target_->NewId();
-    }
+    virtual void Erase(const Slice& key) override { target_->Erase(key); }
+    virtual uint64_t NewId() override { return target_->NewId(); }

     virtual void SetCapacity(size_t capacity) override {
       target_->SetCapacity(capacity);
@@ -1717,9 +1699,7 @@ class DBBasicTestWithParallelIO
       return target_->GetCapacity();
     }

-    virtual size_t GetUsage() const override {
-      return target_->GetUsage();
-    }
+    virtual size_t GetUsage() const override { return target_->GetUsage(); }

     virtual size_t GetUsage(Handle* handle) const override {
       return target_->GetUsage(handle);
@@ -1745,6 +1725,7 @@ class DBBasicTestWithParallelIO

     int num_found() { return num_found_; }
     int num_inserts() { return num_inserts_; }
+
    private:
     std::shared_ptr<Cache> target_;
     int num_lookups_;
@@ -1777,7 +1758,7 @@ TEST_P(DBBasicTestWithParallelIO, MultiGet) {
   statuses.resize(keys.size());

   dbfull()->MultiGet(ro, dbfull()->DefaultColumnFamily(), keys.size(),
-      keys.data(), values.data(), statuses.data(), true);
+                     keys.data(), values.data(), statuses.data(), true);

   ASSERT_TRUE(CheckValue(0, values[0].ToString()));
   ASSERT_TRUE(CheckValue(50, values[1].ToString()));
@@ -1789,7 +1770,7 @@ TEST_P(DBBasicTestWithParallelIO, MultiGet) {
   values[0].Reset();
   values[1].Reset();
   dbfull()->MultiGet(ro, dbfull()->DefaultColumnFamily(), keys.size(),
-      keys.data(), values.data(), statuses.data(), true);
+                     keys.data(), values.data(), statuses.data(), true);

   ASSERT_TRUE(CheckValue(1, values[0].ToString()));
   ASSERT_TRUE(CheckValue(51, values[1].ToString()));
@@ -1798,7 +1779,7 @@ TEST_P(DBBasicTestWithParallelIO, MultiGet) {

   keys.resize(10);
   statuses.resize(10);
-  std::vector<int> key_ints{1,2,15,16,55,81,82,83,84,85};
+  std::vector<int> key_ints{1, 2, 15, 16, 55, 81, 82, 83, 84, 85};
   for (size_t i = 0; i < key_ints.size(); ++i) {
     key_data[i] = Key(key_ints[i]);
     keys[i] = Slice(key_data[i]);
     values[i].Reset();
   }
   dbfull()->MultiGet(ro, dbfull()->DefaultColumnFamily(), keys.size(),
-      keys.data(), values.data(), statuses.data(), true);
+                     keys.data(), values.data(), statuses.data(), true);
   for (size_t i = 0; i < key_ints.size(); ++i) {
     ASSERT_OK(statuses[i]);
     ASSERT_TRUE(CheckValue(key_ints[i], values[i].ToString()));
diff --git a/db/db_compaction_test.cc b/db/db_compaction_test.cc
index 1ff8bd38c..dad19921c 100644
--- a/db/db_compaction_test.cc
+++ b/db/db_compaction_test.cc
@@ -4664,7 +4664,7 @@ TEST_F(DBCompactionTest, ConsistencyFailTest) {
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
       "VersionBuilder::CheckConsistency", [&](void* arg) {
-        auto p =
+        auto p =
            reinterpret_cast*>(arg);
         // just swap the two FileMetaData so that we hit error
         // in CheckConsistency function
diff --git a/db/db_filesnapshot.cc b/db/db_filesnapshot.cc
index a6dcdcccb..dd5f8f67f 100644
--- a/db/db_filesnapshot.cc
+++ b/db/db_filesnapshot.cc
@@ -6,9 +6,9 @@

 #ifndef ROCKSDB_LITE

-#include
 #include
 #include
+#include
 #include

 #include "db/db_impl/db_impl.h"
 #include "db/job_context.h"
@@ -172,7 +172,6 @@ Status DBImpl::GetCurrentWalFile(std::unique_ptr<LogFile>* current_log_file) {
   return wal_manager_.GetLiveWalFile(current_logfile_number, current_log_file);
 }

-
 }
 #endif  // ROCKSDB_LITE
diff --git a/db/db_impl/db_impl.cc b/db/db_impl/db_impl.cc
index dde80d451..1c2df575d 100644
--- a/db/db_impl/db_impl.cc
+++ b/db/db_impl/db_impl.cc
@@ -3286,9 +3286,8 @@ Status DestroyDB(const std::string& dbname, const Options& options,
         if (type == kMetaDatabase) {
           del = DestroyDB(path_to_delete, options);
         } else if (type == kTableFile || type == kLogFile) {
-          del =
-              DeleteDBFile(&soptions, path_to_delete, dbname,
-                           /*force_bg=*/false, /*force_fg=*/!wal_in_db_path);
+          del = DeleteDBFile(&soptions, path_to_delete, dbname,
+                             /*force_bg=*/false, /*force_fg=*/!wal_in_db_path);
         } else {
           del = env->DeleteFile(path_to_delete);
         }
@@ -4003,8 +4002,7 @@ Status DBImpl::IngestExternalFiles(
 Status DBImpl::CreateColumnFamilyWithImport(
     const ColumnFamilyOptions& options, const std::string& column_family_name,
     const ImportColumnFamilyOptions& import_options,
-    const ExportImportFilesMetaData& metadata,
-    ColumnFamilyHandle** handle) {
+    const ExportImportFilesMetaData& metadata, ColumnFamilyHandle** handle) {
   assert(handle != nullptr);
   assert(*handle == nullptr);
   std::string cf_comparator_name = options.comparator->Name();
@@ -4045,8 +4043,7 @@ Status DBImpl::CreateColumnFamilyWithImport(
     // reuse the file number that has already been assigned to the internal
     // file, and this will overwrite the external file. To protect the
    // external file, we have to make sure the file number will never be reused.
-    next_file_number =
-        versions_->FetchAddFileNumber(metadata.files.size());
+    next_file_number = versions_->FetchAddFileNumber(metadata.files.size());
     auto cf_options = cfd->GetLatestMutableCFOptions();
     status = versions_->LogAndApply(cfd, *cf_options, &dummy_edit, &mutex_,
                                     directories_.GetDbDir());
diff --git a/db/db_impl/db_impl_compaction_flush.cc b/db/db_impl/db_impl_compaction_flush.cc
index 42b0b93bb..2c8be2847 100644
--- a/db/db_impl/db_impl_compaction_flush.cc
+++ b/db/db_impl/db_impl_compaction_flush.cc
@@ -1565,7 +1565,8 @@ Status DBImpl::FlushMemTable(ColumnFamilyData* cfd,
     if (stats_cf_flush_needed) {
       ROCKS_LOG_INFO(immutable_db_options_.info_log,
                      "Force flushing stats CF with manual flush of %s "
-                     "to avoid holding old logs", cfd->GetName().c_str());
+                     "to avoid holding old logs",
+                     cfd->GetName().c_str());
       s = SwitchMemtable(cfd_stats, &context);
       flush_memtable_id = cfd_stats->imm()->GetLatestMemTableID();
       flush_req.emplace_back(cfd_stats, flush_memtable_id);
diff --git a/db/db_impl/db_impl_open.cc b/db/db_impl/db_impl_open.cc
index 5078748d0..076985de1 100644
--- a/db/db_impl/db_impl_open.cc
+++ b/db/db_impl/db_impl_open.cc
@@ -135,9 +135,9 @@ DBOptions SanitizeOptions(const std::string& dbname, const DBOptions& src) {
     std::vector<std::string> filenames;
     result.env->GetChildren(result.wal_dir, &filenames);
     for (std::string& filename : filenames) {
-      if (filename.find(".log.trash",
-                        filename.length() - std::string(".log.trash").length()) !=
-          std::string::npos) {
+      if (filename.find(".log.trash", filename.length() -
+                                          std::string(".log.trash").length()) !=
+          std::string::npos) {
         std::string trash_file = result.wal_dir + "/" + filename;
         result.env->DeleteFile(trash_file);
       }
@@ -1352,8 +1352,7 @@ Status DBImpl::Open(const DBOptions& db_options, const std::string& dbname,
     return s;
   }

-  impl->wal_in_db_path_ =
-      IsWalDirSameAsDBPath(&impl->immutable_db_options_);
+  impl->wal_in_db_path_ = IsWalDirSameAsDBPath(&impl->immutable_db_options_);

   impl->mutex_.Lock();
   // Handles create_if_missing, error_if_exists
diff --git a/db/db_impl/db_impl_secondary.cc b/db/db_impl/db_impl_secondary.cc
index da4a1da3a..8eac41ded 100644
--- a/db/db_impl/db_impl_secondary.cc
+++ b/db/db_impl/db_impl_secondary.cc
@@ -588,8 +588,7 @@ Status DB::OpenAsSecondary(
       &impl->write_controller_));
   impl->column_family_memtables_.reset(
       new ColumnFamilyMemTablesImpl(impl->versions_->GetColumnFamilySet()));
-  impl->wal_in_db_path_ =
-      IsWalDirSameAsDBPath(&impl->immutable_db_options_);
+  impl->wal_in_db_path_ = IsWalDirSameAsDBPath(&impl->immutable_db_options_);

   impl->mutex_.Lock();
   s = impl->Recover(column_families, true, false, false);
diff --git a/db/db_impl/db_secondary_test.cc b/db/db_impl/db_secondary_test.cc
index 26f43c107..6caff005e 100644
--- a/db/db_impl/db_secondary_test.cc
+++ b/db/db_impl/db_secondary_test.cc
@@ -605,9 +605,9 @@ TEST_F(DBSecondaryTest, SwitchWAL) {
 TEST_F(DBSecondaryTest, SwitchWALMultiColumnFamilies) {
   const int kNumKeysPerMemtable = 1;
   SyncPoint::GetInstance()->DisableProcessing();
-  SyncPoint::GetInstance()->LoadDependency({
-      {"DBImpl::BackgroundCallFlush:ContextCleanedUp",
-      "DBSecondaryTest::SwitchWALMultipleColumnFamilies:BeforeCatchUp"}});
+  SyncPoint::GetInstance()->LoadDependency(
+      {{"DBImpl::BackgroundCallFlush:ContextCleanedUp",
+        "DBSecondaryTest::SwitchWALMultipleColumnFamilies:BeforeCatchUp"}});
   SyncPoint::GetInstance()->EnableProcessing();
   const std::string kCFName1 = "pikachu";
   Options options;
@@ -662,7 +662,8 @@ TEST_F(DBSecondaryTest, SwitchWALMultiColumnFamilies) {
         Put(0 /*cf*/, "key" + std::to_string(k), "value" + std::to_string(k)));
     ASSERT_OK(
         Put(1 /*cf*/, "key" + std::to_string(k), "value" + std::to_string(k)));
-    TEST_SYNC_POINT("DBSecondaryTest::SwitchWALMultipleColumnFamilies:BeforeCatchUp");
+    TEST_SYNC_POINT(
+        "DBSecondaryTest::SwitchWALMultipleColumnFamilies:BeforeCatchUp");
     ASSERT_OK(db_secondary_->TryCatchUpWithPrimary());
     verify_db(dbfull(), handles_, db_secondary_, handles_secondary_);
     SyncPoint::GetInstance()->ClearTrace();
diff --git a/db/db_info_dumper.cc b/db/db_info_dumper.cc
index e2bb01e0e..7ab7e3337 100644
--- a/db/db_info_dumper.cc
+++ b/db/db_info_dumper.cc
@@ -5,10 +5,10 @@

 #include "db/db_info_dumper.h"

-#include
 #include
-#include
 #include
+#include
+#include
 #include

 #include "file/filename.h"
diff --git a/db/db_test.cc b/db/db_test.cc
index 9ca550b8d..04785c3c1 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -2793,7 +2793,8 @@ class ModelDB : public DB {
     return Status::OK();
   }

-  Status GetCurrentWalFile(std::unique_ptr<LogFile>* /*current_log_file*/) override {
+  Status GetCurrentWalFile(
+      std::unique_ptr<LogFile>* /*current_log_file*/) override {
     return Status::OK();
   }

@@ -6265,7 +6266,7 @@ TEST_F(DBTest, LargeBlockSizeTest) {
   CreateAndReopenWithCF({"pikachu"}, options);
   ASSERT_OK(Put(0, "foo", "bar"));
   BlockBasedTableOptions table_options;
-  table_options.block_size = 8LL*1024*1024*1024LL;
+  table_options.block_size = 8LL * 1024 * 1024 * 1024LL;
   options.table_factory.reset(NewBlockBasedTableFactory(table_options));
   ASSERT_NOK(TryReopenWithColumnFamilies({"default", "pikachu"}, options));
 }
diff --git a/db/db_test_util.cc b/db/db_test_util.cc
index fae03e2ab..41e4c5105 100644
--- a/db/db_test_util.cc
+++ b/db/db_test_util.cc
@@ -48,9 +48,7 @@ ROT13BlockCipher rot13Cipher_(16);
 #endif  // ROCKSDB_LITE

 DBTestBase::DBTestBase(const std::string path)
-    : mem_env_(nullptr),
-      encrypted_env_(nullptr),
-      option_config_(kDefault) {
+    : mem_env_(nullptr), encrypted_env_(nullptr), option_config_(kDefault) {
   Env* base_env = Env::Default();
 #ifndef ROCKSDB_LITE
   const char* test_env_uri = getenv("TEST_ENV_URI");
diff --git a/db/db_universal_compaction_test.cc b/db/db_universal_compaction_test.cc
index 4f3ddef10..fa2277bad 100644
--- a/db/db_universal_compaction_test.cc
+++ b/db/db_universal_compaction_test.cc
@@ -452,7 +452,7 @@ TEST_P(DBTestUniversalCompaction, DynamicUniversalCompactionSizeAmplification) {
   ASSERT_OK(dbfull()->TEST_GetLatestMutableCFOptions(handles_[1],
                                                      &mutable_cf_options));
   ASSERT_EQ(110u, mutable_cf_options.compaction_options_universal
-                        .max_size_amplification_percent);
+                      .max_size_amplification_percent);

   dbfull()->TEST_WaitForCompact();
   // Verify that size amplification did happen
@@ -534,8 +534,10 @@ TEST_P(DBTestUniversalCompaction, DynamicUniversalCompactionReadAmplification) {
   ASSERT_OK(dbfull()->TEST_GetLatestMutableCFOptions(handles_[1],
                                                      &mutable_cf_options));
   ASSERT_EQ(mutable_cf_options.compaction_options_universal.size_ratio, 100u);
-  ASSERT_EQ(mutable_cf_options.compaction_options_universal.min_merge_width, 2u);
-  ASSERT_EQ(mutable_cf_options.compaction_options_universal.max_merge_width, 2u);
+  ASSERT_EQ(mutable_cf_options.compaction_options_universal.min_merge_width,
+            2u);
+  ASSERT_EQ(mutable_cf_options.compaction_options_universal.max_merge_width,
+            2u);

   dbfull()->TEST_WaitForCompact();
diff --git a/db/dbformat.cc b/db/dbformat.cc
index 130ba4e8a..a20e2a02d 100644
--- a/db/dbformat.cc
+++ b/db/dbformat.cc
@@ -8,8 +8,8 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "db/dbformat.h" -#include #include +#include #include "monitoring/perf_context_imp.h" #include "port/port.h" #include "util/coding.h" diff --git a/db/external_sst_file_ingestion_job.cc b/db/external_sst_file_ingestion_job.cc index c11b346a2..6b53ce391 100644 --- a/db/external_sst_file_ingestion_job.cc +++ b/db/external_sst_file_ingestion_job.cc @@ -86,8 +86,7 @@ Status ExternalSstFileIngestionJob::Prepare( return Status::InvalidArgument("File contain no entries"); } - if (!f.smallest_internal_key.Valid() || - !f.largest_internal_key.Valid()) { + if (!f.smallest_internal_key.Valid() || !f.largest_internal_key.Valid()) { return Status::Corruption("Generated table have corrupted keys"); } } @@ -448,8 +447,10 @@ Status ExternalSstFileIngestionJob::GetIngestedFileInfo( table_reader->NewRangeTombstoneIterator(ro)); // Get first (smallest) and last (largest) key from file. - file_to_ingest->smallest_internal_key = InternalKey("", 0, ValueType::kTypeValue); - file_to_ingest->largest_internal_key = InternalKey("", 0, ValueType::kTypeValue); + file_to_ingest->smallest_internal_key = + InternalKey("", 0, ValueType::kTypeValue); + file_to_ingest->largest_internal_key = + InternalKey("", 0, ValueType::kTypeValue); bool bounds_set = false; iter->SeekToFirst(); if (iter->Valid()) { @@ -485,11 +486,15 @@ Status ExternalSstFileIngestionJob::GetIngestedFileInfo( RangeTombstone tombstone(key, range_del_iter->value()); InternalKey start_key = tombstone.SerializeKey(); - if (!bounds_set || sstableKeyCompare(ucmp, start_key, file_to_ingest->smallest_internal_key) < 0) { + if (!bounds_set || + sstableKeyCompare(ucmp, start_key, + file_to_ingest->smallest_internal_key) < 0) { file_to_ingest->smallest_internal_key = start_key; } InternalKey end_key = tombstone.SerializeEndKey(); - if (!bounds_set || sstableKeyCompare(ucmp, end_key, file_to_ingest->largest_internal_key) > 0) { + if (!bounds_set || + sstableKeyCompare(ucmp, end_key, + file_to_ingest->largest_internal_key) > 0) { file_to_ingest->largest_internal_key = end_key; } bounds_set = true; @@ -531,9 +536,10 @@ Status ExternalSstFileIngestionJob::AssignLevelAndSeqnoForIngestedFile( if (vstorage->NumLevelFiles(lvl) > 0) { bool overlap_with_level = false; - status = sv->current->OverlapWithLevelIterator(ro, env_options_, - file_to_ingest->smallest_internal_key.user_key(), file_to_ingest->largest_internal_key.user_key(), - lvl, &overlap_with_level); + status = sv->current->OverlapWithLevelIterator( + ro, env_options_, file_to_ingest->smallest_internal_key.user_key(), + file_to_ingest->largest_internal_key.user_key(), lvl, + &overlap_with_level); if (!status.ok()) { return status; } @@ -672,7 +678,8 @@ bool ExternalSstFileIngestionJob::IngestedFileFitInLevel( } auto* vstorage = cfd_->current()->storage_info(); - Slice file_smallest_user_key(file_to_ingest->smallest_internal_key.user_key()); + Slice file_smallest_user_key( + file_to_ingest->smallest_internal_key.user_key()); Slice file_largest_user_key(file_to_ingest->largest_internal_key.user_key()); if (vstorage->OverlapInLevel(level, &file_smallest_user_key, diff --git a/db/import_column_family_job.cc b/db/import_column_family_job.cc index 7a6aa7344..66b8b1622 100644 --- a/db/import_column_family_job.cc +++ b/db/import_column_family_job.cc @@ -2,8 +2,8 @@ #include "db/import_column_family_job.h" -#include #include +#include #include #include @@ -64,7 +64,8 @@ Status ImportColumnFamilyJob::Prepare(uint64_t next_file_number, for (size_t i = 0; i < sorted_files.size() - 1; i++) { if 
(sstableKeyCompare(ucmp, sorted_files[i]->largest_internal_key, - sorted_files[i + 1]->smallest_internal_key) >= 0) { + sorted_files[i + 1]->smallest_internal_key) >= + 0) { return Status::InvalidArgument("Files have overlapping ranges"); } } @@ -76,8 +77,7 @@ Status ImportColumnFamilyJob::Prepare(uint64_t next_file_number, return Status::InvalidArgument("File contain no entries"); } - if (!f.smallest_internal_key.Valid() || - !f.largest_internal_key.Valid()) { + if (!f.smallest_internal_key.Valid() || !f.largest_internal_key.Valid()) { return Status::Corruption("File has corrupted keys"); } } @@ -198,8 +198,8 @@ Status ImportColumnFamilyJob::GetIngestedFileInfo( if (!status.ok()) { return status; } - sst_file_reader.reset(new RandomAccessFileReader(std::move(sst_file), - external_file)); + sst_file_reader.reset( + new RandomAccessFileReader(std::move(sst_file), external_file)); status = cfd_->ioptions()->table_factory->NewTableReader( TableReaderOptions(*cfd_->ioptions(), diff --git a/db/import_column_family_job.h b/db/import_column_family_job.h index 5b8577df1..05796590b 100644 --- a/db/import_column_family_job.h +++ b/db/import_column_family_job.h @@ -20,11 +20,11 @@ namespace rocksdb { // to ExternalSstFileIngestionJob. class ImportColumnFamilyJob { public: - ImportColumnFamilyJob( - Env* env, VersionSet* versions, ColumnFamilyData* cfd, - const ImmutableDBOptions& db_options, const EnvOptions& env_options, - const ImportColumnFamilyOptions& import_options, - const std::vector& metadata) + ImportColumnFamilyJob(Env* env, VersionSet* versions, ColumnFamilyData* cfd, + const ImmutableDBOptions& db_options, + const EnvOptions& env_options, + const ImportColumnFamilyOptions& import_options, + const std::vector& metadata) : env_(env), versions_(versions), cfd_(cfd), diff --git a/db/import_column_family_test.cc b/db/import_column_family_test.cc index 4f695d33f..1138b16ba 100644 --- a/db/import_column_family_test.cc +++ b/db/import_column_family_test.cc @@ -45,8 +45,7 @@ class ImportColumnFamilyTest : public DBTestBase { test::DestroyDir(env_, export_files_dir_); } - LiveFileMetaData LiveFileMetaDataInit(std::string name, - std::string path, + LiveFileMetaData LiveFileMetaDataInit(std::string name, std::string path, int level, SequenceNumber smallest_seqno, SequenceNumber largest_seqno) { @@ -64,7 +63,7 @@ class ImportColumnFamilyTest : public DBTestBase { std::string export_files_dir_; ColumnFamilyHandle* import_cfh_; ColumnFamilyHandle* import_cfh2_; - ExportImportFilesMetaData *metadata_ptr_; + ExportImportFilesMetaData* metadata_ptr_; }; TEST_F(ImportColumnFamilyTest, ImportSSTFileWriterFiles) { @@ -545,7 +544,6 @@ TEST_F(ImportColumnFamilyTest, ImportColumnFamilyNegativeTest) { metadata, &import_cfh_)); ASSERT_NE(import_cfh_, nullptr); } - } } // namespace rocksdb diff --git a/db/internal_stats.cc b/db/internal_stats.cc index 2ddd9122e..94d9cd8ac 100644 --- a/db/internal_stats.cc +++ b/db/internal_stats.cc @@ -10,8 +10,8 @@ #include "db/internal_stats.h" -#include #include +#include #include #include #include diff --git a/db/plain_table_db_test.cc b/db/plain_table_db_test.cc index b4e983a7b..186f4717d 100644 --- a/db/plain_table_db_test.cc +++ b/db/plain_table_db_test.cc @@ -745,20 +745,19 @@ TEST_P(PlainTableDBTest, BloomSchema) { Options options = CurrentOptions(); options.create_if_missing = true; for (int bloom_locality = 0; bloom_locality <= 1; bloom_locality++) { - options.bloom_locality = bloom_locality; + options.bloom_locality = bloom_locality; PlainTableOptions 
plain_table_options; plain_table_options.user_key_len = 16; - plain_table_options.bloom_bits_per_key = 3; // high FP rate for test + plain_table_options.bloom_bits_per_key = 3; // high FP rate for test plain_table_options.hash_table_ratio = 0.75; plain_table_options.index_sparseness = 16; plain_table_options.huge_page_tlb_size = 0; plain_table_options.encoding_type = kPlain; - bool expect_bloom_not_match = false; options.table_factory.reset(new TestPlainTableFactory( - &expect_bloom_not_match, plain_table_options, - 0 /* column_family_id */, kDefaultColumnFamilyName)); + &expect_bloom_not_match, plain_table_options, 0 /* column_family_id */, + kDefaultColumnFamilyName)); DestroyAndReopen(&options); for (unsigned i = 0; i < 2345; ++i) { @@ -782,7 +781,7 @@ TEST_P(PlainTableDBTest, BloomSchema) { pattern = 163905UL; } bool expect_fp = pattern & (1UL << i); - //fprintf(stderr, "expect_fp@%u: %d\n", i, (int)expect_fp); + // fprintf(stderr, "expect_fp@%u: %d\n", i, (int)expect_fp); expect_bloom_not_match = !expect_fp; ASSERT_EQ("NOT_FOUND", Get(NthKey(i, 'n'))); } diff --git a/db/range_tombstone_fragmenter.cc b/db/range_tombstone_fragmenter.cc index 3d3a5c452..fe64623fe 100644 --- a/db/range_tombstone_fragmenter.cc +++ b/db/range_tombstone_fragmenter.cc @@ -9,8 +9,8 @@ #include #include -#include #include +#include #include "util/autovector.h" #include "util/kv_map.h" diff --git a/db/table_cache.cc b/db/table_cache.cc index 98070be69..89acd3d84 100644 --- a/db/table_cache.cc +++ b/db/table_cache.cc @@ -277,10 +277,11 @@ Status TableCache::GetRangeTombstoneIterator( } #ifndef ROCKSDB_LITE -void TableCache::CreateRowCacheKeyPrefix( - const ReadOptions& options, - const FileDescriptor& fd, const Slice& internal_key, - GetContext* get_context, IterKey& row_cache_key) { +void TableCache::CreateRowCacheKeyPrefix(const ReadOptions& options, + const FileDescriptor& fd, + const Slice& internal_key, + GetContext* get_context, + IterKey& row_cache_key) { uint64_t fd_number = fd.GetNumber(); // We use the user key as cache key instead of the internal key, // otherwise the whole cache would be invalidated every time the @@ -312,13 +313,11 @@ void TableCache::CreateRowCacheKeyPrefix( AppendVarint64(&row_cache_key, seq_no); } -bool TableCache::GetFromRowCache( - const Slice& user_key, IterKey& row_cache_key, - size_t prefix_size, GetContext* get_context) { +bool TableCache::GetFromRowCache(const Slice& user_key, IterKey& row_cache_key, + size_t prefix_size, GetContext* get_context) { bool found = false; - row_cache_key.TrimAppend(prefix_size, user_key.data(), - user_key.size()); + row_cache_key.TrimAppend(prefix_size, user_key.data(), user_key.size()); if (auto row_handle = ioptions_.row_cache->Lookup(row_cache_key.GetUserKey())) { // Cleanable routine to release the cache entry @@ -327,8 +326,8 @@ bool TableCache::GetFromRowCache( void* cache_handle) { ((Cache*)cache_to_clean)->Release((Cache::Handle*)cache_handle); }; - auto found_row_cache_entry = static_cast( - ioptions_.row_cache->Value(row_handle)); + auto found_row_cache_entry = + static_cast(ioptions_.row_cache->Value(row_handle)); // If it comes here value is located on the cache. // found_row_cache_entry points to the value on cache, // and value_pinner has cleanup procedure for the cached entry. 
@@ -442,14 +441,15 @@ Status TableCache::MultiGet(const ReadOptions& options,
   Status s;
   TableReader* t = fd.table_reader;
   Cache::Handle* handle = nullptr;
-  MultiGetRange table_range(*mget_range, mget_range->begin(), mget_range->end());
+  MultiGetRange table_range(*mget_range, mget_range->begin(),
+                            mget_range->end());
 #ifndef ROCKSDB_LITE
   autovector row_cache_entries;
   IterKey row_cache_key;
   size_t row_cache_key_prefix_size = 0;
   KeyContext& first_key = *table_range.begin();
-  bool lookup_row_cache = ioptions_.row_cache &&
-      !first_key.get_context->NeedToReadSequence();
+  bool lookup_row_cache =
+      ioptions_.row_cache && !first_key.get_context->NeedToReadSequence();

   // Check row cache if enabled. Since row cache does not currently store
   // sequence numbers, we cannot use it if we need to fetch the sequence.
@@ -459,8 +459,10 @@ Status TableCache::MultiGet(const ReadOptions& options,
                             row_cache_key);
     row_cache_key_prefix_size = row_cache_key.Size();

-    for (auto miter = table_range.begin(); miter != table_range.end(); ++miter) {
-      const Slice& user_key = miter->ukey;;
+    for (auto miter = table_range.begin(); miter != table_range.end();
+         ++miter) {
+      const Slice& user_key = miter->ukey;
+      ;
       GetContext* get_context = miter->get_context;

       if (GetFromRowCache(user_key, row_cache_key, row_cache_key_prefix_size,
@@ -519,9 +521,11 @@ Status TableCache::MultiGet(const ReadOptions& options,
     if (lookup_row_cache) {
       size_t row_idx = 0;

-      for (auto miter = table_range.begin(); miter != table_range.end(); ++miter) {
+      for (auto miter = table_range.begin(); miter != table_range.end();
+           ++miter) {
         std::string& row_cache_entry = row_cache_entries[row_idx++];
-        const Slice& user_key = miter->ukey;;
+        const Slice& user_key = miter->ukey;
+        ;
         GetContext* get_context = miter->get_context;

         get_context->SetReplayLog(nullptr);
diff --git a/db/table_cache.h b/db/table_cache.h
index 85592858a..088040672 100644
--- a/db/table_cache.h
+++ b/db/table_cache.h
@@ -208,8 +208,7 @@ class TableCache {
   void CreateRowCacheKeyPrefix(const ReadOptions& options,
                                const FileDescriptor& fd,
                                const Slice& internal_key,
-                               GetContext* get_context,
-                               IterKey& row_cache_key);
+                               GetContext* get_context, IterKey& row_cache_key);

   // Helper function to lookup the row cache for a key. It appends the
   // user key to row_cache_key at offset prefix_size
diff --git a/db/version_builder.cc b/db/version_builder.cc
index b97853f2d..8a8aefcb8 100644
--- a/db/version_builder.cc
+++ b/db/version_builder.cc
@@ -9,9 +9,9 @@

 #include "db/version_builder.h"

-#include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -173,14 +173,13 @@ class VersionBuilder::Rep {
                 " vs. file with global_seqno %" PRIu64 "\n",
                 f1->fd.smallest_seqno, f1->fd.largest_seqno,
                 external_file_seqno);
-        return Status::Corruption("L0 file with seqno " +
-                                  NumberToString(f1->fd.smallest_seqno) +
-                                  " " +
-                                  NumberToString(f1->fd.largest_seqno) +
-                                  " vs. file with global_seqno" +
-                                  NumberToString(external_file_seqno) +
-                                  " with fileNumber " +
-                                  NumberToString(f1->fd.GetNumber()));
+        return Status::Corruption(
+            "L0 file with seqno " +
+            NumberToString(f1->fd.smallest_seqno) + " " +
+            NumberToString(f1->fd.largest_seqno) +
+            " vs. file with global_seqno" +
+            NumberToString(external_file_seqno) + " with fileNumber " +
+            NumberToString(f1->fd.GetNumber()));
       }
     } else if (f1->fd.smallest_seqno <= f2->fd.smallest_seqno) {
       fprintf(stderr,
diff --git a/db/version_edit.h b/db/version_edit.h
index 06a8b0caf..7ab8fc0f1 100644
--- a/db/version_edit.h
+++ b/db/version_edit.h
@@ -52,7 +52,7 @@ struct FileDescriptor {
         smallest_seqno(_smallest_seqno),
         largest_seqno(_largest_seqno) {}

-  FileDescriptor(const FileDescriptor& fd) { *this=fd; }
+  FileDescriptor(const FileDescriptor& fd) { *this = fd; }

   FileDescriptor& operator=(const FileDescriptor& fd) {
     table_reader = fd.table_reader;
diff --git a/db/version_set.cc b/db/version_set.cc
index a8ae98550..61c67b42a 100644
--- a/db/version_set.cc
+++ b/db/version_set.cc
@@ -5727,7 +5727,8 @@ Status ReactiveVersionSet::ApplyOneVersionEditToBuilder(
     return Status::OK();
   }
   if (active_version_builders_.find(edit.column_family_) ==
-      active_version_builders_.end() && !cfd->IsDropped()) {
+          active_version_builders_.end() &&
+      !cfd->IsDropped()) {
     std::unique_ptr<BaseReferencedVersionBuilder> builder_guard(
         new BaseReferencedVersionBuilder(cfd));
     active_version_builders_.insert(
diff --git a/db/wal_manager.cc b/db/wal_manager.cc
index 783e1c7ac..d3f59a10e 100644
--- a/db/wal_manager.cc
+++ b/db/wal_manager.cc
@@ -9,10 +9,10 @@

 #include "db/wal_manager.h"

-#include
 #include
-#include
+#include
 #include
+#include

 #include "db/log_reader.h"
 #include "db/log_writer.h"
@@ -415,12 +415,13 @@ Status WalManager::ReadFirstRecord(const WalFileType type,
   return s;
 }

-Status WalManager::GetLiveWalFile(uint64_t number, std::unique_ptr<LogFile>* log_file) {
+Status WalManager::GetLiveWalFile(uint64_t number,
+                                  std::unique_ptr<LogFile>* log_file) {
   if (!log_file) {
     return Status::InvalidArgument("log_file not preallocated.");
   }

-  if(!number) {
+  if (!number) {
     return Status::PathNotFound("log file not available");
   }

@@ -433,16 +434,13 @@ Status WalManager::GetLiveWalFile(uint64_t number, std::unique_ptr* log
     return s;
   }

-  log_file->reset(new LogFileImpl(
-      number,
-      kAliveLogFile,
-      0, // SequenceNumber
-      size_bytes));
+  log_file->reset(new LogFileImpl(number, kAliveLogFile,
+                                  0,  // SequenceNumber
+                                  size_bytes));

   return Status::OK();
 }

-
 // the function returns status.ok() and sequence == 0 if the file exists, but is
 // empty
 Status WalManager::ReadFirstLine(const std::string& fname,
diff --git a/db/wal_manager_test.cc b/db/wal_manager_test.cc
index 089c49cc6..4f15a064d 100644
--- a/db/wal_manager_test.cc
+++ b/db/wal_manager_test.cc
@@ -293,7 +293,7 @@ TEST_F(WalManagerTest, TransactionLogIteratorJustEmptyFile) {
   // Check that an empty iterator is returned
   ASSERT_TRUE(!iter->Valid());
 }
- 
+
 TEST_F(WalManagerTest, TransactionLogIteratorNewFileWhileScanning) {
   Init();
   CreateArchiveLogs(2, 100);
@@ -307,7 +307,7 @@ TEST_F(WalManagerTest, TransactionLogIteratorNewFileWhileScanning) {
   // A new log file was added after the iterator was created.
   // TryAgain indicates a new iterator is needed to fetch the new data
   ASSERT_TRUE(iter->status().IsTryAgain());
- 
+
   iter = OpenTransactionLogIter(0);
   i = 0;
   for (; iter->Valid(); iter->Next()) {
diff --git a/db/write_batch.cc b/db/write_batch.cc
index 1b878f3b0..350c1a1c0 100644
--- a/db/write_batch.cc
+++ b/db/write_batch.cc
@@ -334,9 +334,7 @@ void WriteBatch::Clear() {
   wal_term_point_.clear();
 }

-uint32_t WriteBatch::Count() const {
-  return WriteBatchInternal::Count(this);
-}
+uint32_t WriteBatch::Count() const { return WriteBatchInternal::Count(this); }

 uint32_t WriteBatch::ComputeContentFlags() const {
   auto rv = content_flags_.load(std::memory_order_relaxed);
diff --git a/env/env_test.cc b/env/env_test.cc
index f9c597823..fe42b8520 100644
--- a/env/env_test.cc
+++ b/env/env_test.cc
@@ -1110,7 +1110,8 @@ TEST_P(EnvPosixTestWithParam, MultiRead) {
   // Create file.
   {
     std::unique_ptr<WritableFile> wfile;
-#if !defined(OS_MACOSX) && !defined(OS_WIN) && !defined(OS_SOLARIS) && !defined(OS_AIX)
+#if !defined(OS_MACOSX) && !defined(OS_WIN) && !defined(OS_SOLARIS) && \
+    !defined(OS_AIX)
     if (soptions.use_direct_writes) {
       soptions.use_direct_writes = false;
     }
@@ -1137,7 +1138,8 @@ TEST_P(EnvPosixTestWithParam, MultiRead) {
     data.emplace_back(NewAligned(kSectorSize, 0));
     reqs[i].scratch = data.back().get();
   }
-#if !defined(OS_MACOSX) && !defined(OS_WIN) && !defined(OS_SOLARIS) && !defined(OS_AIX)
+#if !defined(OS_MACOSX) && !defined(OS_WIN) && !defined(OS_SOLARIS) && \
+    !defined(OS_AIX)
   if (soptions.use_direct_reads) {
     soptions.use_direct_reads = false;
   }
@@ -1145,7 +1147,7 @@ TEST_P(EnvPosixTestWithParam, MultiRead) {
   ASSERT_OK(env_->NewRandomAccessFile(fname, &file, soptions));
   ASSERT_OK(file->MultiRead(reqs.data(), reqs.size()));
   for (size_t i = 0; i < reqs.size(); ++i) {
-    auto buf = NewAligned(kSectorSize * 8, static_cast<char>(i*2 + 1));
+    auto buf = NewAligned(kSectorSize * 8, static_cast<char>(i * 2 + 1));
     ASSERT_OK(reqs[i].status);
     ASSERT_EQ(memcmp(reqs[i].scratch, buf.get(), kSectorSize), 0);
   }
diff --git a/examples/multi_processes_example.cc b/examples/multi_processes_example.cc
index 7350e1be2..921041a57 100644
--- a/examples/multi_processes_example.cc
+++ b/examples/multi_processes_example.cc
@@ -14,8 +14,8 @@
 // run for a while, tailing the logs of the primary. After process with primary
 // instance exits, this process will keep running until you hit 'CTRL+C'.

-#include
 #include
+#include
 #include
 #include
 #include
diff --git a/file/delete_scheduler_test.cc b/file/delete_scheduler_test.cc
index 3549a9f84..b6d4b903c 100644
--- a/file/delete_scheduler_test.cc
+++ b/file/delete_scheduler_test.cc
@@ -3,8 +3,8 @@
 // COPYING file in the root directory) and Apache 2.0 License
 // (found in the LICENSE.Apache file in the root directory).

-#include
 #include
+#include
 #include
 #include
diff --git a/include/rocksdb/db.h b/include/rocksdb/db.h
index ca8ab85b8..b03c12e00 100644
--- a/include/rocksdb/db.h
+++ b/include/rocksdb/db.h
@@ -1137,7 +1137,8 @@ class DB {
   //
   // Additionally, for the sake of optimization current_log_file->StartSequence
   // would always be set to 0
-  virtual Status GetCurrentWalFile(std::unique_ptr<LogFile>* current_log_file) = 0;
+  virtual Status GetCurrentWalFile(
+      std::unique_ptr<LogFile>* current_log_file) = 0;

   // Note: this API is not yet consistent with WritePrepared transactions.
   // Sets iter to an iterator that is positioned at a write-batch containing
diff --git a/include/rocksdb/utilities/ldb_cmd.h b/include/rocksdb/utilities/ldb_cmd.h
index efac3f84d..3a5c980f4 100644
--- a/include/rocksdb/utilities/ldb_cmd.h
+++ b/include/rocksdb/utilities/ldb_cmd.h
@@ -131,7 +131,7 @@ class LDBCommand {
   std::string db_path_;
   // If empty, open DB as primary. If non-empty, open the DB as secondary
   // with this secondary path. When running against a database opened by
-  // another process, ldb wll leave the source directory completely intact. 
+  // another process, ldb will leave the source directory completely intact.
   std::string secondary_path_;
   std::string column_family_name_;
   DB* db_;
diff --git a/include/rocksdb/utilities/stackable_db.h b/include/rocksdb/utilities/stackable_db.h
index 9dd038b84..32202d7a0 100644
--- a/include/rocksdb/utilities/stackable_db.h
+++ b/include/rocksdb/utilities/stackable_db.h
@@ -378,7 +378,8 @@ class StackableDB : public DB {
     return db_->GetSortedWalFiles(files);
   }

-  virtual Status GetCurrentWalFile(std::unique_ptr<LogFile>* current_log_file) override {
+  virtual Status GetCurrentWalFile(
+      std::unique_ptr<LogFile>* current_log_file) override {
     return db_->GetCurrentWalFile(current_log_file);
   }

diff --git a/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java
index 532db4734..91e3b2fa2 100644
--- a/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java
+++ b/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java
@@ -13,9 +13,8 @@ import java.util.List;
 *
 * Taken from include/rocksdb/advanced_options.h
 */
-public interface AdvancedColumnFamilyOptionsInterface
-    <T extends AdvancedColumnFamilyOptionsInterface<T>> {
-
+public interface AdvancedColumnFamilyOptionsInterface<
+    T extends AdvancedColumnFamilyOptionsInterface<T>> {
   /**
    * The minimum number of write buffers that will be merged together
    * before writing to storage.  If set to 1, then
diff --git a/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java
index 64a6f9dcc..03a7b0983 100644
--- a/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java
+++ b/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java
@@ -11,9 +11,8 @@ package org.rocksdb;
 * Taken from include/rocksdb/advanced_options.h
 * and MutableCFOptions in util/cf_options.h
 */
-public interface AdvancedMutableColumnFamilyOptionsInterface
-    <T extends AdvancedMutableColumnFamilyOptionsInterface<T>> {
-
+public interface AdvancedMutableColumnFamilyOptionsInterface<
+    T extends AdvancedMutableColumnFamilyOptionsInterface<T>> {
   /**
    * The maximum number of write buffers that are built up in memory.
    * The default is 2, so that when 1 write buffer is being flushed to
diff --git a/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java
index 3c8cd5d51..6d8dfd161 100644
--- a/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java
+++ b/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java
@@ -5,10 +5,8 @@

 package org.rocksdb;

-public interface ColumnFamilyOptionsInterface
-    <T extends ColumnFamilyOptionsInterface<T>>
-    extends AdvancedColumnFamilyOptionsInterface<T> {
-
+public interface ColumnFamilyOptionsInterface<T extends ColumnFamilyOptionsInterface<T>>
+    extends AdvancedColumnFamilyOptionsInterface<T> {
   /**
    * Use this if your DB is very small (like under 1GB) and you don't want to
    * spend lots of memory for memtables.
diff --git a/java/src/main/java/org/rocksdb/DBOptionsInterface.java b/java/src/main/java/org/rocksdb/DBOptionsInterface.java
index 611f4f5da..f4217412f 100644
--- a/java/src/main/java/org/rocksdb/DBOptionsInterface.java
+++ b/java/src/main/java/org/rocksdb/DBOptionsInterface.java
@@ -9,7 +9,6 @@ import java.util.Collection;
 import java.util.List;

 public interface DBOptionsInterface<T extends DBOptionsInterface<T>> {
-
   /**
    * Use this if your DB is very small (like under 1GB) and you don't want to
    * spend lots of memory for memtables.
diff --git a/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java
index 4ae96daaf..be3a5d483 100644
--- a/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java
+++ b/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java
@@ -5,10 +5,9 @@

 package org.rocksdb;

-public interface MutableColumnFamilyOptionsInterface
-    <T extends MutableColumnFamilyOptionsInterface<T>>
-    extends AdvancedMutableColumnFamilyOptionsInterface<T> {
-
+public interface MutableColumnFamilyOptionsInterface<
+    T extends MutableColumnFamilyOptionsInterface<T>>
+    extends AdvancedMutableColumnFamilyOptionsInterface<T> {
   /**
    * Amount of data to build up in memory (backed by an unsorted log
    * on disk) before converting to a sorted on-disk file.
diff --git a/java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java b/java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java
index 00087a43c..50347d38d 100644
--- a/java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java
+++ b/java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java
@@ -2,7 +2,6 @@
 package org.rocksdb;

 public interface MutableDBOptionsInterface<T extends MutableDBOptionsInterface<T>> {
-
   /**
    * Specifies the maximum number of concurrent background jobs (both flushes
    * and compactions combined).
diff --git a/java/src/main/java/org/rocksdb/util/Environment.java b/java/src/main/java/org/rocksdb/util/Environment.java
index 03611a248..bac1c559a 100644
--- a/java/src/main/java/org/rocksdb/util/Environment.java
+++ b/java/src/main/java/org/rocksdb/util/Environment.java
@@ -64,9 +64,9 @@ public class Environment {
   public static String getJniLibraryName(final String name) {
     if (isUnix()) {
       final String arch = is64Bit() ? "64" : "32";
-      if(isPowerPC() || isAarch64()) {
+      if (isPowerPC() || isAarch64()) {
         return String.format("%sjni-linux-%s", name, ARCH);
-      } else if(isS390x()) {
+      } else if (isS390x()) {
         return String.format("%sjni-linux%s", name, ARCH);
       } else {
         return String.format("%sjni-linux%s", name, arch);
diff --git a/java/src/test/java/org/rocksdb/util/EnvironmentTest.java b/java/src/test/java/org/rocksdb/util/EnvironmentTest.java
index 49c8bf19a..ff13ddb5d 100644
--- a/java/src/test/java/org/rocksdb/util/EnvironmentTest.java
+++ b/java/src/test/java/org/rocksdb/util/EnvironmentTest.java
@@ -136,16 +136,12 @@ public class EnvironmentTest {
     assertThat(Environment.isUnix()).isTrue();
     assertThat(Environment.isAarch64()).isTrue();
     assertThat(Environment.is64Bit()).isTrue();
-    assertThat(Environment.getJniLibraryExtension()).
-        isEqualTo(".so");
-    assertThat(Environment.getSharedLibraryName("rocksdb")).
-        isEqualTo("rocksdbjni");
-    assertThat(Environment.getJniLibraryName("rocksdb")).
-        isEqualTo("rocksdbjni-linux-aarch64");
-    assertThat(Environment.getJniLibraryFileName("rocksdb")).
-        isEqualTo("librocksdbjni-linux-aarch64.so");
-    assertThat(Environment.getSharedLibraryFileName("rocksdb")).
-        isEqualTo("librocksdbjni.so");
+    assertThat(Environment.getJniLibraryExtension()).isEqualTo(".so");
+    assertThat(Environment.getSharedLibraryName("rocksdb")).isEqualTo("rocksdbjni");
+    assertThat(Environment.getJniLibraryName("rocksdb")).isEqualTo("rocksdbjni-linux-aarch64");
+    assertThat(Environment.getJniLibraryFileName("rocksdb"))
+        .isEqualTo("librocksdbjni-linux-aarch64.so");
+    assertThat(Environment.getSharedLibraryFileName("rocksdb")).isEqualTo("librocksdbjni.so");
   }

   private void setEnvironmentClassFields(String osName,
diff --git a/logging/auto_roll_logger.cc b/logging/auto_roll_logger.cc
index 3109f0bc6..73a02a899 100644
--- a/logging/auto_roll_logger.cc
+++ b/logging/auto_roll_logger.cc
@@ -171,7 +171,7 @@ void AutoRollLogger::Logv(const char* format, va_list ap) {
   if (!logger_) {
     return;
   }
- 
+
   std::shared_ptr<Logger> logger;
   {
     MutexLock l(&mutex_);
diff --git a/logging/env_logger.h b/logging/env_logger.h
index 5d7ff7afe..7e8212dd2 100644
--- a/logging/env_logger.h
+++ b/logging/env_logger.h
@@ -11,10 +11,10 @@

 #pragma once

+#include
 #include
 #include
 #include "port/sys_time.h"
-#include

 #include "file/writable_file_writer.h"
 #include "monitoring/iostats_context_imp.h"
diff --git a/logging/env_logger_test.cc b/logging/env_logger_test.cc
index 316c231fa..6e1af2d55 100644
--- a/logging/env_logger_test.cc
+++ b/logging/env_logger_test.cc
@@ -4,8 +4,8 @@
 // (found in the LICENSE.Apache file in the root directory).
 //

-#include "env/mock_env.h"
 #include "logging/env_logger.h"
+#include "env/mock_env.h"
 #include "test_util/testharness.h"
 #include "test_util/testutil.h"

@@ -44,9 +44,7 @@ class EnvLoggerTest : public testing::Test {
     return result;
   }

-  void DeleteLogFile() {
-    ASSERT_OK(env_->DeleteFile(kLogFile));
-  }
+  void DeleteLogFile() { ASSERT_OK(env_->DeleteFile(kLogFile)); }

   static const std::string kSampleMessage;
   static const std::string kTestDir;
diff --git a/logging/event_logger.cc b/logging/event_logger.cc
index 182e282b2..4ae9d2d66 100644
--- a/logging/event_logger.cc
+++ b/logging/event_logger.cc
@@ -5,8 +5,8 @@

 #include "logging/event_logger.h"

-#include
 #include
+#include
 #include
 #include
diff --git a/monitoring/histogram.cc b/monitoring/histogram.cc
index 29bf78ad7..4449ade64 100644
--- a/monitoring/histogram.cc
+++ b/monitoring/histogram.cc
@@ -9,10 +9,10 @@

 #include "monitoring/histogram.h"

-#include
-#include
 #include
 #include
+#include
+#include

 #include "port/port.h"
 #include "util/cast_util.h"
diff --git a/monitoring/perf_context_imp.h b/monitoring/perf_context_imp.h
index 6e26988f1..7bf620605 100644
--- a/monitoring/perf_context_imp.h
+++ b/monitoring/perf_context_imp.h
@@ -27,7 +27,8 @@ extern thread_local PerfContext perf_context;
 #define PERF_TIMER_GUARD(metric)
 #define PERF_TIMER_GUARD_WITH_ENV(metric, env)
 #define PERF_CPU_TIMER_GUARD(metric, env)
-#define PERF_CONDITIONAL_TIMER_FOR_MUTEX_GUARD(metric, condition, stats, ticker_type)
+#define PERF_CONDITIONAL_TIMER_FOR_MUTEX_GUARD(metric, condition, stats, \
+                                               ticker_type)
 #define PERF_TIMER_MEASURE(metric)
 #define PERF_COUNTER_ADD(metric, value)
 #define PERF_COUNTER_BY_LEVEL_ADD(metric, value, level)
diff --git a/monitoring/statistics.cc b/monitoring/statistics.cc
index 70c993b20..6942fc579 100644
--- a/monitoring/statistics.cc
+++ b/monitoring/statistics.cc
@@ -5,11 +5,11 @@
 //
 #include "monitoring/statistics.h"

-#include
-#include "rocksdb/statistics.h"
-#include "port/likely.h"
 #include
+#include
 #include
+#include "port/likely.h"
+#include "rocksdb/statistics.h"

 namespace rocksdb {
diff --git a/options/cf_options.cc b/options/cf_options.cc
index 488c42b96..ef06eaf15 100644
--- a/options/cf_options.cc
+++ b/options/cf_options.cc
@@ -5,15 +5,15 @@

 #include "options/cf_options.h"

-#include
 #include
+#include
 #include
 #include

 #include "options/db_options.h"
 #include "port/port.h"
+#include "rocksdb/concurrent_task_limiter.h"
 #include "rocksdb/env.h"
 #include "rocksdb/options.h"
-#include "rocksdb/concurrent_task_limiter.h"

 namespace rocksdb {
diff --git a/options/options_test.cc b/options/options_test.cc
index 13328c319..d3bbe87c8 100644
--- a/options/options_test.cc
+++ b/options/options_test.cc
@@ -8,9 +8,9 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.

 #include
+#include
 #include
 #include
-#include

 #include "cache/lru_cache.h"
 #include "cache/sharded_cache.h"
diff --git a/port/jemalloc_helper.h b/port/jemalloc_helper.h
index a9095ec98..f6f72f8cb 100644
--- a/port/jemalloc_helper.h
+++ b/port/jemalloc_helper.h
@@ -32,8 +32,8 @@

 #if defined(OS_WIN) && defined(_MSC_VER)

-// MSVC does not have weak symbol support. As long as ROCKSDB_JEMALLOC is defined,
-// Jemalloc memory allocator is used.
+// MSVC does not have weak symbol support. As long as ROCKSDB_JEMALLOC is
+// defined, Jemalloc memory allocator is used.
 static inline bool HasJemalloc() { return true; }

 #else
diff --git a/port/win/env_win.cc b/port/win/env_win.cc
index 7718ebd72..c12d0ee4f 100644
--- a/port/win/env_win.cc
+++ b/port/win/env_win.cc
@@ -979,7 +979,8 @@ uint64_t WinEnvIO::NowMicros() {
     return li.QuadPart;
   }
   using namespace std::chrono;
-  return duration_cast<microseconds>(system_clock::now().time_since_epoch()).count();
+  return duration_cast<microseconds>(system_clock::now().time_since_epoch())
+      .count();
 }

 uint64_t WinEnvIO::NowNanos() {
diff --git a/table/block_based/block_based_table_factory.cc b/table/block_based/block_based_table_factory.cc
index 9dca2a6f0..5f48b78ca 100644
--- a/table/block_based/block_based_table_factory.cc
+++ b/table/block_based/block_based_table_factory.cc
@@ -7,8 +7,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.

-#include
 #include
+#include
 #include
 #include
diff --git a/table/block_based/block_based_table_reader.cc b/table/block_based/block_based_table_reader.cc
index f6afab43f..4c58c23ec 100644
--- a/table/block_based/block_based_table_reader.cc
+++ b/table/block_based/block_based_table_reader.cc
@@ -2019,9 +2019,10 @@ IndexBlockIter* BlockBasedTable::InitBlockIterator(
 // If input_iter is null, creates a new iterator
 // If input_iter is not null, update this iter and return it
 template <typename TBlockIter>
-TBlockIter* BlockBasedTable::NewDataBlockIterator(
-    const ReadOptions& ro, CachableEntry<Block>& block, TBlockIter* input_iter,
-    Status s) const {
+TBlockIter* BlockBasedTable::NewDataBlockIterator(const ReadOptions& ro,
+                                                  CachableEntry<Block>& block,
+                                                  TBlockIter* input_iter,
+                                                  Status s) const {
   PERF_TIMER_GUARD(new_table_block_iter_nanos);

   TBlockIter* iter = input_iter != nullptr ? input_iter : new TBlockIter;
@@ -2167,11 +2168,10 @@ Status BlockBasedTable::MaybeReadBlockAndLoadToCache(
         SequenceNumber seq_no = rep_->get_global_seqno(block_type);
         // If filling cache is allowed and a cache is configured, try to put the
         // block to the cache.
-        s = PutDataBlockToCache(key, ckey, block_cache, block_cache_compressed,
-                                block_entry, contents,
-                                raw_block_comp_type, uncompression_dict, seq_no,
-                                GetMemoryAllocator(rep_->table_options),
-                                block_type, get_context);
+        s = PutDataBlockToCache(
+            key, ckey, block_cache, block_cache_compressed, block_entry,
+            contents, raw_block_comp_type, uncompression_dict, seq_no,
+            GetMemoryAllocator(rep_->table_options), block_type, get_context);
       }
     }
   }
@@ -2344,12 +2344,10 @@ void BlockBasedTable::RetrieveMultipleBlocks(
         // BlockContents so it can free the memory
         assert(req.result.data() == req.scratch);
         std::unique_ptr<char[]> raw_block(req.scratch);
-        raw_block_contents = BlockContents(std::move(raw_block),
-                                           handle.size());
+        raw_block_contents = BlockContents(std::move(raw_block), handle.size());
       } else {
         // We used the scratch buffer, so no need to free anything
-        raw_block_contents = BlockContents(Slice(req.scratch,
-                                                 handle.size()));
+        raw_block_contents = BlockContents(Slice(req.scratch, handle.size()));
       }
 #ifndef NDEBUG
       raw_block_contents.is_raw_block = true;
@@ -2370,35 +2368,36 @@ void BlockBasedTable::RetrieveMultipleBlocks(
         // MaybeReadBlockAndLoadToCache will insert into the block caches if
         // necessary. Since we're passing the raw block contents, it will
         // avoid looking up the block cache
-        s = MaybeReadBlockAndLoadToCache(nullptr, options, handle,
-            uncompression_dict, block_entry, BlockType::kData,
-            mget_iter->get_context, &lookup_data_block_context,
-            &raw_block_contents);
+        s = MaybeReadBlockAndLoadToCache(
+            nullptr, options, handle, uncompression_dict, block_entry,
+            BlockType::kData, mget_iter->get_context,
+            &lookup_data_block_context, &raw_block_contents);
       } else {
         CompressionType compression_type =
-                raw_block_contents.get_compression_type();
+            raw_block_contents.get_compression_type();
         BlockContents contents;
         if (compression_type != kNoCompression) {
           UncompressionContext context(compression_type);
           UncompressionInfo info(context, uncompression_dict, compression_type);
           s = UncompressBlockContents(info, req.result.data(), handle.size(),
-                    &contents, footer.version(), rep_->ioptions,
-                    memory_allocator);
+                                      &contents, footer.version(),
+                                      rep_->ioptions, memory_allocator);
         } else {
           if (scratch != nullptr) {
             // If we used the scratch buffer, then the contents need to be
             // copied to heap
             Slice raw = Slice(req.result.data(), handle.size());
-            contents = BlockContents(CopyBufferToHeap(
-                  GetMemoryAllocator(rep_->table_options), raw),
-                  handle.size());
+            contents = BlockContents(
+                CopyBufferToHeap(GetMemoryAllocator(rep_->table_options), raw),
+                handle.size());
           } else {
             contents = std::move(raw_block_contents);
           }
         }
         if (s.ok()) {
-          (*results)[idx_in_batch].SetOwnedValue(new Block(std::move(contents),
-                global_seqno, read_amp_bytes_per_bit, ioptions.statistics));
+          (*results)[idx_in_batch].SetOwnedValue(
+              new Block(std::move(contents), global_seqno,
+                        read_amp_bytes_per_bit, ioptions.statistics));
         }
       }
     }
@@ -3036,7 +3035,8 @@ void BlockBasedTableIterator<TBlockIter, TValue>::CheckOutOfBound() {
 }

 template <class TBlockIter, typename TValue>
-void BlockBasedTableIterator<TBlockIter, TValue>::CheckDataBlockWithinUpperBound() {
+void BlockBasedTableIterator<TBlockIter,
+                             TValue>::CheckDataBlockWithinUpperBound() {
   if (read_options_.iterate_upper_bound != nullptr &&
       block_iter_points_to_real_block_) {
     data_block_within_upper_bound_ =
@@ -3047,7 +3047,8 @@ void BlockBasedTableIterator::CheckDataBlockWithinUpperBound

 InternalIterator* BlockBasedTable::NewIterator(
     const ReadOptions& read_options, const SliceTransform* prefix_extractor,
-    Arena* arena, bool skip_filters, TableReaderCaller caller, size_t compaction_readahead_size) {
+    Arena* arena, bool skip_filters, TableReaderCaller caller,
+    size_t compaction_readahead_size) {
   BlockCacheLookupContext lookup_context{caller};
   bool need_upper_bound_check =
       PrefixExtractorChanged(rep_->table_properties.get(), prefix_extractor);
@@ -3068,10 +3069,11 @@ InternalIterator* BlockBasedTable::NewIterator(
         arena->AllocateAligned(sizeof(BlockBasedTableIterator<DataBlockIter>));
     return new (mem) BlockBasedTableIterator<DataBlockIter>(
         this, read_options, rep_->internal_comparator,
-        NewIndexIterator(read_options, need_upper_bound_check &&
-            rep_->index_type == BlockBasedTableOptions::kHashSearch,
-            /*input_iter=*/nullptr, /*get_context=*/nullptr,
-            &lookup_context),
+        NewIndexIterator(
+            read_options,
+            need_upper_bound_check &&
+                rep_->index_type == BlockBasedTableOptions::kHashSearch,
+            /*input_iter=*/nullptr, /*get_context=*/nullptr, &lookup_context),
         !skip_filters && !read_options.total_order_seek &&
             prefix_extractor != nullptr,
         need_upper_bound_check, prefix_extractor, BlockType::kData, caller,
@@ -3395,7 +3397,7 @@ void BlockBasedTable::MultiGet(const ReadOptions& read_options,
       ro.read_tier = kBlockCacheTier;

       for (auto miter = data_block_range.begin();
-          miter != data_block_range.end(); ++miter) {
+           miter != data_block_range.end(); ++miter) {
         const Slice& key = miter->ikey;
         iiter->Seek(miter->ikey);

@@ -3405,9 +3407,9 @@ void BlockBasedTable::MultiGet(const ReadOptions& read_options,
         }
         if (!iiter->Valid() ||
             (!v.first_internal_key.empty() && !skip_filters &&
-            UserComparatorWrapper(rep_->internal_comparator.user_comparator())
-                .Compare(ExtractUserKey(key),
-                         ExtractUserKey(v.first_internal_key)) < 0)) {
+             UserComparatorWrapper(rep_->internal_comparator.user_comparator())
+                 .Compare(ExtractUserKey(key),
+                          ExtractUserKey(v.first_internal_key)) < 0)) {
           // The requested key falls between highest key in previous block and
           // lowest key in current block.
           *(miter->s) = iiter->status();
diff --git a/table/block_based/filter_block.h b/table/block_based/filter_block.h
index 38c3cc05f..90766b114 100644
--- a/table/block_based/filter_block.h
+++ b/table/block_based/filter_block.h
@@ -138,8 +138,8 @@ class FilterBlockReader {
       GetContext* const get_context = iter->get_context;
       if (prefix_extractor->InDomain(ukey) &&
           !PrefixMayMatch(prefix_extractor->Transform(ukey), prefix_extractor,
-                         block_offset, no_io, &ikey, get_context,
-                         lookup_context)) {
+                          block_offset, no_io, &ikey, get_context,
+                          lookup_context)) {
         range->SkipKey(iter);
       }
     }
diff --git a/table/block_based/full_filter_block.cc b/table/block_based/full_filter_block.cc
index 905bbd217..b00949c3c 100644
--- a/table/block_based/full_filter_block.cc
+++ b/table/block_based/full_filter_block.cc
@@ -3,8 +3,8 @@
 // COPYING file in the root directory) and Apache 2.0 License
 // (found in the LICENSE.Apache file in the root directory).
-#include #include "table/block_based/full_filter_block.h" +#include #include "monitoring/perf_context_imp.h" #include "port/malloc.h" @@ -218,8 +218,7 @@ void FullFilterBlockReader::PrefixesMayMatch( } void FullFilterBlockReader::MayMatch( - MultiGetRange* range, bool no_io, - const SliceTransform* prefix_extractor, + MultiGetRange* range, bool no_io, const SliceTransform* prefix_extractor, BlockCacheLookupContext* lookup_context) const { CachableEntry filter_block; @@ -252,8 +251,7 @@ void FullFilterBlockReader::MayMatch( autovector prefixes; int num_keys = 0; MultiGetRange filter_range(*range, range->begin(), range->end()); - for (auto iter = filter_range.begin(); - iter != filter_range.end(); ++iter) { + for (auto iter = filter_range.begin(); iter != filter_range.end(); ++iter) { if (!prefix_extractor) { keys[num_keys++] = &iter->ukey; } else if (prefix_extractor->InDomain(iter->ukey)) { @@ -266,15 +264,14 @@ void FullFilterBlockReader::MayMatch( filter_bits_reader->MayMatch(num_keys, &keys[0], &may_match[0]); int i = 0; - for (auto iter = filter_range.begin(); - iter != filter_range.end(); ++iter) { + for (auto iter = filter_range.begin(); iter != filter_range.end(); ++iter) { if (!may_match[i]) { // Update original MultiGet range to skip this key. The filter_range // was temporarily used just to skip keys not in prefix_extractor domain range->SkipKey(iter); PERF_COUNTER_ADD(bloom_sst_miss_count, 1); } else { - //PERF_COUNTER_ADD(bloom_sst_hit_count, 1); + // PERF_COUNTER_ADD(bloom_sst_hit_count, 1); PerfContext* perf_ctx = get_perf_context(); perf_ctx->bloom_sst_hit_count++; } diff --git a/table/block_based/uncompression_dict_reader.cc b/table/block_based/uncompression_dict_reader.cc index 559e8af5c..10ceab16f 100644 --- a/table/block_based/uncompression_dict_reader.cc +++ b/table/block_based/uncompression_dict_reader.cc @@ -102,12 +102,12 @@ size_t UncompressionDictReader::ApproximateMemoryUsage() const { : 0; #ifdef ROCKSDB_MALLOC_USABLE_SIZE - usage += malloc_usable_size(const_cast(this)); + usage += malloc_usable_size(const_cast(this)); #else - usage += sizeof(*this); + usage += sizeof(*this); #endif // ROCKSDB_MALLOC_USABLE_SIZE - return usage; + return usage; } bool UncompressionDictReader::cache_dictionary_blocks() const { diff --git a/table/cuckoo/cuckoo_table_reader.h b/table/cuckoo/cuckoo_table_reader.h index d90a14757..fb0445bcf 100644 --- a/table/cuckoo/cuckoo_table_reader.h +++ b/table/cuckoo/cuckoo_table_reader.h @@ -51,7 +51,8 @@ class CuckooTableReader: public TableReader { InternalIterator* NewIterator(const ReadOptions&, const SliceTransform* prefix_extractor, Arena* arena, bool skip_filters, - TableReaderCaller caller, size_t compaction_readahead_size = 0) override; + TableReaderCaller caller, + size_t compaction_readahead_size = 0) override; void Prepare(const Slice& target) override; // Report an approximation of how much memory has been used. 
diff --git a/table/cuckoo/cuckoo_table_reader_test.cc b/table/cuckoo/cuckoo_table_reader_test.cc index 8043d36ab..2dfe887ba 100644 --- a/table/cuckoo/cuckoo_table_reader_test.cc +++ b/table/cuckoo/cuckoo_table_reader_test.cc @@ -14,9 +14,9 @@ int main() { #else #include -#include -#include #include +#include +#include #include "memory/arena.h" #include "table/cuckoo/cuckoo_table_builder.h" diff --git a/table/mock_table.cc b/table/mock_table.cc index fb092bd5e..50cbb2024 100644 --- a/table/mock_table.cc +++ b/table/mock_table.cc @@ -28,7 +28,8 @@ stl_wrappers::KVMap MakeMockFile( InternalIterator* MockTableReader::NewIterator( const ReadOptions&, const SliceTransform* /* prefix_extractor */, - Arena* /*arena*/, bool /*skip_filters*/, TableReaderCaller /*caller*/, size_t /*compaction_readahead_size*/) { + Arena* /*arena*/, bool /*skip_filters*/, TableReaderCaller /*caller*/, + size_t /*compaction_readahead_size*/) { return new MockTableIterator(table_); } diff --git a/table/mock_table.h b/table/mock_table.h index 8ba937ac8..81d178810 100644 --- a/table/mock_table.h +++ b/table/mock_table.h @@ -42,7 +42,7 @@ class MockTableReader : public TableReader { const SliceTransform* prefix_extractor, Arena* arena, bool skip_filters, TableReaderCaller caller, - size_t compaction_readahead_size = 0) override; + size_t compaction_readahead_size = 0) override; Status Get(const ReadOptions& readOptions, const Slice& key, GetContext* get_context, const SliceTransform* prefix_extractor, diff --git a/table/plain/plain_table_bloom.cc b/table/plain/plain_table_bloom.cc index 92461bc86..0de541b56 100644 --- a/table/plain/plain_table_bloom.cc +++ b/table/plain/plain_table_bloom.cc @@ -5,13 +5,12 @@ #include "table/plain/plain_table_bloom.h" -#include #include +#include #include "util/dynamic_bloom.h" #include "memory/allocator.h" - namespace rocksdb { namespace { @@ -28,7 +27,7 @@ uint32_t GetTotalBitsForLocality(uint32_t total_bits) { return num_blocks * (CACHE_LINE_SIZE * 8); } -} +} // namespace PlainTableBloomV1::PlainTableBloomV1(uint32_t num_probes) : kTotalBits(0), kNumBlocks(0), kNumProbes(num_probes), data_(nullptr) {} @@ -40,10 +39,10 @@ void PlainTableBloomV1::SetRawData(char* raw_data, uint32_t total_bits, kNumBlocks = num_blocks; } -void PlainTableBloomV1::SetTotalBits(Allocator* allocator, - uint32_t total_bits, uint32_t locality, - size_t huge_page_tlb_size, - Logger* logger) { +void PlainTableBloomV1::SetTotalBits(Allocator* allocator, uint32_t total_bits, + uint32_t locality, + size_t huge_page_tlb_size, + Logger* logger) { kTotalBits = (locality > 0) ? GetTotalBitsForLocality(total_bits) : (total_bits + 7) / 8 * 8; kNumBlocks = (locality > 0) ? (kTotalBits / (CACHE_LINE_SIZE * 8)) : 0; @@ -66,7 +65,8 @@ void PlainTableBloomV1::SetTotalBits(Allocator* allocator, data_ = raw; } -void BloomBlockBuilder::AddKeysHashes(const std::vector& keys_hashes) { +void BloomBlockBuilder::AddKeysHashes( + const std::vector& keys_hashes) { for (auto hash : keys_hashes) { bloom_.AddHash(hash); } diff --git a/table/plain/plain_table_bloom.h b/table/plain/plain_table_bloom.h index ae08db1aa..8da256b3b 100644 --- a/table/plain/plain_table_bloom.h +++ b/table/plain/plain_table_bloom.h @@ -4,8 +4,8 @@ // (found in the LICENSE.Apache file in the root directory). 
#pragma once -#include #include +#include #include "rocksdb/slice.h" diff --git a/table/plain/plain_table_reader.h b/table/plain/plain_table_reader.h index fa248d54f..e2d0e8592 100644 --- a/table/plain/plain_table_reader.h +++ b/table/plain/plain_table_reader.h @@ -83,7 +83,8 @@ class PlainTableReader: public TableReader { InternalIterator* NewIterator(const ReadOptions&, const SliceTransform* prefix_extractor, Arena* arena, bool skip_filters, - TableReaderCaller caller, size_t compaction_readahead_size = 0) override; + TableReaderCaller caller, + size_t compaction_readahead_size = 0) override; void Prepare(const Slice& target) override; diff --git a/table/table_reader.h b/table/table_reader.h index bf4dad766..712c20c9a 100644 --- a/table/table_reader.h +++ b/table/table_reader.h @@ -45,11 +45,12 @@ class TableReader { // all the states but those allocated in arena. // skip_filters: disables checking the bloom filters even if they exist. This // option is effective only for block-based table format. - // compaction_readahead_size: its value will only be used if caller = kCompaction - virtual InternalIterator* NewIterator(const ReadOptions&, - const SliceTransform* prefix_extractor, - Arena* arena, bool skip_filters, - TableReaderCaller caller, size_t compaction_readahead_size = 0) = 0; + // compaction_readahead_size: its value will only be used if caller = + // kCompaction + virtual InternalIterator* NewIterator( + const ReadOptions&, const SliceTransform* prefix_extractor, Arena* arena, + bool skip_filters, TableReaderCaller caller, + size_t compaction_readahead_size = 0) = 0; virtual FragmentedRangeTombstoneIterator* NewRangeTombstoneIterator( const ReadOptions& /*read_options*/) { diff --git a/test_util/transaction_test_util.cc b/test_util/transaction_test_util.cc index b71ad0a1f..7043eb2d7 100644 --- a/test_util/transaction_test_util.cc +++ b/test_util/transaction_test_util.cc @@ -6,8 +6,8 @@ #include "test_util/transaction_test_util.h" -#include #include +#include #include #include #include diff --git a/tools/db_bench_tool.cc b/tools/db_bench_tool.cc index caf73e7f7..4f2a9f8bf 100644 --- a/tools/db_bench_tool.cc +++ b/tools/db_bench_tool.cc @@ -16,12 +16,12 @@ #include #endif #include -#include #include #include #include #include #include +#include #include #include #include diff --git a/tools/db_stress.cc b/tools/db_stress.cc index fb002592e..cb8f96765 100644 --- a/tools/db_stress.cc +++ b/tools/db_stress.cc @@ -29,13 +29,13 @@ int main() { #else #include -#include #include #include #include #include #include #include +#include #include #include #include diff --git a/tools/ldb_cmd.cc b/tools/ldb_cmd.cc index 83a303825..50514c8a0 100644 --- a/tools/ldb_cmd.cc +++ b/tools/ldb_cmd.cc @@ -226,8 +226,8 @@ LDBCommand* LDBCommand::SelectCommand(const ParsedParams& parsed_params) { parsed_params.flags); } else if (parsed_params.cmd == DropColumnFamilyCommand::Name()) { return new DropColumnFamilyCommand(parsed_params.cmd_params, - parsed_params.option_map, - parsed_params.flags); + parsed_params.option_map, + parsed_params.flags); } else if (parsed_params.cmd == DBFileDumperCommand::Name()) { return new DBFileDumperCommand(parsed_params.cmd_params, parsed_params.option_map, @@ -1176,7 +1176,7 @@ DropColumnFamilyCommand::DropColumnFamilyCommand( const std::vector& params, const std::map& options, const std::vector& flags) - : LDBCommand(options, flags, true, {ARG_DB}) { + : LDBCommand(options, flags, true, {ARG_DB}) { if (params.size() != 1) { exec_state_ = LDBCommandExecuteResult::Failed( 
"The name of column family to drop must be specified"); diff --git a/tools/trace_analyzer_tool.cc b/tools/trace_analyzer_tool.cc index c5576873d..ae0b20bea 100644 --- a/tools/trace_analyzer_tool.cc +++ b/tools/trace_analyzer_tool.cc @@ -573,8 +573,9 @@ Status TraceAnalyzer::MakeStatistics() { // output the access count distribution if (FLAGS_output_access_count_stats && stat.second.a_count_dist_f) { for (auto& record : stat.second.a_count_stats) { - ret = snprintf(buffer_, sizeof(buffer_), "access_count: %" PRIu64 " num: %" PRIu64 "\n", - record.first, record.second); + ret = snprintf(buffer_, sizeof(buffer_), + "access_count: %" PRIu64 " num: %" PRIu64 "\n", + record.first, record.second); if (ret < 0) { return Status::IOError("Format the output failed"); } @@ -597,8 +598,8 @@ Status TraceAnalyzer::MakeStatistics() { get_mid = true; } if (FLAGS_output_key_distribution && stat.second.a_key_size_f) { - ret = snprintf(buffer_, sizeof(buffer_), "%" PRIu64 " %" PRIu64 "\n", record.first, - record.second); + ret = snprintf(buffer_, sizeof(buffer_), "%" PRIu64 " %" PRIu64 "\n", + record.first, record.second); if (ret < 0) { return Status::IOError("Format output failed"); } @@ -626,9 +627,9 @@ Status TraceAnalyzer::MakeStatistics() { (type == TraceOperationType::kPut || type == TraceOperationType::kMerge)) { ret = snprintf(buffer_, sizeof(buffer_), - "Number_of_value_size_between %" PRIu64 " and %" PRIu64 - " is: %" PRIu64 "\n", - v_begin, v_end, record.second); + "Number_of_value_size_between %" PRIu64 " and %" PRIu64 + " is: %" PRIu64 "\n", + v_begin, v_end, record.second); if (ret < 0) { return Status::IOError("Format output failed"); } @@ -676,9 +677,10 @@ Status TraceAnalyzer::MakeStatisticKeyStatsOrPrefix(TraceStats& stats) { succ_ratio = (static_cast(record.second.succ_count)) / record.second.access_count; } - ret = snprintf(buffer_, sizeof(buffer_), "%u %zu %" PRIu64 " %" PRIu64 " %f\n", - record.second.cf_id, record.second.value_size, - record.second.key_id, record.second.access_count, succ_ratio); + ret = snprintf(buffer_, sizeof(buffer_), + "%u %zu %" PRIu64 " %" PRIu64 " %f\n", record.second.cf_id, + record.second.value_size, record.second.key_id, + record.second.access_count, succ_ratio); if (ret < 0) { return Status::IOError("Format output failed"); } @@ -704,9 +706,11 @@ Status TraceAnalyzer::MakeStatisticKeyStatsOrPrefix(TraceStats& stats) { prefix_succ_ratio = (static_cast(prefix_succ_access)) / prefix_access; } - ret = snprintf(buffer_, sizeof(buffer_), "%" PRIu64 " %" PRIu64 " %" PRIu64 " %f %f %s\n", - record.second.key_id, prefix_access, prefix_count, - prefix_ave_access, prefix_succ_ratio, prefix_out.c_str()); + ret = + snprintf(buffer_, sizeof(buffer_), + "%" PRIu64 " %" PRIu64 " %" PRIu64 " %f %f %s\n", + record.second.key_id, prefix_access, prefix_count, + prefix_ave_access, prefix_succ_ratio, prefix_out.c_str()); if (ret < 0) { return Status::IOError("Format output failed"); } @@ -871,7 +875,8 @@ Status TraceAnalyzer::MakeStatisticQPS() { cur_ratio = (static_cast(find_time->second)) / cur_uni_key; cur_num = find_time->second; } - ret = snprintf(buffer_, sizeof(buffer_), "%" PRIu64 " %.12f\n", cur_num, cur_ratio); + ret = snprintf(buffer_, sizeof(buffer_), "%" PRIu64 " %.12f\n", + cur_num, cur_ratio); if (ret < 0) { return Status::IOError("Format the output failed"); } @@ -889,8 +894,8 @@ Status TraceAnalyzer::MakeStatisticQPS() { if (FLAGS_output_prefix_cut > 0 && stat.second.a_top_qps_prefix_f) { while (!stat.second.top_k_qps_sec.empty()) { ret = snprintf(buffer_, 
sizeof(buffer_), "At time: %u with QPS: %u\n", - stat.second.top_k_qps_sec.top().second, - stat.second.top_k_qps_sec.top().first); + stat.second.top_k_qps_sec.top().second, + stat.second.top_k_qps_sec.top().first); if (ret < 0) { return Status::IOError("Format the output failed"); } @@ -907,8 +912,9 @@ Status TraceAnalyzer::MakeStatisticQPS() { for (auto& qps_prefix : stat.second.a_qps_prefix_stats[qps_time]) { std::string qps_prefix_out = rocksdb::LDBCommand::StringToHex(qps_prefix.first); - ret = snprintf(buffer_, sizeof(buffer_), "The prefix: %s Access count: %u\n", - qps_prefix_out.c_str(), qps_prefix.second); + ret = snprintf(buffer_, sizeof(buffer_), + "The prefix: %s Access count: %u\n", + qps_prefix_out.c_str(), qps_prefix.second); if (ret < 0) { return Status::IOError("Format the output failed"); } @@ -1017,9 +1023,10 @@ Status TraceAnalyzer::ReProcessing() { if (found != stat.a_key_stats.end()) { key_id = found->second.key_id; } - ret = snprintf(buffer_, sizeof(buffer_), "%u %" PRIu64 " %" PRIu64 "\n", - stat.time_series.front().type, - stat.time_series.front().ts, key_id); + ret = + snprintf(buffer_, sizeof(buffer_), "%u %" PRIu64 " %" PRIu64 "\n", + stat.time_series.front().type, + stat.time_series.front().ts, key_id); if (ret < 0) { return Status::IOError("Format the output failed"); } @@ -1065,9 +1072,9 @@ Status TraceAnalyzer::ReProcessing() { TraceStats& stat = ta_[type].stats[cf_id]; if (stat.w_key_f) { if (stat.a_key_stats.find(input_key) != stat.a_key_stats.end()) { - ret = snprintf(buffer_, sizeof(buffer_), "%" PRIu64 " %" PRIu64 "\n", - cfs_[cf_id].w_count, - stat.a_key_stats[input_key].access_count); + ret = snprintf(buffer_, sizeof(buffer_), + "%" PRIu64 " %" PRIu64 "\n", cfs_[cf_id].w_count, + stat.a_key_stats[input_key].access_count); if (ret < 0) { return Status::IOError("Format the output failed"); } @@ -1087,8 +1094,8 @@ Status TraceAnalyzer::ReProcessing() { prefix[type] = input_key.substr(0, FLAGS_output_prefix_cut); std::string prefix_out = rocksdb::LDBCommand::StringToHex(prefix[type]); - ret = snprintf(buffer_, sizeof(buffer_), "%" PRIu64 " %s\n", cfs_[cf_id].w_count, - prefix_out.c_str()); + ret = snprintf(buffer_, sizeof(buffer_), "%" PRIu64 " %s\n", + cfs_[cf_id].w_count, prefix_out.c_str()); if (ret < 0) { return Status::IOError("Format the output failed"); } @@ -1904,8 +1911,8 @@ Status TraceAnalyzer::WriteTraceSequence(const uint32_t& type, const uint64_t ts) { std::string hex_key = rocksdb::LDBCommand::StringToHex(key); int ret; - ret = - snprintf(buffer_, sizeof(buffer_), "%u %u %zu %" PRIu64 "\n", type, cf_id, value_size, ts); + ret = snprintf(buffer_, sizeof(buffer_), "%u %u %zu %" PRIu64 "\n", type, + cf_id, value_size, ts); if (ret < 0) { return Status::IOError("failed to format the output"); } diff --git a/tools/write_stress.cc b/tools/write_stress.cc index 95948ef57..7e7a1a765 100644 --- a/tools/write_stress.cc +++ b/tools/write_stress.cc @@ -56,8 +56,8 @@ int main() { } #else -#include #include +#include #include #include #include diff --git a/util/bloom_test.cc b/util/bloom_test.cc index 261461521..cda5e73e1 100644 --- a/util/bloom_test.cc +++ b/util/bloom_test.cc @@ -23,8 +23,8 @@ int main() { #include "table/full_filter_bits_builder.h" #include "test_util/testharness.h" #include "test_util/testutil.h" -#include "util/hash.h" #include "util/gflags_compat.h" +#include "util/hash.h" using GFLAGS_NAMESPACE::ParseCommandLineFlags; @@ -98,9 +98,7 @@ class BloomTest : public testing::Test { return filter_.size(); } - Slice FilterData() const { - 
return Slice(filter_); - } + Slice FilterData() const { return Slice(filter_); } void DumpFilter() { fprintf(stderr, "F("); @@ -190,28 +188,28 @@ TEST_F(BloomTest, VaryingLengths) { TEST_F(BloomTest, Schema) { char buffer[sizeof(int)]; - ResetPolicy(NewBloomFilterPolicy(8)); // num_probes = 5 + ResetPolicy(NewBloomFilterPolicy(8)); // num_probes = 5 for (int key = 0; key < 87; key++) { Add(Key(key, buffer)); } Build(); ASSERT_EQ(BloomHash(FilterData()), 3589896109U); - ResetPolicy(NewBloomFilterPolicy(9)); // num_probes = 6 + ResetPolicy(NewBloomFilterPolicy(9)); // num_probes = 6 for (int key = 0; key < 87; key++) { Add(Key(key, buffer)); } Build(); ASSERT_EQ(BloomHash(FilterData()), 969445585); - ResetPolicy(NewBloomFilterPolicy(11)); // num_probes = 7 + ResetPolicy(NewBloomFilterPolicy(11)); // num_probes = 7 for (int key = 0; key < 87; key++) { Add(Key(key, buffer)); } Build(); ASSERT_EQ(BloomHash(FilterData()), 1694458207); - ResetPolicy(NewBloomFilterPolicy(10)); // num_probes = 6 + ResetPolicy(NewBloomFilterPolicy(10)); // num_probes = 6 for (int key = 0; key < 87; key++) { Add(Key(key, buffer)); } @@ -235,7 +233,6 @@ TEST_F(BloomTest, Schema) { ResetPolicy(); } - // Different bits-per-byte class FullBloomTest : public testing::Test { @@ -287,9 +284,7 @@ class FullBloomTest : public testing::Test { return filter_size_; } - Slice FilterData() { - return Slice(buf_.get(), filter_size_); - } + Slice FilterData() { return Slice(buf_.get(), filter_size_); } bool Matches(const Slice& s) { if (bits_reader_ == nullptr) { @@ -381,9 +376,8 @@ TEST_F(FullBloomTest, FullVaryingLengths) { } namespace { -inline uint32_t SelectByCacheLineSize(uint32_t for64, - uint32_t for128, - uint32_t for256) { +inline uint32_t SelectByCacheLineSize(uint32_t for64, uint32_t for128, + uint32_t for256) { (void)for64; (void)for128; (void)for256; @@ -394,10 +388,10 @@ inline uint32_t SelectByCacheLineSize(uint32_t for64, #elif CACHE_LINE_SIZE == 256 return for256; #else - #error "CACHE_LINE_SIZE unknown or unrecognized" +#error "CACHE_LINE_SIZE unknown or unrecognized" #endif } -} // namespace +} // namespace // Ensure the implementation doesn't accidentally change in an // incompatible way @@ -407,7 +401,7 @@ TEST_F(FullBloomTest, Schema) { // Use enough keys so that changing bits / key by 1 is guaranteed to // change number of allocated cache lines. So keys > max cache line bits. 
- ResetPolicy(NewBloomFilterPolicy(8)); // num_probes = 5 + ResetPolicy(NewBloomFilterPolicy(8)); // num_probes = 5 for (int key = 0; key < 2087; key++) { Add(Key(key, buffer)); } @@ -415,7 +409,7 @@ TEST_F(FullBloomTest, Schema) { ASSERT_EQ(BloomHash(FilterData()), SelectByCacheLineSize(1302145999, 2811644657U, 756553699)); - ResetPolicy(NewBloomFilterPolicy(9)); // num_probes = 6 + ResetPolicy(NewBloomFilterPolicy(9)); // num_probes = 6 for (int key = 0; key < 2087; key++) { Add(Key(key, buffer)); } @@ -423,7 +417,7 @@ TEST_F(FullBloomTest, Schema) { ASSERT_EQ(BloomHash(FilterData()), SelectByCacheLineSize(2092755149, 661139132, 1182970461)); - ResetPolicy(NewBloomFilterPolicy(11)); // num_probes = 7 + ResetPolicy(NewBloomFilterPolicy(11)); // num_probes = 7 for (int key = 0; key < 2087; key++) { Add(Key(key, buffer)); } @@ -431,7 +425,7 @@ TEST_F(FullBloomTest, Schema) { ASSERT_EQ(BloomHash(FilterData()), SelectByCacheLineSize(3755609649U, 1812694762, 1449142939)); - ResetPolicy(NewBloomFilterPolicy(10)); // num_probes = 6 + ResetPolicy(NewBloomFilterPolicy(10)); // num_probes = 6 for (int key = 0; key < 2087; key++) { Add(Key(key, buffer)); } diff --git a/util/crc32c_arm64.cc b/util/crc32c_arm64.cc index 79081298d..591c623a5 100644 --- a/util/crc32c_arm64.cc +++ b/util/crc32c_arm64.cc @@ -15,21 +15,22 @@ #ifdef HAVE_ARM64_CRYPTO /* unfolding to compute 8 * 3 = 24 bytes parallelly */ -#define CRC32C24BYTES(ITR) \ - crc1 = crc32c_u64(crc1, *(buf64 + BLK_LENGTH + (ITR)));\ - crc2 = crc32c_u64(crc2, *(buf64 + BLK_LENGTH*2 + (ITR)));\ +#define CRC32C24BYTES(ITR) \ + crc1 = crc32c_u64(crc1, *(buf64 + BLK_LENGTH + (ITR))); \ + crc2 = crc32c_u64(crc2, *(buf64 + BLK_LENGTH * 2 + (ITR))); \ crc0 = crc32c_u64(crc0, *(buf64 + (ITR))); /* unfolding to compute 24 * 7 = 168 bytes parallelly */ -#define CRC32C7X24BYTES(ITR) do {\ - CRC32C24BYTES((ITR)*7+0) \ - CRC32C24BYTES((ITR)*7+1) \ - CRC32C24BYTES((ITR)*7+2) \ - CRC32C24BYTES((ITR)*7+3) \ - CRC32C24BYTES((ITR)*7+4) \ - CRC32C24BYTES((ITR)*7+5) \ - CRC32C24BYTES((ITR)*7+6) \ -} while(0) +#define CRC32C7X24BYTES(ITR) \ + do { \ + CRC32C24BYTES((ITR)*7 + 0) \ + CRC32C24BYTES((ITR)*7 + 1) \ + CRC32C24BYTES((ITR)*7 + 2) \ + CRC32C24BYTES((ITR)*7 + 3) \ + CRC32C24BYTES((ITR)*7 + 4) \ + CRC32C24BYTES((ITR)*7 + 5) \ + CRC32C24BYTES((ITR)*7 + 6) \ + } while (0) #endif uint32_t crc32c_runtime_check(void) { @@ -45,15 +46,15 @@ uint32_t crc32c_arm64(uint32_t crc, unsigned char const *data, crc ^= 0xffffffff; #ifdef HAVE_ARM64_CRYPTO - /* Crc32c Parallel computation - * Algorithm comes from Intel whitepaper: - * crc-iscsi-polynomial-crc32-instruction-paper - * - * Input data is divided into three equal-sized blocks - * Three parallel blocks (crc0, crc1, crc2) for 1024 Bytes - * One Block: 42(BLK_LENGTH) * 8(step length: crc32c_u64) bytes - */ - #define BLK_LENGTH 42 +/* Crc32c Parallel computation + * Algorithm comes from Intel whitepaper: + * crc-iscsi-polynomial-crc32-instruction-paper + * + * Input data is divided into three equal-sized blocks + * Three parallel blocks (crc0, crc1, crc2) for 1024 Bytes + * One Block: 42(BLK_LENGTH) * 8(step length: crc32c_u64) bytes + */ +#define BLK_LENGTH 42 while (length >= 1024) { uint64_t t0, t1; uint32_t crc0 = 0, crc1 = 0, crc2 = 0; @@ -97,31 +98,29 @@ uint32_t crc32c_arm64(uint32_t crc, unsigned char const *data, length -= 1024; } - if (length == 0) - return crc ^ (0xffffffffU); + if (length == 0) return crc ^ (0xffffffffU); #endif buf8 = (const uint8_t *)buf64; while (length >= 8) { - crc = crc32c_u64(crc, *(const 
uint64_t*)buf8); + crc = crc32c_u64(crc, *(const uint64_t *)buf8); buf8 += 8; length -= 8; } /* The following is more efficient than the straight loop */ if (length >= 4) { - crc = crc32c_u32(crc, *(const uint32_t*)buf8); + crc = crc32c_u32(crc, *(const uint32_t *)buf8); buf8 += 4; length -= 4; } if (length >= 2) { - crc = crc32c_u16(crc, *(const uint16_t*)buf8); + crc = crc32c_u16(crc, *(const uint16_t *)buf8); buf8 += 2; length -= 2; } - if (length >= 1) - crc = crc32c_u8(crc, *buf8); + if (length >= 1) crc = crc32c_u8(crc, *buf8); crc ^= 0xffffffff; return crc; diff --git a/util/crc32c_arm64.h b/util/crc32c_arm64.h index ec87720ec..66fe30c14 100644 --- a/util/crc32c_arm64.h +++ b/util/crc32c_arm64.h @@ -13,7 +13,7 @@ #ifdef __ARM_FEATURE_CRC32 #define HAVE_ARM64_CRC #include -#define crc32c_u8(crc, v) __crc32cb(crc, v) +#define crc32c_u8(crc, v) __crc32cb(crc, v) #define crc32c_u16(crc, v) __crc32ch(crc, v) #define crc32c_u32(crc, v) __crc32cw(crc, v) #define crc32c_u64(crc, v) __crc32cd(crc, v) diff --git a/util/dynamic_bloom.cc b/util/dynamic_bloom.cc index 26ff1c3aa..ebbae149d 100644 --- a/util/dynamic_bloom.cc +++ b/util/dynamic_bloom.cc @@ -23,16 +23,15 @@ uint32_t roundUpToPow2(uint32_t x) { } return rv; } - } DynamicBloom::DynamicBloom(Allocator* allocator, uint32_t total_bits, - uint32_t num_probes, - size_t huge_page_tlb_size, Logger* logger) + uint32_t num_probes, size_t huge_page_tlb_size, + Logger* logger) // Round down, except round up with 1 : kNumDoubleProbes((num_probes + (num_probes == 1)) / 2) { - assert(num_probes % 2 == 0); // limitation of current implementation - assert(num_probes <= 10); // limitation of current implementation + assert(num_probes % 2 == 0); // limitation of current implementation + assert(num_probes <= 10); // limitation of current implementation assert(kNumDoubleProbes > 0); // Determine how much to round off + align by so that x ^ i (that's xor) is diff --git a/util/dynamic_bloom.h b/util/dynamic_bloom.h index cf6ac4060..312d2805b 100644 --- a/util/dynamic_bloom.h +++ b/util/dynamic_bloom.h @@ -44,10 +44,8 @@ class DynamicBloom { // it to be allocated, like: // sysctl -w vm.nr_hugepages=20 // See linux doc Documentation/vm/hugetlbpage.txt - explicit DynamicBloom(Allocator* allocator, - uint32_t total_bits, - uint32_t num_probes = 6, - size_t huge_page_tlb_size = 0, + explicit DynamicBloom(Allocator* allocator, uint32_t total_bits, + uint32_t num_probes = 6, size_t huge_page_tlb_size = 0, Logger* logger = nullptr); ~DynamicBloom() {} @@ -159,8 +157,8 @@ inline bool DynamicBloom::MayContainHash(uint32_t h32) const { uint64_t h = 0x9e3779b97f4a7c13ULL * h32; for (unsigned i = 0;; ++i) { // Two bit probes per uint64_t probe - uint64_t mask = ((uint64_t)1 << (h & 63)) - | ((uint64_t)1 << ((h >> 6) & 63)); + uint64_t mask = + ((uint64_t)1 << (h & 63)) | ((uint64_t)1 << ((h >> 6) & 63)); uint64_t val = data_[a ^ i].load(std::memory_order_relaxed); if (i + 1 >= kNumDoubleProbes) { return (val & mask) == mask; @@ -179,8 +177,8 @@ inline void DynamicBloom::AddHash(uint32_t h32, const OrFunc& or_func) { uint64_t h = 0x9e3779b97f4a7c13ULL * h32; for (unsigned i = 0;; ++i) { // Two bit probes per uint64_t probe - uint64_t mask = ((uint64_t)1 << (h & 63)) - | ((uint64_t)1 << ((h >> 6) & 63)); + uint64_t mask = + ((uint64_t)1 << (h & 63)) | ((uint64_t)1 << ((h >> 6) & 63)); or_func(&data_[a ^ i], mask); if (i + 1 >= kNumDoubleProbes) { return; diff --git a/util/dynamic_bloom_test.cc b/util/dynamic_bloom_test.cc index 4feaa1f64..e4a8c66e3 100644 --- 
a/util/dynamic_bloom_test.cc +++ b/util/dynamic_bloom_test.cc @@ -11,9 +11,9 @@ int main() { } #else -#include #include #include +#include #include #include #include @@ -253,8 +253,9 @@ TEST_F(DynamicBloomTest, concurrent_with_perf) { threads.pop_back(); } - fprintf(stderr, "dynamic bloom, avg parallel add latency %3g" - " nanos/key\n", + fprintf(stderr, + "dynamic bloom, avg parallel add latency %3g" + " nanos/key\n", static_cast(elapsed) / num_threads / num_keys); elapsed = 0; @@ -276,8 +277,9 @@ TEST_F(DynamicBloomTest, concurrent_with_perf) { threads.pop_back(); } - fprintf(stderr, "dynamic bloom, avg parallel hit latency %3g" - " nanos/key\n", + fprintf(stderr, + "dynamic bloom, avg parallel hit latency %3g" + " nanos/key\n", static_cast(elapsed) / num_threads / num_keys); elapsed = 0; @@ -286,8 +288,7 @@ TEST_F(DynamicBloomTest, concurrent_with_perf) { KeyMaker km; StopWatchNano timer(Env::Default()); timer.Start(); - for (uint64_t i = num_keys + 1 + t; i <= 2 * num_keys; - i += num_threads) { + for (uint64_t i = num_keys + 1 + t; i <= 2 * num_keys; i += num_threads) { bool f = std_bloom.MayContain(km.Seq(i)); if (f) { ++false_positives; @@ -303,8 +304,9 @@ TEST_F(DynamicBloomTest, concurrent_with_perf) { threads.pop_back(); } - fprintf(stderr, "dynamic bloom, avg parallel miss latency %3g" - " nanos/key, %f%% false positive rate\n", + fprintf(stderr, + "dynamic bloom, avg parallel miss latency %3g" + " nanos/key, %f%% false positive rate\n", static_cast(elapsed) / num_threads / num_keys, false_positives.load() * 100.0 / num_keys); } diff --git a/util/hash_test.cc b/util/hash_test.cc index 9d8eb1fa1..dcfe39fbc 100644 --- a/util/hash_test.cc +++ b/util/hash_test.cc @@ -52,10 +52,14 @@ TEST(HashTest, Values) { EXPECT_EQ(Hash("\x38\xd6\xf7\x28\x20\xb4\x8a\xe9", 8, kSeed), 3530274698u); EXPECT_EQ(Hash("\xbb\x18\x5d\xf4\x12\x03\xf7\x99", 8, kSeed), 1974545809u); EXPECT_EQ(Hash("\x80\xd4\x3b\x3b\xae\x22\xa2\x78", 8, kSeed), 3563570120u); - EXPECT_EQ(Hash("\x1a\xb5\xd0\xfe\xab\xc3\x61\xb2\x99", 9, kSeed), 2706087434u); - EXPECT_EQ(Hash("\x8e\x4a\xc3\x18\x20\x2f\x06\xe6\x3c", 9, kSeed), 1534654151u); - EXPECT_EQ(Hash("\xb6\xc0\xdd\x05\x3f\xc4\x86\x4c\xef", 9, kSeed), 2355554696u); - EXPECT_EQ(Hash("\x9a\x5f\x78\x0d\xaf\x50\xe1\x1f\x55", 9, kSeed), 1400800912u); + EXPECT_EQ(Hash("\x1a\xb5\xd0\xfe\xab\xc3\x61\xb2\x99", 9, kSeed), + 2706087434u); + EXPECT_EQ(Hash("\x8e\x4a\xc3\x18\x20\x2f\x06\xe6\x3c", 9, kSeed), + 1534654151u); + EXPECT_EQ(Hash("\xb6\xc0\xdd\x05\x3f\xc4\x86\x4c\xef", 9, kSeed), + 2355554696u); + EXPECT_EQ(Hash("\x9a\x5f\x78\x0d\xaf\x50\xe1\x1f\x55", 9, kSeed), + 1400800912u); EXPECT_EQ(Hash("\x22\x6f\x39\x1f\xf8\xdd\x4f\x52\x17\x94", 10, kSeed), 3420325137u); EXPECT_EQ(Hash("\x32\x89\x2a\x75\x48\x3a\x4a\x02\x69\xdd", 10, kSeed), diff --git a/util/rate_limiter_test.cc b/util/rate_limiter_test.cc index 7795e01fc..f7809b989 100644 --- a/util/rate_limiter_test.cc +++ b/util/rate_limiter_test.cc @@ -9,8 +9,8 @@ #include "util/rate_limiter.h" -#include #include +#include #include #include "db/db_test_util.h" diff --git a/util/threadpool_imp.cc b/util/threadpool_imp.cc index 2e71f223f..8d0e01e03 100644 --- a/util/threadpool_imp.cc +++ b/util/threadpool_imp.cc @@ -98,24 +98,23 @@ struct ThreadPoolImpl::Impl { void SetThreadPriority(Env::Priority priority) { priority_ = priority; } private: - - static void BGThreadWrapper(void* arg); - - bool low_io_priority_; - bool low_cpu_priority_; - Env::Priority priority_; - Env* env_; - - int total_threads_limit_; - std::atomic_uint queue_len_; 
// Queue length. Used for stats reporting - bool exit_all_threads_; - bool wait_for_jobs_to_complete_; - - // Entry per Schedule()/Submit() call - struct BGItem { - void* tag = nullptr; - std::function function; - std::function unschedFunction; + static void BGThreadWrapper(void* arg); + + bool low_io_priority_; + bool low_cpu_priority_; + Env::Priority priority_; + Env* env_; + + int total_threads_limit_; + std::atomic_uint queue_len_; // Queue length. Used for stats reporting + bool exit_all_threads_; + bool wait_for_jobs_to_complete_; + + // Entry per Schedule()/Submit() call + struct BGItem { + void* tag = nullptr; + std::function function; + std::function unschedFunction; }; using BGQueue = std::deque; diff --git a/utilities/backupable/backupable_db.cc b/utilities/backupable/backupable_db.cc index 9adeb721b..aca50a2b6 100644 --- a/utilities/backupable/backupable_db.cc +++ b/utilities/backupable/backupable_db.cc @@ -24,10 +24,10 @@ #include "util/string_util.h" #include "utilities/checkpoint/checkpoint_impl.h" -#include #include #include #include +#include #include #include #include diff --git a/utilities/blob_db/blob_file.cc b/utilities/blob_db/blob_file.cc index de16b1522..14bfb4309 100644 --- a/utilities/blob_db/blob_file.cc +++ b/utilities/blob_db/blob_file.cc @@ -6,8 +6,8 @@ #ifndef ROCKSDB_LITE #include "utilities/blob_db/blob_file.h" -#include #include +#include #include #include diff --git a/utilities/checkpoint/checkpoint_impl.cc b/utilities/checkpoint/checkpoint_impl.cc index 0639ed2f2..6d025662e 100644 --- a/utilities/checkpoint/checkpoint_impl.cc +++ b/utilities/checkpoint/checkpoint_impl.cc @@ -11,8 +11,8 @@ #include "utilities/checkpoint/checkpoint_impl.h" -#include #include +#include #include #include diff --git a/utilities/checkpoint/checkpoint_test.cc b/utilities/checkpoint/checkpoint_test.cc index d748f500e..d7410960a 100644 --- a/utilities/checkpoint/checkpoint_test.cc +++ b/utilities/checkpoint/checkpoint_test.cc @@ -313,106 +313,105 @@ TEST_F(CheckpointTest, GetSnapshotLink) { } TEST_F(CheckpointTest, ExportColumnFamilyWithLinks) { - // Create a database - Status s; - auto options = CurrentOptions(); - options.create_if_missing = true; - CreateAndReopenWithCF({}, options); - - // Helper to verify the number of files in metadata and export dir - auto verify_files_exported = [&](const ExportImportFilesMetaData& metadata, - int num_files_expected) { - ASSERT_EQ(metadata.files.size(), num_files_expected); - std::vector subchildren; - env_->GetChildren(export_path_, &subchildren); - int num_children = 0; - for (const auto& child : subchildren) { - if (child != "." && child != "..") { - ++num_children; - } + // Create a database + Status s; + auto options = CurrentOptions(); + options.create_if_missing = true; + CreateAndReopenWithCF({}, options); + + // Helper to verify the number of files in metadata and export dir + auto verify_files_exported = [&](const ExportImportFilesMetaData& metadata, + int num_files_expected) { + ASSERT_EQ(metadata.files.size(), num_files_expected); + std::vector subchildren; + env_->GetChildren(export_path_, &subchildren); + int num_children = 0; + for (const auto& child : subchildren) { + if (child != "." 
&& child != "..") { + ++num_children; } - ASSERT_EQ(num_children, num_files_expected); - }; - - // Test DefaultColumnFamily - { - const auto key = std::string("foo"); - ASSERT_OK(Put(key, "v1")); - - Checkpoint* checkpoint; - ASSERT_OK(Checkpoint::Create(db_, &checkpoint)); - - // Export the Tables and verify - ASSERT_OK(checkpoint->ExportColumnFamily(db_->DefaultColumnFamily(), - export_path_, &metadata_)); - verify_files_exported(*metadata_, 1); - ASSERT_EQ(metadata_->db_comparator_name, options.comparator->Name()); - test::DestroyDir(env_, export_path_); - delete metadata_; - metadata_ = nullptr; - - // Check again after compaction - CompactAll(); - ASSERT_OK(Put(key, "v2")); - ASSERT_OK(checkpoint->ExportColumnFamily(db_->DefaultColumnFamily(), - export_path_, &metadata_)); - verify_files_exported(*metadata_, 2); - ASSERT_EQ(metadata_->db_comparator_name, options.comparator->Name()); - test::DestroyDir(env_, export_path_); - delete metadata_; - metadata_ = nullptr; - delete checkpoint; - } - - // Test non default column family with non default comparator - { - auto cf_options = CurrentOptions(); - cf_options.comparator = ReverseBytewiseComparator(); - ASSERT_OK( - db_->CreateColumnFamily(cf_options, "yoyo", &cfh_reverse_comp_)); - - const auto key = std::string("foo"); - ASSERT_OK(db_->Put(WriteOptions(), cfh_reverse_comp_, key, "v1")); - - Checkpoint* checkpoint; - ASSERT_OK(Checkpoint::Create(db_, &checkpoint)); - - // Export the Tables and verify - ASSERT_OK(checkpoint->ExportColumnFamily(cfh_reverse_comp_, export_path_, - &metadata_)); - verify_files_exported(*metadata_, 1); - ASSERT_EQ(metadata_->db_comparator_name, - ReverseBytewiseComparator()->Name()); - delete checkpoint; } -} - -TEST_F(CheckpointTest, ExportColumnFamilyNegativeTest) { - // Create a database - Status s; - auto options = CurrentOptions(); - options.create_if_missing = true; - CreateAndReopenWithCF({}, options); + ASSERT_EQ(num_children, num_files_expected); + }; + // Test DefaultColumnFamily + { const auto key = std::string("foo"); ASSERT_OK(Put(key, "v1")); Checkpoint* checkpoint; ASSERT_OK(Checkpoint::Create(db_, &checkpoint)); - // Export onto existing directory - env_->CreateDirIfMissing(export_path_); - ASSERT_EQ(checkpoint->ExportColumnFamily(db_->DefaultColumnFamily(), - export_path_, &metadata_), - Status::InvalidArgument("Specified export_dir exists")); + // Export the Tables and verify + ASSERT_OK(checkpoint->ExportColumnFamily(db_->DefaultColumnFamily(), + export_path_, &metadata_)); + verify_files_exported(*metadata_, 1); + ASSERT_EQ(metadata_->db_comparator_name, options.comparator->Name()); + test::DestroyDir(env_, export_path_); + delete metadata_; + metadata_ = nullptr; + + // Check again after compaction + CompactAll(); + ASSERT_OK(Put(key, "v2")); + ASSERT_OK(checkpoint->ExportColumnFamily(db_->DefaultColumnFamily(), + export_path_, &metadata_)); + verify_files_exported(*metadata_, 2); + ASSERT_EQ(metadata_->db_comparator_name, options.comparator->Name()); test::DestroyDir(env_, export_path_); + delete metadata_; + metadata_ = nullptr; + delete checkpoint; + } + + // Test non default column family with non default comparator + { + auto cf_options = CurrentOptions(); + cf_options.comparator = ReverseBytewiseComparator(); + ASSERT_OK(db_->CreateColumnFamily(cf_options, "yoyo", &cfh_reverse_comp_)); + + const auto key = std::string("foo"); + ASSERT_OK(db_->Put(WriteOptions(), cfh_reverse_comp_, key, "v1")); + + Checkpoint* checkpoint; + ASSERT_OK(Checkpoint::Create(db_, &checkpoint)); - // Export 
with invalid directory specification - export_path_ = ""; - ASSERT_EQ(checkpoint->ExportColumnFamily(db_->DefaultColumnFamily(), - export_path_, &metadata_), - Status::InvalidArgument("Specified export_dir invalid")); + // Export the Tables and verify + ASSERT_OK(checkpoint->ExportColumnFamily(cfh_reverse_comp_, export_path_, + &metadata_)); + verify_files_exported(*metadata_, 1); + ASSERT_EQ(metadata_->db_comparator_name, + ReverseBytewiseComparator()->Name()); delete checkpoint; + } +} + +TEST_F(CheckpointTest, ExportColumnFamilyNegativeTest) { + // Create a database + Status s; + auto options = CurrentOptions(); + options.create_if_missing = true; + CreateAndReopenWithCF({}, options); + + const auto key = std::string("foo"); + ASSERT_OK(Put(key, "v1")); + + Checkpoint* checkpoint; + ASSERT_OK(Checkpoint::Create(db_, &checkpoint)); + + // Export onto existing directory + env_->CreateDirIfMissing(export_path_); + ASSERT_EQ(checkpoint->ExportColumnFamily(db_->DefaultColumnFamily(), + export_path_, &metadata_), + Status::InvalidArgument("Specified export_dir exists")); + test::DestroyDir(env_, export_path_); + + // Export with invalid directory specification + export_path_ = ""; + ASSERT_EQ(checkpoint->ExportColumnFamily(db_->DefaultColumnFamily(), + export_path_, &metadata_), + Status::InvalidArgument("Specified export_dir invalid")); + delete checkpoint; } TEST_F(CheckpointTest, CheckpointCF) { diff --git a/utilities/merge_operators/sortlist.cc b/utilities/merge_operators/sortlist.cc index 5dbf05115..3d0de94f5 100644 --- a/utilities/merge_operators/sortlist.cc +++ b/utilities/merge_operators/sortlist.cc @@ -2,10 +2,10 @@ // This source code is licensed under both the GPLv2 (found in the // COPYING file in the root directory) and Apache 2.0 License // (found in the LICENSE.Apache file in the root directory). 
+#include "utilities/merge_operators/sortlist.h" #include "rocksdb/merge_operator.h" #include "rocksdb/slice.h" #include "utilities/merge_operators.h" -#include "utilities/merge_operators/sortlist.h" using rocksdb::Logger; using rocksdb::MergeOperator; diff --git a/utilities/persistent_cache/persistent_cache_tier.cc b/utilities/persistent_cache/persistent_cache_tier.cc index 752a6fb70..01b675aec 100644 --- a/utilities/persistent_cache/persistent_cache_tier.cc +++ b/utilities/persistent_cache/persistent_cache_tier.cc @@ -8,8 +8,8 @@ #include "utilities/persistent_cache/persistent_cache_tier.h" #include -#include #include +#include namespace rocksdb { diff --git a/utilities/simulator_cache/sim_cache.cc b/utilities/simulator_cache/sim_cache.cc index ac57a4230..cf0390a1b 100644 --- a/utilities/simulator_cache/sim_cache.cc +++ b/utilities/simulator_cache/sim_cache.cc @@ -235,7 +235,9 @@ class SimCacheImpl : public SimCache { return cache_->GetUsage(handle); } - size_t GetCharge(Handle* handle) const override { return cache_->GetCharge(handle); } + size_t GetCharge(Handle* handle) const override { + return cache_->GetCharge(handle); + } size_t GetPinnedUsage() const override { return cache_->GetPinnedUsage(); } diff --git a/utilities/transactions/optimistic_transaction_test.cc b/utilities/transactions/optimistic_transaction_test.cc index 2a6933495..35495e9e1 100644 --- a/utilities/transactions/optimistic_transaction_test.cc +++ b/utilities/transactions/optimistic_transaction_test.cc @@ -9,7 +9,6 @@ #include #include - #include "db/db_impl/db_impl.h" #include "logging/logging.h" #include "port/port.h" diff --git a/utilities/transactions/transaction_base.cc b/utilities/transactions/transaction_base.cc index f10c1b60f..637a38b9a 100644 --- a/utilities/transactions/transaction_base.cc +++ b/utilities/transactions/transaction_base.cc @@ -638,8 +638,7 @@ void TransactionBaseImpl::TrackKey(TransactionKeyMap* key_map, uint32_t cfh_id, // in case it is not already in the map auto result = cf_key_map.try_emplace(key, seq); auto iter = result.first; - if (!result.second && - seq < iter->second.seq) { + if (!result.second && seq < iter->second.seq) { // Now tracking this key with an earlier sequence number iter->second.seq = seq; } diff --git a/utilities/transactions/transaction_test.h b/utilities/transactions/transaction_test.h index 03bfb6537..62e033282 100644 --- a/utilities/transactions/transaction_test.h +++ b/utilities/transactions/transaction_test.h @@ -5,8 +5,8 @@ #pragma once -#include #include +#include #include #include #include diff --git a/utilities/transactions/write_prepared_transaction_test.cc b/utilities/transactions/write_prepared_transaction_test.cc index d4f0d993a..7357907d0 100644 --- a/utilities/transactions/write_prepared_transaction_test.cc +++ b/utilities/transactions/write_prepared_transaction_test.cc @@ -1445,8 +1445,8 @@ TEST_P(WritePreparedTransactionTest, MaxCatchupWithUnbackedSnapshot) { s = s_vec[0]; ASSERT_TRUE(s.ok() || s.IsTryAgain()); Slice key("key"); - txn->MultiGet(ropt, db->DefaultColumnFamily(), 1, &key, &pinnable_val, - &s, true); + txn->MultiGet(ropt, db->DefaultColumnFamily(), 1, &key, &pinnable_val, &s, + true); ASSERT_TRUE(s.ok() || s.IsTryAgain()); delete txn; } diff --git a/utilities/transactions/write_prepared_txn_db.cc b/utilities/transactions/write_prepared_txn_db.cc index d06c2b62d..504541ba0 100644 --- a/utilities/transactions/write_prepared_txn_db.cc +++ b/utilities/transactions/write_prepared_txn_db.cc @@ -43,7 +43,7 @@ Status 
WritePreparedTxnDB::Initialize( ordered_seq_cnt[seq] = cnt; } // AddPrepared must be called in order - for (auto seq_cnt: ordered_seq_cnt) { + for (auto seq_cnt : ordered_seq_cnt) { auto seq = seq_cnt.first; auto cnt = seq_cnt.second; for (size_t i = 0; i < cnt; i++) { diff --git a/utilities/transactions/write_prepared_txn_db.h b/utilities/transactions/write_prepared_txn_db.h index cdcee4941..fdeaf879e 100644 --- a/utilities/transactions/write_prepared_txn_db.h +++ b/utilities/transactions/write_prepared_txn_db.h @@ -544,7 +544,7 @@ class WritePreparedTxnDB : public PessimisticTransactionDB { } else { assert(heap_top_.load() < v); } - heap_.push_back(v); + heap_.push_back(v); } void pop(bool locked = false) { if (!locked) { diff --git a/utilities/transactions/write_unprepared_txn.cc b/utilities/transactions/write_unprepared_txn.cc index d4e5abad5..9b58d8bc8 100644 --- a/utilities/transactions/write_unprepared_txn.cc +++ b/utilities/transactions/write_unprepared_txn.cc @@ -411,9 +411,8 @@ Status WriteUnpreparedTxn::FlushWriteBatchWithSavePointToDB() { bool trailing_batch = i == unflushed_save_points_->size(); SavePointBatchHandler sp_handler(&write_batch_, *wupt_db_->GetCFHandleMap().get()); - size_t curr_boundary = trailing_batch - ? wb.GetWriteBatch()->GetDataSize() - : (*unflushed_save_points_)[i]; + size_t curr_boundary = trailing_batch ? wb.GetWriteBatch()->GetDataSize() + : (*unflushed_save_points_)[i]; // Construct the partial write batch up to the savepoint. // diff --git a/utilities/transactions/write_unprepared_txn_db.cc b/utilities/transactions/write_unprepared_txn_db.cc index 9d3417f9c..a2290b8d4 100644 --- a/utilities/transactions/write_unprepared_txn_db.cc +++ b/utilities/transactions/write_unprepared_txn_db.cc @@ -293,7 +293,7 @@ Status WriteUnpreparedTxnDB::Initialize( } } // AddPrepared must be called in order - for (auto seq_cnt: ordered_seq_cnt) { + for (auto seq_cnt : ordered_seq_cnt) { auto seq = seq_cnt.first; auto cnt = seq_cnt.second; for (size_t i = 0; i < cnt; i++) { diff --git a/utilities/ttl/db_ttl_impl.h b/utilities/ttl/db_ttl_impl.h index 1111c13a7..042580802 100644 --- a/utilities/ttl/db_ttl_impl.h +++ b/utilities/ttl/db_ttl_impl.h @@ -103,8 +103,8 @@ class DBWithTTLImpl : public DBWithTTL { void SetTtl(ColumnFamilyHandle *h, int32_t ttl) override; private: - // remember whether the Close completes or not - bool closed_; + // remember whether the Close completes or not + bool closed_; }; class TtlIterator : public Iterator { diff --git a/utilities/ttl/ttl_test.cc b/utilities/ttl/ttl_test.cc index 61f5e6449..f95ce4b79 100644 --- a/utilities/ttl/ttl_test.cc +++ b/utilities/ttl/ttl_test.cc @@ -87,14 +87,10 @@ class TtlTest : public testing::Test { } // Call db_ttl_->Close() before delete db_ttl_ - void CloseTtl() { - CloseTtlHelper(true); - } + void CloseTtl() { CloseTtlHelper(true); } // No db_ttl_->Close() before delete db_ttl_ - void CloseTtlNoDBClose() { - CloseTtlHelper(false); - } + void CloseTtlNoDBClose() { CloseTtlHelper(false); } void CloseTtlHelper(bool close_db) { if (db_ttl_ != nullptr) { @@ -416,7 +412,6 @@ TEST_F(TtlTest, NoEffect) { CloseTtl(); } - // Rerun the NoEffect test with a different version of CloseTtl // function, where db is directly deleted without close. 
TEST_F(TtlTest, DestructWithoutClose) { @@ -425,18 +420,18 @@ TEST_F(TtlTest, DestructWithoutClose) { int64_t boundary2 = 2 * boundary1; OpenTtl(); - PutValues(0, boundary1); //T=0: Set1 never deleted - SleepCompactCheck(1, 0, boundary1); //T=1: Set1 still there + PutValues(0, boundary1); // T=0: Set1 never deleted + SleepCompactCheck(1, 0, boundary1); // T=1: Set1 still there CloseTtlNoDBClose(); OpenTtl(0); - PutValues(boundary1, boundary2 - boundary1); //T=1: Set2 never deleted - SleepCompactCheck(1, 0, boundary2); //T=2: Sets1 & 2 still there + PutValues(boundary1, boundary2 - boundary1); // T=1: Set2 never deleted + SleepCompactCheck(1, 0, boundary2); // T=2: Sets1 & 2 still there CloseTtlNoDBClose(); OpenTtl(-1); - PutValues(boundary2, kSampleSize_ - boundary2); //T=3: Set3 never deleted - SleepCompactCheck(1, 0, kSampleSize_, true); //T=4: Sets 1,2,3 still there + PutValues(boundary2, kSampleSize_ - boundary2); // T=3: Set3 never deleted + SleepCompactCheck(1, 0, kSampleSize_, true); // T=4: Sets 1,2,3 still there CloseTtlNoDBClose(); } diff --git a/utilities/write_batch_with_index/write_batch_with_index.cc b/utilities/write_batch_with_index/write_batch_with_index.cc index d9f9eabd9..2f51f4f9f 100644 --- a/utilities/write_batch_with_index/write_batch_with_index.cc +++ b/utilities/write_batch_with_index/write_batch_with_index.cc @@ -918,9 +918,9 @@ Status WriteBatchWithIndex::GetFromBatchAndDB( if (merge_operator) { std::string merge_result; - s = MergeHelper::TimedFullMerge( - merge_operator, key, merge_data, merge_context.GetOperands(), - &merge_result, logger, statistics, env); + s = MergeHelper::TimedFullMerge(merge_operator, key, merge_data, + merge_context.GetOperands(), + &merge_result, logger, statistics, env); pinnable_val->Reset(); *pinnable_val->GetSelf() = std::move(merge_result); pinnable_val->PinSelf(); diff --git a/utilities/write_batch_with_index/write_batch_with_index_test.cc b/utilities/write_batch_with_index/write_batch_with_index_test.cc index 7c7efddd1..ed28fa8fd 100644 --- a/utilities/write_batch_with_index/write_batch_with_index_test.cc +++ b/utilities/write_batch_with_index/write_batch_with_index_test.cc @@ -1308,7 +1308,6 @@ TEST_F(WriteBatchWithIndexTest, TestGetFromBatchAndDBMerge2) { DestroyDB(dbname, options); } - TEST_F(WriteBatchWithIndexTest, TestGetFromBatchAndDBMerge3) { DB* db; Options options;