From 48fe921754cb53d2fa258a9260cfe8d78758da27 Mon Sep 17 00:00:00 2001 From: sdong Date: Tue, 25 Oct 2022 14:29:41 -0700 Subject: [PATCH] Run clang format against files under tools/ and db_stress_tool/ (#10868) Summary: Some lines of .h and .cc files are not properly formatted. Clean them up with clang-format. Pull Request resolved: https://github.com/facebook/rocksdb/pull/10868 Test Plan: Watch existing CI to pass Reviewed By: ajkr Differential Revision: D40683485 fbshipit-source-id: 491fbb78b2cdcb948164f306829909ad816d5d0b --- db_stress_tool/batched_ops_stress.cc | 4 +- db_stress_tool/db_stress_common.h | 4 +- db_stress_tool/db_stress_shared_state.h | 4 +- tools/blob_dump.cc | 1 + .../block_cache_trace_analyzer.cc | 18 +- .../block_cache_trace_analyzer.h | 5 +- tools/db_bench_tool.cc | 610 +++++++++--------- tools/db_sanity_test.cc | 12 +- tools/dump/db_dump_tool.cc | 3 +- tools/ldb_cmd.cc | 121 ++-- tools/ldb_cmd_impl.h | 11 +- tools/ldb_cmd_test.cc | 4 +- tools/ldb_tool.cc | 1 + tools/reduce_levels_test.cc | 4 +- tools/simulated_hybrid_file_system.cc | 3 +- tools/sst_dump_tool.cc | 20 +- tools/trace_analyzer_test.cc | 2 +- tools/write_stress.cc | 9 +- 18 files changed, 410 insertions(+), 426 deletions(-) diff --git a/db_stress_tool/batched_ops_stress.cc b/db_stress_tool/batched_ops_stress.cc index 1f87e752e..e98a546d3 100644 --- a/db_stress_tool/batched_ops_stress.cc +++ b/db_stress_tool/batched_ops_stress.cc @@ -188,8 +188,8 @@ class BatchedOpsStressTest : public StressTest { const std::vector<int64_t>& rand_keys) override { size_t num_keys = rand_keys.size(); std::vector<Status> ret_status(num_keys); - std::array<std::string, 10> keys = {{"0", "1", "2", "3", "4", - "5", "6", "7", "8", "9"}}; + std::array<std::string, 10> keys = { + {"0", "1", "2", "3", "4", "5", "6", "7", "8", "9"}}; size_t num_prefixes = keys.size(); for (size_t rand_key = 0; rand_key < num_keys; ++rand_key) { std::vector<Slice> key_slices; diff --git a/db_stress_tool/db_stress_common.h b/db_stress_tool/db_stress_common.h index bc9e6a17b..397d22299 100644 --- a/db_stress_tool/db_stress_common.h +++ b/db_stress_tool/db_stress_common.h @@ -509,8 +509,8 @@ extern inline std::string Key(int64_t val) { if (offset < weight) { // Use the bottom 3 bits of offset as the number of trailing 'x's in the // key. If the next key is going to be of the next level, then skip the - // trailer as it would break ordering. If the key length is already at max, - // skip the trailer. + // trailer as it would break ordering. If the key length is already at + // max, skip the trailer. if (offset < weight - 1 && level < levels - 1) { size_t trailer_len = offset & 0x7; key.append(trailer_len, 'x'); diff --git a/db_stress_tool/db_stress_shared_state.h b/db_stress_tool/db_stress_shared_state.h index c53a0742b..5565c6221 100644 --- a/db_stress_tool/db_stress_shared_state.h +++ b/db_stress_tool/db_stress_shared_state.h @@ -333,9 +333,7 @@ class SharedState { uint64_t GetStartTimestamp() const { return start_timestamp_; } private: - static void IgnoreReadErrorCallback(void*) { - ignore_read_error = true; - } + static void IgnoreReadErrorCallback(void*) { ignore_read_error = true; } // Pick random keys in each column family that will not experience overwrite. 
std::unordered_set<int64_t> GenerateNoOverwriteIds() const { diff --git a/tools/blob_dump.cc b/tools/blob_dump.cc index ab39b8513..1f75eb20d 100644 --- a/tools/blob_dump.cc +++ b/tools/blob_dump.cc @@ -5,6 +5,7 @@ #ifndef ROCKSDB_LITE #include + #include #include #include diff --git a/tools/block_cache_analyzer/block_cache_trace_analyzer.cc b/tools/block_cache_analyzer/block_cache_trace_analyzer.cc index 963719e95..f0bb6975b 100644 --- a/tools/block_cache_analyzer/block_cache_trace_analyzer.cc +++ b/tools/block_cache_analyzer/block_cache_trace_analyzer.cc @@ -1175,7 +1175,8 @@ void BlockCacheTraceAnalyzer::WriteReuseLifetime( } void BlockCacheTraceAnalyzer::WriteBlockReuseTimeline( - const uint64_t reuse_window, bool user_access_only, TraceType block_type) const { + const uint64_t reuse_window, bool user_access_only, + TraceType block_type) const { // A map from block key to an array of bools that states whether a block is // accessed in a time window. std::map<std::string, std::vector<bool>> block_accessed; @@ -1214,7 +1215,8 @@ void BlockCacheTraceAnalyzer::WriteBlockReuseTimeline( TraverseBlocks(block_callback); // A cell is the number of blocks accessed in a reuse window. - std::unique_ptr<uint64_t[]> reuse_table(new uint64_t[reuse_vector_size * reuse_vector_size]); + std::unique_ptr<uint64_t[]> reuse_table( + new uint64_t[reuse_vector_size * reuse_vector_size]); for (uint64_t start_time = 0; start_time < reuse_vector_size; start_time++) { // Initialize the reuse_table. for (uint64_t i = 0; i < reuse_vector_size; i++) { @@ -1255,8 +1257,9 @@ void BlockCacheTraceAnalyzer::WriteBlockReuseTimeline( if (j < start_time) { row += "100.0"; } else { - row += std::to_string(percent(reuse_table[start_time * reuse_vector_size + j], - reuse_table[start_time * reuse_vector_size + start_time])); + row += std::to_string( + percent(reuse_table[start_time * reuse_vector_size + j], + reuse_table[start_time * reuse_vector_size + start_time])); } } out << row << std::endl; @@ -1811,9 +1814,10 @@ void BlockCacheTraceAnalyzer::PrintDataBlockAccessStats() const { return; } // Use four decimal points. - uint64_t percent_referenced_for_existing_keys = (uint64_t)( - ((double)block.key_num_access_map.size() / (double)block.num_keys) * - 10000.0); + uint64_t percent_referenced_for_existing_keys = + (uint64_t)(((double)block.key_num_access_map.size() / + (double)block.num_keys) * + 10000.0); uint64_t percent_referenced_for_non_existing_keys = (uint64_t)(((double)block.non_exist_key_num_access_map.size() / (double)block.num_keys) * diff --git a/tools/block_cache_analyzer/block_cache_trace_analyzer.h b/tools/block_cache_analyzer/block_cache_trace_analyzer.h index e5bc3da31..2f1ebd139 100644 --- a/tools/block_cache_analyzer/block_cache_trace_analyzer.h +++ b/tools/block_cache_analyzer/block_cache_trace_analyzer.h @@ -106,7 +106,7 @@ struct BlockAccessInfo { ParsedInternalKey internal_key; Status s = ParseInternalKey(access.referenced_key, &internal_key, false /* log_err_key */); // TODO - assert(s.ok()); // TODO + assert(s.ok()); // TODO } } else { non_exist_key_num_access_map[access.referenced_key][access.caller]++; @@ -292,7 +292,8 @@ class BlockCacheTraceAnalyzer { // The file is named // "block_type_user_access_only_reuse_window_reuse_timeline". The file format // is start_time,0,1,...,N where N equals trace_duration / reuse_window. 
- void WriteBlockReuseTimeline(const uint64_t reuse_window, bool user_access_only, + void WriteBlockReuseTimeline(const uint64_t reuse_window, + bool user_access_only, TraceType block_type) const; // Write the Get spatical locality into csv files saved in 'output_dir'. diff --git a/tools/db_bench_tool.cc b/tools/db_bench_tool.cc index 94752f7f9..062554632 100644 --- a/tools/db_bench_tool.cc +++ b/tools/db_bench_tool.cc @@ -177,8 +177,8 @@ IF_ROCKSDB_LITE("", " mode\n" "\tfilluniquerandomdeterministic -- write N values in a random" " key order and keep the shape of the LSM tree\n" - "\toverwrite -- overwrite N values in random key order in" - " async mode\n" + "\toverwrite -- overwrite N values in random key order in " + "async mode\n" "\tfillsync -- write N/1000 values in random key order in " "sync mode\n" "\tfill100K -- write N/1000 100K values in random order in" @@ -289,10 +289,12 @@ DEFINE_string(column_family_distribution, "", "and `num_hot_column_families=0`, a valid list could be " "\"10,20,30,40\"."); -DEFINE_int64(reads, -1, "Number of read operations to do. " +DEFINE_int64(reads, -1, + "Number of read operations to do. " "If negative, do FLAGS_num reads."); -DEFINE_int64(deletes, -1, "Number of delete operations to do. " +DEFINE_int64(deletes, -1, + "Number of delete operations to do. " "If negative, do FLAGS_num deletions."); DEFINE_int32(bloom_locality, 0, "Control bloom filter probes locality"); @@ -304,7 +306,8 @@ static int64_t seed_base; DEFINE_int32(threads, 1, "Number of concurrent threads to run."); -DEFINE_int32(duration, 0, "Time in seconds for the random-ops tests to run." +DEFINE_int32(duration, 0, + "Time in seconds for the random-ops tests to run." " When 0 then num & reads determine the test duration"); DEFINE_string(value_size_distribution_type, "fixed", @@ -357,8 +360,9 @@ DEFINE_int32(user_timestamp_size, 0, DEFINE_int32(num_multi_db, 0, "Number of DBs used in the benchmark. 
0 means single DB."); -DEFINE_double(compression_ratio, 0.5, "Arrange to generate values that shrink" - " to this fraction of their original size after compression"); +DEFINE_double(compression_ratio, 0.5, + "Arrange to generate values that shrink to this fraction of " + "their original size after compression"); DEFINE_double( overwrite_probability, 0.0, @@ -514,9 +518,8 @@ DEFINE_int32(max_background_compactions, DEFINE_uint64(subcompactions, 1, "Maximum number of subcompactions to divide L0-L1 compactions " "into."); -static const bool FLAGS_subcompactions_dummy - __attribute__((__unused__)) = RegisterFlagValidator(&FLAGS_subcompactions, - &ValidateUint32Range); +static const bool FLAGS_subcompactions_dummy __attribute__((__unused__)) = + RegisterFlagValidator(&FLAGS_subcompactions, &ValidateUint32Range); DEFINE_int32(max_background_flushes, ROCKSDB_NAMESPACE::Options().max_background_flushes, @@ -534,14 +537,16 @@ DEFINE_int32(compaction_pri, "priority of files to compaction: by size or by data age"); DEFINE_int32(universal_size_ratio, 0, - "Percentage flexibility while comparing file size" - " (for universal compaction only)."); + "Percentage flexibility while comparing file size " + "(for universal compaction only)."); -DEFINE_int32(universal_min_merge_width, 0, "The minimum number of files in a" - " single compaction run (for universal compaction only)."); +DEFINE_int32(universal_min_merge_width, 0, + "The minimum number of files in a single compaction run " + "(for universal compaction only)."); -DEFINE_int32(universal_max_merge_width, 0, "The max number of files to compact" - " in universal style compaction"); +DEFINE_int32(universal_max_merge_width, 0, + "The max number of files to compact in universal style " + "compaction"); DEFINE_int32(universal_max_size_amplification_percent, 0, "The max size amplification for universal style compaction"); @@ -747,9 +752,10 @@ DEFINE_bool(whole_key_filtering, ROCKSDB_NAMESPACE::BlockBasedTableOptions().whole_key_filtering, "Use whole keys (in addition to prefixes) in SST bloom filter."); -DEFINE_bool(use_existing_db, false, "If true, do not destroy the existing" - " database. If you set this flag and also specify a benchmark that" - " wants a fresh database, that benchmark will fail."); +DEFINE_bool(use_existing_db, false, + "If true, do not destroy the existing database. If you set this " + "flag and also specify a benchmark that wants a fresh database, " + "that benchmark will fail."); DEFINE_bool(use_existing_keys, false, "If true, uses existing keys in the DB, " @@ -787,16 +793,15 @@ DEFINE_bool(use_keep_filter, false, "Whether to use a noop compaction filter"); static bool ValidateCacheNumshardbits(const char* flagname, int32_t value) { if (value >= 20) { - fprintf(stderr, "Invalid value for --%s: %d, must be < 20\n", - flagname, value); + fprintf(stderr, "Invalid value for --%s: %d, must be < 20\n", flagname, + value); return false; } return true; } DEFINE_bool(verify_checksum, true, - "Verify checksum for every block read" - " from storage"); + "Verify checksum for every block read from storage"); DEFINE_int32(checksum_type, ROCKSDB_NAMESPACE::BlockBasedTableOptions().checksum, @@ -808,10 +813,11 @@ DEFINE_int32(stats_level, ROCKSDB_NAMESPACE::StatsLevel::kExceptDetailedTimers, DEFINE_string(statistics_string, "", "Serialized statistics string"); static class std::shared_ptr dbstats; -DEFINE_int64(writes, -1, "Number of write operations to do. 
If negative, do" - " --num reads."); +DEFINE_int64(writes, -1, + "Number of write operations to do. If negative, do --num reads."); -DEFINE_bool(finish_after_writes, false, "Write thread terminates after all writes are finished"); +DEFINE_bool(finish_after_writes, false, + "Write thread terminates after all writes are finished"); DEFINE_bool(sync, false, "Sync all writes to disk"); @@ -876,25 +882,28 @@ DEFINE_uint64(periodic_compaction_seconds, DEFINE_uint64(ttl_seconds, ROCKSDB_NAMESPACE::Options().ttl, "Set options.ttl"); static bool ValidateInt32Percent(const char* flagname, int32_t value) { - if (value <= 0 || value>=100) { - fprintf(stderr, "Invalid value for --%s: %d, 0< pct <100 \n", - flagname, value); + if (value <= 0 || value >= 100) { + fprintf(stderr, "Invalid value for --%s: %d, 0< pct <100 \n", flagname, + value); return false; } return true; } -DEFINE_int32(readwritepercent, 90, "Ratio of reads to reads/writes (expressed" - " as percentage) for the ReadRandomWriteRandom workload. The " - "default value 90 means 90% operations out of all reads and writes" - " operations are reads. In other words, 9 gets for every 1 put."); - -DEFINE_int32(mergereadpercent, 70, "Ratio of merges to merges&reads (expressed" - " as percentage) for the ReadRandomMergeRandom workload. The" - " default value 70 means 70% out of all read and merge operations" - " are merges. In other words, 7 merges for every 3 gets."); - -DEFINE_int32(deletepercent, 2, "Percentage of deletes out of reads/writes/" - "deletes (used in RandomWithVerify only). RandomWithVerify " +DEFINE_int32(readwritepercent, 90, + "Ratio of reads to reads/writes (expressed as percentage) for " + "the ReadRandomWriteRandom workload. The default value 90 means " + "90% operations out of all reads and writes operations are " + "reads. In other words, 9 gets for every 1 put."); + +DEFINE_int32(mergereadpercent, 70, + "Ratio of merges to merges&reads (expressed as percentage) for " + "the ReadRandomMergeRandom workload. The default value 70 means " + "70% out of all read and merge operations are merges. In other " + "words, 7 merges for every 3 gets."); + +DEFINE_int32(deletepercent, 2, + "Percentage of deletes out of reads/writes/deletes (used in " + "RandomWithVerify only). RandomWithVerify " "calculates writepercent as (100 - FLAGS_readwritepercent - " "deletepercent), so deletepercent must be smaller than (100 - " "FLAGS_readwritepercent)"); @@ -1304,7 +1313,8 @@ DEFINE_int32(compression_zstd_max_train_bytes, "Maximum size of training data passed to zstd's dictionary " "trainer."); -DEFINE_int32(min_level_to_compress, -1, "If non-negative, compression starts" +DEFINE_int32(min_level_to_compress, -1, + "If non-negative, compression starts" " from this level. Levels with number < min_level_to_compress are" " not compressed. Otherwise, apply compression_type to " "all levels."); @@ -1342,8 +1352,8 @@ DEFINE_string(fs_uri, "", #endif // ROCKSDB_LITE DEFINE_string(simulate_hybrid_fs_file, "", "File for Store Metadata for Simulate hybrid FS. Empty means " - "disable the feature. Now, if it is set, " - "last_level_temperature is set to kWarm."); + "disable the feature. 
Now, if it is set, last_level_temperature " "is set to kWarm."); DEFINE_int32(simulate_hybrid_hdd_multipliers, 1, "In simulate_hybrid_fs_file or simulate_hdd mode, how many HDDs " "are simulated."); @@ -1360,18 +1370,21 @@ static std::shared_ptr<ROCKSDB_NAMESPACE::Env> env_guard; static ROCKSDB_NAMESPACE::Env* FLAGS_env = ROCKSDB_NAMESPACE::Env::Default(); -DEFINE_int64(stats_interval, 0, "Stats are reported every N operations when " - "this is greater than zero. When 0 the interval grows over time."); +DEFINE_int64(stats_interval, 0, + "Stats are reported every N operations when this is greater than " + "zero. When 0 the interval grows over time."); -DEFINE_int64(stats_interval_seconds, 0, "Report stats every N seconds. This " - "overrides stats_interval when both are > 0."); +DEFINE_int64(stats_interval_seconds, 0, + "Report stats every N seconds. This overrides stats_interval when" + " both are > 0."); -DEFINE_int32(stats_per_interval, 0, "Reports additional stats per interval when" - " this is greater than 0."); +DEFINE_int32(stats_per_interval, 0, + "Reports additional stats per interval when this is greater than " + "0."); DEFINE_uint64(slow_usecs, 1000000, - "A message is printed for operations that " - "take at least this many microseconds."); + "A message is printed for operations that take at least this " + "many microseconds."); DEFINE_int64(report_interval_seconds, 0, "If greater than zero, it will write simple stats in CSV format " @@ -1441,24 +1454,19 @@ DEFINE_bool(rate_limiter_auto_tuned, false, "Enable dynamic adjustment of rate limit according to demand for " "background I/O"); +DEFINE_bool(sine_write_rate, false, "Use a sine wave write_rate_limit"); -DEFINE_bool(sine_write_rate, false, - "Use a sine wave write_rate_limit"); -DEFINE_uint64(sine_write_rate_interval_milliseconds, 10000, - "Interval of which the sine wave write_rate_limit is recalculated"); +DEFINE_uint64( sine_write_rate_interval_milliseconds, 10000, "Interval of which the sine wave write_rate_limit is recalculated"); -DEFINE_double(sine_a, 1, - "A in f(x) = A sin(bx + c) + d"); +DEFINE_double(sine_a, 1, "A in f(x) = A sin(bx + c) + d"); -DEFINE_double(sine_b, 1, - "B in f(x) = A sin(bx + c) + d"); +DEFINE_double(sine_b, 1, "B in f(x) = A sin(bx + c) + d"); -DEFINE_double(sine_c, 0, - "C in f(x) = A sin(bx + c) + d"); +DEFINE_double(sine_c, 0, "C in f(x) = A sin(bx + c) + d"); -DEFINE_double(sine_d, 1, - "D in f(x) = A sin(bx + c) + d"); +DEFINE_double(sine_d, 1, "D in f(x) = A sin(bx + c) + d"); DEFINE_bool(rate_limit_bg_reads, false, "Use options.rate_limiter on compaction reads"); @@ -1548,8 +1556,8 @@ DEFINE_bool(print_malloc_stats, false, DEFINE_bool(disable_auto_compactions, false, "Do not auto trigger compactions"); DEFINE_uint64(wal_ttl_seconds, 0, "Set the TTL for the WAL Files in seconds."); -DEFINE_uint64(wal_size_limit_MB, 0, "Set the size limit for the WAL Files" - " in MB."); +DEFINE_uint64(wal_size_limit_MB, 0, + "Set the size limit for the WAL Files in MB."); DEFINE_uint64(max_total_wal_size, 0, "Set total max WAL size"); DEFINE_bool(mmap_read, ROCKSDB_NAMESPACE::Options().allow_mmap_reads, @@ -1616,11 +1624,12 @@ DEFINE_int32(num_deletion_threads, 1, "Number of threads to do deletion (used in TimeSeries and delete " "expire_style only)."); -DEFINE_int32(max_successive_merges, 0, "Maximum number of successive merge" - " operations on a key in the memtable"); +DEFINE_int32(max_successive_merges, 0, + "Maximum number of successive merge operations on a key in the " + "memtable"); static bool 
ValidatePrefixSize(const char* flagname, int32_t value) { - if (value < 0 || value>=2000000000) { + if (value < 0 || value >= 2000000000) { fprintf(stderr, "Invalid value for --%s: %d. 0<= PrefixSize <=2000000000\n", flagname, value); return false; @@ -1628,11 +1637,12 @@ static bool ValidatePrefixSize(const char* flagname, int32_t value) { return true; } -DEFINE_int32(prefix_size, 0, "control the prefix size for HashSkipList and " - "plain table"); -DEFINE_int64(keys_per_prefix, 0, "control average number of keys generated " - "per prefix, 0 means no special handling of the prefix, " - "i.e. use the prefix comes with the generated random number."); +DEFINE_int32(prefix_size, 0, + "control the prefix size for HashSkipList and plain table"); +DEFINE_int64(keys_per_prefix, 0, + "control average number of keys generated per prefix, 0 means no " + "special handling of the prefix, i.e. use the prefix comes with " + "the generated random number."); DEFINE_bool(total_order_seek, false, "Enable total order seek regardless of index format."); DEFINE_bool(prefix_same_as_start, false, @@ -1644,13 +1654,13 @@ DEFINE_bool( DEFINE_int32(memtable_insert_with_hint_prefix_size, 0, "If non-zero, enable " "memtable insert with hint with the given prefix size."); -DEFINE_bool(enable_io_prio, false, "Lower the background flush/compaction " - "threads' IO priority"); -DEFINE_bool(enable_cpu_prio, false, "Lower the background flush/compaction " - "threads' CPU priority"); -DEFINE_bool(identity_as_first_hash, false, "the first hash function of cuckoo " - "table becomes an identity function. This is only valid when key " - "is 8 bytes"); +DEFINE_bool(enable_io_prio, false, + "Lower the background flush/compaction threads' IO priority"); +DEFINE_bool(enable_cpu_prio, false, + "Lower the background flush/compaction threads' CPU priority"); +DEFINE_bool(identity_as_first_hash, false, + "the first hash function of cuckoo table becomes an identity " + "function. This is only valid when key is 8 bytes"); DEFINE_bool(dump_malloc_stats, true, "Dump malloc stats in LOG "); DEFINE_uint64(stats_dump_period_sec, ROCKSDB_NAMESPACE::Options().stats_dump_period_sec, @@ -1673,22 +1683,23 @@ DEFINE_bool(multiread_batched, false, "Use the new MultiGet API"); DEFINE_string(memtablerep, "skip_list", ""); DEFINE_int64(hash_bucket_count, 1024 * 1024, "hash bucket count"); -DEFINE_bool(use_plain_table, false, "if use plain table " - "instead of block-based table format"); +DEFINE_bool(use_plain_table, false, + "if use plain table instead of block-based table format"); DEFINE_bool(use_cuckoo_table, false, "if use cuckoo table format"); DEFINE_double(cuckoo_hash_ratio, 0.9, "Hash ratio for Cuckoo SST table."); -DEFINE_bool(use_hash_search, false, "if use kHashSearch " - "instead of kBinarySearch. " +DEFINE_bool(use_hash_search, false, + "if use kHashSearch instead of kBinarySearch. " "This is valid if only we use BlockTable"); -DEFINE_string(merge_operator, "", "The merge operator to use with the database." +DEFINE_string(merge_operator, "", + "The merge operator to use with the database." 
"If a new merge operator is specified, be sure to use fresh" " database The possible merge operators are defined in" " utilities/merge_operators.h"); -DEFINE_int32(skip_list_lookahead, 0, "Used with skip_list memtablerep; try " - "linear search first for this many steps from the previous " - "position"); -DEFINE_bool(report_file_operations, false, "if report number of file " - "operations"); +DEFINE_int32(skip_list_lookahead, 0, + "Used with skip_list memtablerep; try linear search first for " + "this many steps from the previous position"); +DEFINE_bool(report_file_operations, false, + "if report number of file operations"); DEFINE_bool(report_open_timing, false, "if report open timing"); DEFINE_int32(readahead_size, 0, "Iterator readahead size"); @@ -1724,9 +1735,9 @@ DEFINE_bool(allow_data_in_errors, static const bool FLAGS_deletepercent_dummy __attribute__((__unused__)) = RegisterFlagValidator(&FLAGS_deletepercent, &ValidateInt32Percent); -static const bool FLAGS_table_cache_numshardbits_dummy __attribute__((__unused__)) = - RegisterFlagValidator(&FLAGS_table_cache_numshardbits, - &ValidateTableCacheNumshardbits); +static const bool FLAGS_table_cache_numshardbits_dummy + __attribute__((__unused__)) = RegisterFlagValidator( + &FLAGS_table_cache_numshardbits, &ValidateTableCacheNumshardbits); DEFINE_uint32(write_batch_protection_bytes_per_key, 0, "Size of per-key-value checksum in each write batch. Currently " @@ -1775,11 +1786,7 @@ static Status CreateMemTableRepFactory( } // namespace -enum DistributionType : unsigned char { - kFixed = 0, - kUniform, - kNormal -}; +enum DistributionType : unsigned char { kFixed = 0, kUniform, kNormal }; static enum DistributionType FLAGS_value_size_distribution_type_e = kFixed; @@ -1811,33 +1818,27 @@ class BaseDistribution { } return val; } + private: virtual unsigned int Get() = 0; - virtual bool NeedTruncate() { - return true; - } + virtual bool NeedTruncate() { return true; } unsigned int min_value_size_; unsigned int max_value_size_; }; -class FixedDistribution : public BaseDistribution -{ +class FixedDistribution : public BaseDistribution { public: - FixedDistribution(unsigned int size) : - BaseDistribution(size, size), - size_(size) {} + FixedDistribution(unsigned int size) + : BaseDistribution(size, size), size_(size) {} + private: - virtual unsigned int Get() override { - return size_; - } - virtual bool NeedTruncate() override { - return false; - } + virtual unsigned int Get() override { return size_; } + virtual bool NeedTruncate() override { return false; } unsigned int size_; }; -class NormalDistribution - : public BaseDistribution, public std::normal_distribution { +class NormalDistribution : public BaseDistribution, + public std::normal_distribution { public: NormalDistribution(unsigned int _min, unsigned int _max) : BaseDistribution(_min, _max), @@ -1855,9 +1856,8 @@ class NormalDistribution std::mt19937 gen_; }; -class UniformDistribution - : public BaseDistribution, - public std::uniform_int_distribution { +class UniformDistribution : public BaseDistribution, + public std::uniform_int_distribution { public: UniformDistribution(unsigned int _min, unsigned int _max) : BaseDistribution(_min, _max), @@ -1865,12 +1865,8 @@ class UniformDistribution gen_(rd_()) {} private: - virtual unsigned int Get() override { - return (*this)(gen_); - } - virtual bool NeedTruncate() override { - return false; - } + virtual unsigned int Get() override { return (*this)(gen_); } + virtual bool NeedTruncate() override { return false; } std::random_device 
rd_; std::mt19937 gen_; }; @@ -1883,7 +1879,6 @@ class RandomGenerator { std::unique_ptr<BaseDistribution> dist_; public: - RandomGenerator() { auto max_value_size = FLAGS_value_size_max; switch (FLAGS_value_size_distribution_type_e) { case kUniform: dist_.reset(new UniformDistribution(FLAGS_value_size_min, FLAGS_value_size_max)); break; case kNormal: - dist_.reset(new NormalDistribution(FLAGS_value_size_min, - FLAGS_value_size_max)); + dist_.reset( + new NormalDistribution(FLAGS_value_size_min, FLAGS_value_size_max)); break; case kFixed: default: @@ -1942,7 +1937,7 @@ struct DBWithColumnFamilies { DB* db; #ifndef ROCKSDB_LITE OptimisticTransactionDB* opt_txn_db; -#endif // ROCKSDB_LITE +#endif // ROCKSDB_LITE std::atomic<size_t> num_created; // Need to be updated after all the // new entries in cfh are set. size_t num_hot; // Number of column families to be queried at each moment. @@ -1955,7 +1950,8 @@ struct DBWithColumnFamilies { DBWithColumnFamilies() : db(nullptr) #ifndef ROCKSDB_LITE - , opt_txn_db(nullptr) + , + opt_txn_db(nullptr) #endif // ROCKSDB_LITE { cfh.clear(); @@ -2138,19 +2134,12 @@ enum OperationType : unsigned char { }; static std::unordered_map<OperationType, std::string, std::hash<unsigned char>> - OperationTypeString = { - {kRead, "read"}, - {kWrite, "write"}, - {kDelete, "delete"}, - {kSeek, "seek"}, - {kMerge, "merge"}, - {kUpdate, "update"}, - {kCompress, "compress"}, - {kCompress, "uncompress"}, - {kCrc, "crc"}, - {kHash, "hash"}, - {kOthers, "op"} -}; + OperationTypeString = {{kRead, "read"}, {kWrite, "write"}, + {kDelete, "delete"}, {kSeek, "seek"}, + {kMerge, "merge"}, {kUpdate, "update"}, + {kCompress, "compress"}, {kCompress, "uncompress"}, + {kCrc, "crc"}, {kHash, "hash"}, + {kOthers, "op"}}; class CombinedStats; class Stats { @@ -2168,7 +2157,8 @@ class Stats { uint64_t last_op_finish_; uint64_t last_report_finish_; std::unordered_map<OperationType, std::shared_ptr<HistogramImpl>, - std::hash<unsigned char>> hist_; + std::hash<unsigned char>> + hist_; std::string message_; bool exclude_from_merge_; ReporterAgent* reporter_agent_; // does not own @@ -2200,15 +2190,14 @@ class Stats { } void Merge(const Stats& other) { - if (other.exclude_from_merge_) - return; + if (other.exclude_from_merge_) return; for (auto it = other.hist_.begin(); it != other.hist_.end(); ++it) { auto this_it = hist_.find(it->first); if (this_it != hist_.end()) { this_it->second->Merge(*(other.hist_.at(it->first))); } else { - hist_.insert({ it->first, it->second }); + hist_.insert({it->first, it->second}); } } @@ -2227,9 +2216,7 @@ class Stats { seconds_ = (finish_ - start_) * 1e-6; } - void AddMessage(Slice msg) { - AppendWithSpace(&message_, msg); - } + void AddMessage(Slice msg) { AppendWithSpace(&message_, msg); } void SetId(int id) { id_ = id; } void SetExcludeFromMerge() { exclude_from_merge_ = true; } @@ -2238,27 +2225,27 @@ class Stats { std::vector<ThreadStatus> thread_list; FLAGS_env->GetThreadList(&thread_list); - fprintf(stderr, "\n%18s %10s %12s %20s %13s %45s %12s %s\n", - "ThreadID", "ThreadType", "cfName", "Operation", - "ElapsedTime", "Stage", "State", "OperationProperties"); + fprintf(stderr, "\n%18s %10s %12s %20s %13s %45s %12s %s\n", "ThreadID", + "ThreadType", "cfName", "Operation", "ElapsedTime", "Stage", + "State", "OperationProperties"); int64_t current_time = 0; clock_->GetCurrentTime(&current_time).PermitUncheckedError(); for (auto ts : thread_list) { fprintf(stderr, "%18" PRIu64 " %10s %12s %20s %13s %45s %12s", - ts.thread_id, - ThreadStatus::GetThreadTypeName(ts.thread_type).c_str(), - ts.cf_name.c_str(), - ThreadStatus::GetOperationName(ts.operation_type).c_str(), - ThreadStatus::MicrosToString(ts.op_elapsed_micros).c_str(), - 
ThreadStatus::GetOperationStageName(ts.operation_stage).c_str(), - ThreadStatus::GetStateName(ts.state_type).c_str()); + ts.thread_id, + ThreadStatus::GetThreadTypeName(ts.thread_type).c_str(), + ts.cf_name.c_str(), + ThreadStatus::GetOperationName(ts.operation_type).c_str(), + ThreadStatus::MicrosToString(ts.op_elapsed_micros).c_str(), + ThreadStatus::GetOperationStageName(ts.operation_stage).c_str(), + ThreadStatus::GetStateName(ts.state_type).c_str()); auto op_properties = ThreadStatus::InterpretOperationProperties( ts.operation_type, ts.op_properties); for (const auto& op_prop : op_properties) { - fprintf(stderr, " %s %" PRIu64" |", - op_prop.first.c_str(), op_prop.second); + fprintf(stderr, " %s %" PRIu64 " |", op_prop.first.c_str(), + op_prop.second); } fprintf(stderr, "\n"); } @@ -2266,13 +2253,9 @@ class Stats { void ResetSineInterval() { sine_interval_ = clock_->NowMicros(); } - uint64_t GetSineInterval() { - return sine_interval_; - } + uint64_t GetSineInterval() { return sine_interval_; } - uint64_t GetStart() { - return start_; - } + uint64_t GetStart() { return start_; } void ResetLastOpTime() { // Set to now to avoid latency from calls to SleepForMicroseconds. @@ -2288,8 +2271,7 @@ class Stats { uint64_t now = clock_->NowMicros(); uint64_t micros = now - last_op_finish_; - if (hist_.find(op_type) == hist_.end()) - { + if (hist_.find(op_type) == hist_.end()) { auto hist_temp = std::make_shared<HistogramImpl>(); hist_.insert({op_type, std::move(hist_temp)}); } @@ -2305,13 +2287,20 @@ class Stats { done_ += num_ops; if (done_ >= next_report_ && FLAGS_progress_reports) { if (!FLAGS_stats_interval) { - if (next_report_ < 1000) next_report_ += 100; - else if (next_report_ < 5000) next_report_ += 500; - else if (next_report_ < 10000) next_report_ += 1000; - else if (next_report_ < 50000) next_report_ += 5000; - else if (next_report_ < 100000) next_report_ += 10000; - else if (next_report_ < 500000) next_report_ += 50000; - else next_report_ += 100000; + if (next_report_ < 1000) + next_report_ += 100; + else if (next_report_ < 5000) + next_report_ += 500; + else if (next_report_ < 10000) + next_report_ += 1000; + else if (next_report_ < 50000) + next_report_ += 5000; + else if (next_report_ < 100000) + next_report_ += 10000; + else if (next_report_ < 500000) + next_report_ += 50000; + else + next_report_ += 100000; fprintf(stderr, "... finished %" PRIu64 " ops%30s\r", done_, ""); } else { uint64_t now = clock_->NowMicros(); @@ -2397,9 +2386,7 @@ class Stats { } } - void AddBytes(int64_t n) { - bytes_ += n; - } + void AddBytes(int64_t n) { bytes_ += n; } void Report(const Slice& name) { // Pretend at least one op was done in case we are running a benchmark @@ -2417,7 +2404,7 @@ class Stats { extra = rate; } AppendWithSpace(&extra, message_); - double throughput = (double)done_/elapsed; + double throughput = (double)done_ / elapsed; fprintf(stdout, "%-12s : %11.3f micros/op %ld ops/sec %.3f seconds %" PRIu64 @@ -2642,13 +2629,13 @@ struct SharedState { long num_done; bool start; - SharedState() : cv(&mu), perf_level(FLAGS_perf_level) { } + SharedState() : cv(&mu), perf_level(FLAGS_perf_level) {} }; // Per-thread state for concurrent executions of the same benchmark. 
struct ThreadState { - int tid; // 0..n-1 when running in n threads - Random64 rand; // Has different seeds for different threads + int tid; // 0..n-1 when running in n threads + Random64 rand; // Has different seeds for different threads Stats stats; SharedState* shared; @@ -2660,7 +2647,7 @@ class Duration { public: Duration(uint64_t max_seconds, int64_t max_ops, int64_t ops_per_stage = 0) { max_seconds_ = max_seconds; - max_ops_= max_ops; + max_ops_ = max_ops; ops_per_stage_ = (ops_per_stage > 0) ? ops_per_stage : max_ops; ops_ = 0; start_at_ = FLAGS_env->NowMicros(); } int64_t GetStage() { return std::min(ops_, max_ops_ - 1) / ops_per_stage_; } bool Done(int64_t increment) { - if (increment <= 0) increment = 1; // avoid Done(0) and infinite loops + if (increment <= 0) increment = 1; // avoid Done(0) and infinite loops ops_ += increment; if (max_seconds_) { @@ -2726,7 +2713,7 @@ class Benchmark { int64_t readwrites_; int64_t merge_keys_; bool report_file_operations_; - bool use_blob_db_; // Stacked BlobDB + bool use_blob_db_; // Stacked BlobDB bool read_operands_; // read via GetMergeOperands() std::vector<std::string> keys_; @@ -2810,28 +2797,30 @@ class Benchmark { FLAGS_key_size, FLAGS_user_timestamp_size); auto avg_value_size = FLAGS_value_size; if (FLAGS_value_size_distribution_type_e == kFixed) { - fprintf(stdout, "Values: %d bytes each (%d bytes after compression)\n", + fprintf(stdout, + "Values: %d bytes each (%d bytes after compression)\n", avg_value_size, static_cast<int>(avg_value_size * FLAGS_compression_ratio + 0.5)); } else { avg_value_size = (FLAGS_value_size_min + FLAGS_value_size_max) / 2; - fprintf(stdout, "Values: %d avg bytes each (%d bytes after compression)\n", + fprintf(stdout, + "Values: %d avg bytes each (%d bytes after compression)\n", avg_value_size, static_cast<int>(avg_value_size * FLAGS_compression_ratio + 0.5)); fprintf(stdout, "Values Distribution: %s (min: %d, max: %d)\n", - FLAGS_value_size_distribution_type.c_str(), - FLAGS_value_size_min, FLAGS_value_size_max); + FLAGS_value_size_distribution_type.c_str(), FLAGS_value_size_min, + FLAGS_value_size_max); } fprintf(stdout, "Entries: %" PRIu64 "\n", num_); fprintf(stdout, "Prefix: %d bytes\n", FLAGS_prefix_size); fprintf(stdout, "Keys per prefix: %" PRIu64 "\n", keys_per_prefix_); fprintf(stdout, "RawSize: %.1f MB (estimated)\n", - ((static_cast<int64_t>(FLAGS_key_size + avg_value_size) * num_) - / 1048576.0)); - fprintf(stdout, "FileSize: %.1f MB (estimated)\n", - (((FLAGS_key_size + avg_value_size * FLAGS_compression_ratio) - * num_) - / 1048576.0)); + ((static_cast<int64_t>(FLAGS_key_size + avg_value_size) * num_) / + 1048576.0)); + fprintf( + stdout, "FileSize: %.1f MB (estimated)\n", + (((FLAGS_key_size + avg_value_size * FLAGS_compression_ratio) * num_) / + 1048576.0)); fprintf(stdout, "Write rate: %" PRIu64 " bytes/second\n", FLAGS_benchmark_write_rate_limit); fprintf(stdout, "Read rate: %" PRIu64 " ops/second\n", @@ -2865,9 +2854,9 @@ class Benchmark { void PrintWarnings(const char* compression) { #if defined(__GNUC__) && !defined(__OPTIMIZE__) - fprintf(stdout, - "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n" - ); + fprintf( + stdout, + "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"); #endif #ifndef NDEBUG fprintf(stdout, @@ -2903,7 +2892,7 @@ class Benchmark { start++; } unsigned int limit = static_cast<unsigned int>(s.size()); - while (limit > start && isspace(s[limit-1])) { + while (limit > start && isspace(s[limit - 1])) { limit--; } return Slice(s.data() + start, 
limit - start); } @@ -3660,7 +3649,7 @@ class Benchmark { method = &Benchmark::VerifyChecksum; } else if (name == "verifyfilechecksums") { method = &Benchmark::VerifyFileChecksums; -#endif // ROCKSDB_LITE +#endif // ROCKSDB_LITE } else if (name == "readrandomoperands") { read_operands_ = true; method = &Benchmark::ReadRandom; @@ -3875,7 +3864,7 @@ class Benchmark { } } - SetPerfLevel(static_cast<PerfLevel> (shared->perf_level)); + SetPerfLevel(static_cast<PerfLevel>(shared->perf_level)); perf_context.EnablePerLevelPerfContext(); thread->stats.Start(thread->tid); (arg->bm->*(arg->method))(thread); @@ -3977,7 +3966,7 @@ class Benchmark { template <typename FnType, typename... Args> static inline void ChecksumBenchmark(FnType fn, ThreadState* thread, Args... args) { - const int size = FLAGS_block_size; // use --block_size option for db_bench + const int size = FLAGS_block_size; // use --block_size option for db_bench std::string labels = "(" + std::to_string(FLAGS_block_size) + " per op)"; const char* label = labels.c_str(); @@ -4016,7 +4005,7 @@ class Benchmark { int dummy; std::atomic<void*> ap(&dummy); int count = 0; - void *ptr = nullptr; + void* ptr = nullptr; thread->stats.AddMessage("(each op is 1000 loads)"); while (count < 100000) { for (int i = 0; i < 1000; i++) { @@ -4028,7 +4017,7 @@ class Benchmark { if (ptr == nullptr) exit(1); // Disable unused variable warning. } - void Compress(ThreadState *thread) { + void Compress(ThreadState* thread) { RandomGenerator gen; Slice input = gen.Generate(FLAGS_block_size); int64_t bytes = 0; @@ -4060,7 +4049,7 @@ class Benchmark { } } - void Uncompress(ThreadState *thread) { + void Uncompress(ThreadState* thread) { RandomGenerator gen; Slice input = gen.Generate(FLAGS_block_size); std::string compressed; @@ -4162,7 +4151,7 @@ class Benchmark { options.write_buffer_size = FLAGS_write_buffer_size; options.max_write_buffer_number = FLAGS_max_write_buffer_number; options.min_write_buffer_number_to_merge = - FLAGS_min_write_buffer_number_to_merge; + FLAGS_min_write_buffer_number_to_merge; options.max_write_buffer_number_to_maintain = FLAGS_max_write_buffer_number_to_maintain; options.max_write_buffer_size_to_maintain = @@ -4230,8 +4219,9 @@ class Benchmark { } else if ((FLAGS_prefix_size == 0) && (options.memtable_factory->IsInstanceOf("prefix_hash") || options.memtable_factory->IsInstanceOf("hash_linkedlist"))) { - fprintf(stderr, "prefix_size should be non-zero if PrefixHash or " - "HashLinkedList memtablerep is used\n"); + fprintf(stderr, + "prefix_size should be non-zero if PrefixHash or " + "HashLinkedList memtablerep is used\n"); exit(1); } if (FLAGS_use_plain_table) { @@ -4272,8 +4262,8 @@ class Benchmark { ROCKSDB_NAMESPACE::CuckooTableOptions table_options; table_options.hash_table_ratio = FLAGS_cuckoo_hash_ratio; table_options.identity_as_first_hash = FLAGS_identity_as_first_hash; - options.table_factory = std::shared_ptr<TableFactory>( - NewCuckooTableFactory(table_options)); + options.table_factory = + std::shared_ptr<TableFactory>(NewCuckooTableFactory(table_options)); #else fprintf(stderr, "Cuckoo table is not supported in lite mode\n"); exit(1); @@ -4285,7 +4275,7 @@ class Benchmark { if (FLAGS_use_hash_search) { if (FLAGS_prefix_size == 0) { fprintf(stderr, - "prefix_size not assigned when enable use_hash_search \n"); + "prefix_size not assigned when enable use_hash_search \n"); exit(1); } block_based_options.index_type = BlockBasedTableOptions::kHashSearch; @@ -4520,13 +4510,13 @@ class Benchmark { exit(1); } options.max_bytes_for_level_multiplier_additional = - FLAGS_max_bytes_for_level_multiplier_additional_v; + 
FLAGS_max_bytes_for_level_multiplier_additional_v; } options.level0_stop_writes_trigger = FLAGS_level0_stop_writes_trigger; options.level0_file_num_compaction_trigger = FLAGS_level0_file_num_compaction_trigger; options.level0_slowdown_writes_trigger = - FLAGS_level0_slowdown_writes_trigger; + FLAGS_level0_slowdown_writes_trigger; options.compression = FLAGS_compression_type_e; if (FLAGS_simulate_hybrid_fs_file != "") { options.bottommost_temperature = Temperature::kWarm; @@ -4546,8 +4536,7 @@ class Benchmark { for (int i = 0; i < FLAGS_min_level_to_compress; i++) { options.compression_per_level[i] = kNoCompression; } - for (int i = FLAGS_min_level_to_compress; - i < FLAGS_num_levels; i++) { + for (int i = FLAGS_min_level_to_compress; i < FLAGS_num_levels; i++) { options.compression_per_level[i] = FLAGS_compression_type_e; } } @@ -4601,23 +4590,23 @@ class Benchmark { // set universal style compaction configurations, if applicable if (FLAGS_universal_size_ratio != 0) { options.compaction_options_universal.size_ratio = - FLAGS_universal_size_ratio; + FLAGS_universal_size_ratio; } if (FLAGS_universal_min_merge_width != 0) { options.compaction_options_universal.min_merge_width = - FLAGS_universal_min_merge_width; + FLAGS_universal_min_merge_width; } if (FLAGS_universal_max_merge_width != 0) { options.compaction_options_universal.max_merge_width = - FLAGS_universal_max_merge_width; + FLAGS_universal_max_merge_width; } if (FLAGS_universal_max_size_amplification_percent != 0) { options.compaction_options_universal.max_size_amplification_percent = - FLAGS_universal_max_size_amplification_percent; + FLAGS_universal_max_size_amplification_percent; } if (FLAGS_universal_compression_size_percent != -1) { options.compaction_options_universal.compression_size_percent = - FLAGS_universal_compression_size_percent; + FLAGS_universal_compression_size_percent; } options.compaction_options_universal.allow_trivial_move = FLAGS_universal_allow_trivial_move; @@ -4807,7 +4796,7 @@ class Benchmark { } void OpenDb(Options options, const std::string& db_name, - DBWithColumnFamilies* db) { + DBWithColumnFamilies* db) { uint64_t open_start = FLAGS_report_open_timing ? FLAGS_env->NowNanos() : 0; Status s; // Open with column families if necessary. 
@@ -4822,7 +4811,7 @@ class Benchmark { std::vector<ColumnFamilyDescriptor> column_families; for (size_t i = 0; i < num_hot; i++) { column_families.push_back(ColumnFamilyDescriptor( - ColumnFamilyName(i), ColumnFamilyOptions(options))); + ColumnFamilyName(i), ColumnFamilyOptions(options))); } std::vector<int> cfh_idx_to_prob; if (!FLAGS_column_family_distribution.empty()) { @@ -4848,8 +4837,8 @@ class Benchmark { } #ifndef ROCKSDB_LITE if (FLAGS_readonly) { - s = DB::OpenForReadOnly(options, db_name, column_families, - &db->cfh, &db->db); + s = DB::OpenForReadOnly(options, db_name, column_families, &db->cfh, + &db->db); } else if (FLAGS_optimistic_transaction_db) { s = OptimisticTransactionDB::Open(options, db_name, column_families, &db->cfh, &db->opt_txn_db); @@ -4960,9 +4949,7 @@ class Benchmark { } } - enum WriteMode { - RANDOM, SEQUENTIAL, UNIQUE_RANDOM - }; + enum WriteMode { RANDOM, SEQUENTIAL, UNIQUE_RANDOM }; void WriteSeqDeterministic(ThreadState* thread) { DoDeterministicCompact(thread, open_options_.compaction_style, SEQUENTIAL); @@ -4973,13 +4960,9 @@ class Benchmark { UNIQUE_RANDOM); } - void WriteSeq(ThreadState* thread) { - DoWrite(thread, SEQUENTIAL); - } + void WriteSeq(ThreadState* thread) { DoWrite(thread, SEQUENTIAL); } - void WriteRandom(ThreadState* thread) { - DoWrite(thread, RANDOM); - } + void WriteRandom(ThreadState* thread) { DoWrite(thread, RANDOM); } void WriteUniqueRandom(ThreadState* thread) { DoWrite(thread, UNIQUE_RANDOM); @@ -5033,9 +5016,7 @@ class Benchmark { std::vector<uint64_t> values_; }; - DB* SelectDB(ThreadState* thread) { - return SelectDBWithCfh(thread)->db; - } + DB* SelectDB(ThreadState* thread) { return SelectDBWithCfh(thread)->db; } DBWithColumnFamilies* SelectDBWithCfh(ThreadState* thread) { return SelectDBWithCfh(thread->rand.Next()); @@ -5044,13 +5025,13 @@ class Benchmark { DBWithColumnFamilies* SelectDBWithCfh(uint64_t rand_int) { if (db_.db != nullptr) { return &db_; - } else { + } else { return &multi_dbs_[rand_int % multi_dbs_.size()]; } } double SineRate(double x) { - return FLAGS_sine_a*sin((FLAGS_sine_b*x) + FLAGS_sine_c) + FLAGS_sine_d; + return FLAGS_sine_a * sin((FLAGS_sine_b * x) + FLAGS_sine_c) + FLAGS_sine_d; } void DoWrite(ThreadState* thread, WriteMode write_mode) { @@ -5354,8 +5335,7 @@ class Benchmark { // We use same rand_num as seed for key and column family so that we // can deterministically find the cfh corresponding to a particular // key while reading the key. - batch.Put(db_with_cfh->GetCfh(rand_num), key, - val); + batch.Put(db_with_cfh->GetCfh(rand_num), key, val); } batch_bytes += val.size() + key_size_ + user_timestamp_size_; bytes += val.size() + key_size_ + user_timestamp_size_; @@ -5427,8 +5407,8 @@ class Benchmark { } if (thread->shared->write_rate_limiter.get() != nullptr) { thread->shared->write_rate_limiter->Request( - batch_bytes, Env::IO_HIGH, - nullptr /* stats */, RateLimiter::OpType::kWrite); + batch_bytes, Env::IO_HIGH, nullptr /* stats */, + RateLimiter::OpType::kWrite); // Set time at which last op finished to Now() to hide latency and // sleep from rate limiter. Also, do the check once per batch, not // once per write. 
@@ -5463,12 +5443,12 @@ class Benchmark { if (usecs_since_last > (FLAGS_sine_write_rate_interval_milliseconds * uint64_t{1000})) { double usecs_since_start = - static_cast<double>(now - thread->stats.GetStart()); + static_cast<double>(now - thread->stats.GetStart()); thread->stats.ResetSineInterval(); uint64_t write_rate = - static_cast<uint64_t>(SineRate(usecs_since_start / 1000000.0)); + static_cast<uint64_t>(SineRate(usecs_since_start / 1000000.0)); thread->shared->write_rate_limiter.reset( - NewGenericRateLimiter(write_rate)); + NewGenericRateLimiter(write_rate)); } } if (!s.ok()) { @@ -5564,11 +5544,13 @@ class Benchmark { continue; } } - writes_ /= static_cast<int64_t>(open_options_.max_bytes_for_level_multiplier); + writes_ /= + static_cast<int64_t>(open_options_.max_bytes_for_level_multiplier); } for (size_t i = 0; i < num_db; i++) { if (sorted_runs[i].size() < num_levels - 1) { - fprintf(stderr, "n is too small to fill %" ROCKSDB_PRIszt " levels\n", num_levels); + fprintf(stderr, "n is too small to fill %" ROCKSDB_PRIszt " levels\n", + num_levels); exit(1); } } @@ -5579,13 +5561,14 @@ class Benchmark { auto options = db->GetOptions(); MutableCFOptions mutable_cf_options(options); for (size_t j = 0; j < sorted_runs[i].size(); j++) { - compactionOptions.output_file_size_limit = - MaxFileSizeForLevel(mutable_cf_options, - static_cast<int>(output_level), compaction_style); + compactionOptions.output_file_size_limit = MaxFileSizeForLevel( + mutable_cf_options, static_cast<int>(output_level), + compaction_style); std::cout << sorted_runs[i][j].size() << std::endl; - db->CompactFiles(compactionOptions, {sorted_runs[i][j].back().name, - sorted_runs[i][j].front().name}, - static_cast<int>(output_level - j) /*level*/); + db->CompactFiles( + compactionOptions, + {sorted_runs[i][j].back().name, sorted_runs[i][j].front().name}, + static_cast<int>(output_level - j) /*level*/); } } } else if (compaction_style == kCompactionStyleUniversal) { @@ -5616,11 +5599,13 @@ class Benchmark { } num_files_at_level0[i] = meta.levels[0].files.size(); } - writes_ = static_cast<int64_t>(writes_* static_cast<double>(100) / (ratio + 200)); + writes_ = static_cast<int64_t>(writes_ * static_cast<double>(100) / + (ratio + 200)); } for (size_t i = 0; i < num_db; i++) { if (sorted_runs[i].size() < num_levels) { - fprintf(stderr, "n is too small to fill %" ROCKSDB_PRIszt " levels\n", num_levels); + fprintf(stderr, "n is too small to fill %" ROCKSDB_PRIszt " levels\n", + num_levels); exit(1); } } @@ -5631,9 +5616,9 @@ class Benchmark { auto options = db->GetOptions(); MutableCFOptions mutable_cf_options(options); for (size_t j = 0; j < sorted_runs[i].size(); j++) { - compactionOptions.output_file_size_limit = - MaxFileSizeForLevel(mutable_cf_options, - static_cast<int>(output_level), compaction_style); + compactionOptions.output_file_size_limit = MaxFileSizeForLevel( + mutable_cf_options, static_cast<int>(output_level), + compaction_style); db->CompactFiles( compactionOptions, {sorted_runs[i][j].back().name, sorted_runs[i][j].front().name}, @@ -5644,7 +5629,7 @@ class Benchmark { } else if (compaction_style == kCompactionStyleFIFO) { if (num_levels != 1) { return Status::InvalidArgument( - "num_levels should be 1 for FIFO compaction"); + "num_levels should be 1 for FIFO compaction"); } if (FLAGS_num_multi_db != 0) { return Status::InvalidArgument("Doesn't support multiDB"); } @@ -5661,7 +5646,7 @@ class Benchmark { db->GetColumnFamilyMetaData(&meta); auto total_size = meta.levels[0].size; if (total_size >= - db->GetOptions().compaction_options_fifo.max_table_files_size) { 
for (auto file_meta : meta.levels[0].files) { file_names.emplace_back(file_meta.name); } @@ -5698,8 +5683,8 @@ class Benchmark { db->GetColumnFamilyMetaData(&meta); auto total_size = meta.levels[0].size; assert(total_size <= - db->GetOptions().compaction_options_fifo.max_table_files_size); - break; + db->GetOptions().compaction_options_fifo.max_table_files_size); + break; } // verify smallest/largest seqno and key range of each sorted run @@ -5765,7 +5750,9 @@ class Benchmark { for (size_t k = 0; k < num_db; k++) { auto db = db_list[k]; fprintf(stdout, - "---------------------- DB %" ROCKSDB_PRIszt " LSM ---------------------\n", k); + "---------------------- DB %" ROCKSDB_PRIszt + " LSM ---------------------\n", + k); db->GetColumnFamilyMetaData(&meta); for (auto& levelMeta : meta.levels) { if (levelMeta.files.empty()) { @@ -5983,7 +5970,9 @@ class Benchmark { } while (!duration.Done(100)); char msg[100]; - snprintf(msg, sizeof(msg), "(%" PRIu64 " of %" PRIu64 " found, " + snprintf(msg, sizeof(msg), + "(%" PRIu64 " of %" PRIu64 + " found, " "issued %" PRIu64 " non-exist keys)\n", found, read, nonexist); @@ -6119,8 +6108,8 @@ class Benchmark { } char msg[100]; - snprintf(msg, sizeof(msg), "(%" PRIu64 " of %" PRIu64 " found)\n", - found, read); + snprintf(msg, sizeof(msg), "(%" PRIu64 " of %" PRIu64 " found)\n", found, + read); thread->stats.AddBytes(bytes); thread->stats.AddMessage(msg); @@ -6135,7 +6124,7 @@ class Benchmark { int64_t found = 0; ReadOptions options = read_options_; std::vector<Slice> keys; - std::vector<std::unique_ptr<const char[]> > key_guards; + std::vector<std::unique_ptr<const char[]>> key_guards; std::vector<std::string> values(entries_per_batch_); PinnableSlice* pin_values = new PinnableSlice[entries_per_batch_]; std::unique_ptr<PinnableSlice[]> pin_values_guard(pin_values); @@ -6219,8 +6208,8 @@ class Benchmark { } char msg[100]; - snprintf(msg, sizeof(msg), "(%" PRIu64 " of %" PRIu64 " found)", - found, read); + snprintf(msg, sizeof(msg), "(%" PRIu64 " of %" PRIu64 " found)", found, + read); thread->stats.AddBytes(bytes); thread->stats.AddMessage(msg); } @@ -6622,8 +6611,8 @@ class Benchmark { } else if (query_type == 1) { // the Put query puts++; - int64_t val_size = ParetoCdfInversion( - u, FLAGS_value_theta, FLAGS_value_k, FLAGS_value_sigma); + int64_t val_size = ParetoCdfInversion(u, FLAGS_value_theta, + FLAGS_value_k, FLAGS_value_sigma); if (val_size < 10) { val_size = 10; } else if (val_size > value_max) { @@ -6830,8 +6819,8 @@ class Benchmark { } char msg[100]; - snprintf(msg, sizeof(msg), "(%" PRIu64 " of %" PRIu64 " found)\n", - found, read); + snprintf(msg, sizeof(msg), "(%" PRIu64 " of %" PRIu64 " found)\n", found, + read); thread->stats.AddBytes(bytes); thread->stats.AddMessage(msg); } @@ -6894,13 +6883,9 @@ class Benchmark { } } - void DeleteSeq(ThreadState* thread) { - DoDelete(thread, true); - } + void DeleteSeq(ThreadState* thread) { DoDelete(thread, true); } - void DeleteRandom(ThreadState* thread) { - DoDelete(thread, false); - } + void DeleteRandom(ThreadState* thread) { DoDelete(thread, false); } void ReadWhileWriting(ThreadState* thread) { if (thread->tid > 0) { @@ -7006,9 +6991,9 @@ class Benchmark { thread->stats.FinishedOps(&db_, db_.db, 1, kWrite); if (FLAGS_benchmark_write_rate_limit > 0) { - write_rate_limiter->Request( - key.size() + val.size(), Env::IO_HIGH, - nullptr /* stats */, RateLimiter::OpType::kWrite); + write_rate_limiter->Request(key.size() + val.size(), Env::IO_HIGH, + nullptr /* stats */, + RateLimiter::OpType::kWrite); } if (writes_per_range_tombstone_ > 0 && @@ -7132,7 +7117,6 @@ class Benchmark { return s; } - // 
Given a key K, this deletes (K+"0", V), (K+"1", V), (K+"2", V) // in DB atomically i.e in a single batch. Also refer GetMany. Status DeleteMany(DB* db, const WriteOptions& writeoptions, @@ -7244,7 +7228,7 @@ class Benchmark { put_weight = 100 - get_weight - delete_weight; } GenerateKeyFromInt(thread->rand.Next() % FLAGS_numdistinct, - FLAGS_numdistinct, &key); + FLAGS_numdistinct, &key); if (get_weight > 0) { // do all the gets first Status s = GetMany(db, key, &value); @@ -7282,8 +7266,8 @@ class Benchmark { } char msg[128]; snprintf(msg, sizeof(msg), - "( get:%" PRIu64 " put:%" PRIu64 " del:%" PRIu64 " total:%" \ - PRIu64 " found:%" PRIu64 ")", + "( get:%" PRIu64 " put:%" PRIu64 " del:%" PRIu64 " total:%" PRIu64 + " found:%" PRIu64 ")", gets_done, puts_done, deletes_done, readwrites_, found); thread->stats.AddMessage(msg); } @@ -7337,7 +7321,7 @@ class Benchmark { get_weight--; reads_done++; thread->stats.FinishedOps(nullptr, db, 1, kRead); - } else if (put_weight > 0) { + } else if (put_weight > 0) { // then do all the corresponding number of puts // for all the gets we have done earlier Status s; @@ -7357,8 +7341,9 @@ class Benchmark { } } char msg[100]; - snprintf(msg, sizeof(msg), "( reads:%" PRIu64 " writes:%" PRIu64 \ - " total:%" PRIu64 " found:%" PRIu64 ")", + snprintf(msg, sizeof(msg), + "( reads:%" PRIu64 " writes:%" PRIu64 " total:%" PRIu64 + " found:%" PRIu64 ")", reads_done, writes_done, readwrites_, found); thread->stats.AddMessage(msg); } @@ -7422,8 +7407,8 @@ class Benchmark { thread->stats.FinishedOps(nullptr, db, 1, kUpdate); } char msg[100]; - snprintf(msg, sizeof(msg), - "( updates:%" PRIu64 " found:%" PRIu64 ")", readwrites_, found); + snprintf(msg, sizeof(msg), "( updates:%" PRIu64 " found:%" PRIu64 ")", + readwrites_, found); thread->stats.AddBytes(bytes); thread->stats.AddMessage(msg); } @@ -7466,7 +7451,8 @@ class Benchmark { exit(1); } - Slice value = gen.Generate(static_cast(existing_value.size())); + Slice value = + gen.Generate(static_cast(existing_value.size())); std::string new_value; if (status.ok()) { @@ -7490,8 +7476,8 @@ class Benchmark { thread->stats.FinishedOps(nullptr, db, 1); } char msg[100]; - snprintf(msg, sizeof(msg), - "( updates:%" PRIu64 " found:%" PRIu64 ")", readwrites_, found); + snprintf(msg, sizeof(msg), "( updates:%" PRIu64 " found:%" PRIu64 ")", + readwrites_, found); thread->stats.AddMessage(msg); } @@ -7539,7 +7525,7 @@ class Benchmark { Slice operand = gen.Generate(); if (value.size() > 0) { // Use a delimiter to match the semantics for StringAppendOperator - value.append(1,','); + value.append(1, ','); } value.append(operand.data(), operand.size()); @@ -7561,7 +7547,7 @@ class Benchmark { char msg[100]; snprintf(msg, sizeof(msg), "( updates:%" PRIu64 " found:%" PRIu64 ")", - readwrites_, found); + readwrites_, found); thread->stats.AddBytes(bytes); thread->stats.AddMessage(msg); } @@ -7592,12 +7578,10 @@ class Benchmark { Slice val = gen.Generate(); if (FLAGS_num_column_families > 1) { s = db_with_cfh->db->Merge(write_options_, - db_with_cfh->GetCfh(key_rand), key, - val); + db_with_cfh->GetCfh(key_rand), key, val); } else { - s = db_with_cfh->db->Merge(write_options_, - db_with_cfh->db->DefaultColumnFamily(), key, - val); + s = db_with_cfh->db->Merge( + write_options_, db_with_cfh->db->DefaultColumnFamily(), key, val); } if (!s.ok()) { @@ -7650,8 +7634,7 @@ class Benchmark { thread->stats.FinishedOps(nullptr, db, 1, kMerge); } else { Status s = db->Get(read_options_, key, &value); - if (value.length() > max_length) - max_length = 
value.length(); + if (value.length() > max_length) max_length = value.length(); if (!s.ok() && !s.IsNotFound()) { fprintf(stderr, "get error: %s\n", s.ToString().c_str()); } @@ -7920,9 +7903,8 @@ class Benchmark { return; } - Status s = - RandomTransactionInserter::Verify(db_.db, - static_cast<uint16_t>(FLAGS_transaction_sets)); + Status s = RandomTransactionInserter::Verify( + db_.db, static_cast<uint16_t>(FLAGS_transaction_sets)); if (s.ok()) { fprintf(stdout, "RandomTransactionVerify Success.\n"); @@ -8142,9 +8124,9 @@ class Benchmark { thread->stats.AddBytes(bytes); if (FLAGS_benchmark_write_rate_limit > 0) { - write_rate_limiter->Request( - key.size() + val.size(), Env::IO_HIGH, - nullptr /* stats */, RateLimiter::OpType::kWrite); + write_rate_limiter->Request(key.size() + val.size(), Env::IO_HIGH, + nullptr /* stats */, + RateLimiter::OpType::kWrite); } } } @@ -8583,7 +8565,7 @@ int db_bench_tool(int argc, char** argv) { } FLAGS_compression_type_e = - StringToCompressionType(FLAGS_compression_type.c_str()); + StringToCompressionType(FLAGS_compression_type.c_str()); FLAGS_wal_compression_e = StringToCompressionType(FLAGS_wal_compression.c_str()); @@ -8594,7 +8576,7 @@ int db_bench_tool(int argc, char** argv) { #ifndef ROCKSDB_LITE // Stacked BlobDB FLAGS_blob_db_compression_type_e = - StringToCompressionType(FLAGS_blob_db_compression_type.c_str()); + StringToCompressionType(FLAGS_blob_db_compression_type.c_str()); int env_opts = !FLAGS_env_uri.empty() + !FLAGS_fs_uri.empty(); if (env_opts > 1) { @@ -8663,7 +8645,7 @@ int db_bench_tool(int argc, char** argv) { } FLAGS_value_size_distribution_type_e = - StringToDistributionType(FLAGS_value_size_distribution_type.c_str()); + StringToDistributionType(FLAGS_value_size_distribution_type.c_str()); // Note options sanitization may increase thread pool sizes according to // max_background_flushes/max_background_compactions/max_background_jobs diff --git a/tools/db_sanity_test.cc b/tools/db_sanity_test.cc index 1c16bf392..8cc67f5d5 100644 --- a/tools/db_sanity_test.cc +++ b/tools/db_sanity_test.cc @@ -5,19 +5,19 @@ #include #include -#include #include +#include +#include "port/port.h" +#include "rocksdb/comparator.h" #include "rocksdb/db.h" -#include "rocksdb/options.h" #include "rocksdb/env.h" +#include "rocksdb/filter_policy.h" +#include "rocksdb/options.h" #include "rocksdb/slice.h" +#include "rocksdb/slice_transform.h" #include "rocksdb/status.h" -#include "rocksdb/comparator.h" #include "rocksdb/table.h" -#include "rocksdb/slice_transform.h" -#include "rocksdb/filter_policy.h" -#include "port/port.h" #include "util/string_util.h" namespace ROCKSDB_NAMESPACE { diff --git a/tools/dump/db_dump_tool.cc b/tools/dump/db_dump_tool.cc index be3ff7962..427a54d99 100644 --- a/tools/dump/db_dump_tool.cc +++ b/tools/dump/db_dump_tool.cc @@ -5,11 +5,12 @@ #ifndef ROCKSDB_LITE +#include "rocksdb/db_dump_tool.h" + #include #include #include "rocksdb/db.h" -#include "rocksdb/db_dump_tool.h" #include "rocksdb/env.h" #include "util/coding.h" diff --git a/tools/ldb_cmd.cc b/tools/ldb_cmd.cc index 48e6b0d02..ecd2d2977 100644 --- a/tools/ldb_cmd.cc +++ b/tools/ldb_cmd.cc @@ -122,7 +122,7 @@ void DumpSstFile(Options options, std::string filename, bool output_hex, void DumpBlobFile(const std::string& filename, bool is_key_hex, bool is_value_hex, bool dump_uncompressed_blobs); -}; +}; // namespace LDBCommand* LDBCommand::InitFromCmdLineArgs( int argc, char const* const* argv, const Options& options, @@ -165,7 +165,7 @@ LDBCommand* LDBCommand::InitFromCmdLineArgs( const std::string 
diff --git a/tools/ldb_cmd.cc b/tools/ldb_cmd.cc
index 48e6b0d02..ecd2d2977 100644
--- a/tools/ldb_cmd.cc
+++ b/tools/ldb_cmd.cc
@@ -122,7 +122,7 @@ void DumpSstFile(Options options, std::string filename, bool output_hex,
 void DumpBlobFile(const std::string& filename, bool is_key_hex,
                   bool is_value_hex, bool dump_uncompressed_blobs);
-};
+};  // namespace

 LDBCommand* LDBCommand::InitFromCmdLineArgs(
     int argc, char const* const* argv, const Options& options,
@@ -165,7 +165,7 @@ LDBCommand* LDBCommand::InitFromCmdLineArgs(
   const std::string OPTION_PREFIX = "--";

   for (const auto& arg : args) {
-    if (arg[0] == '-' && arg[1] == '-'){
+    if (arg[0] == '-' && arg[1] == '-') {
       std::vector splits = StringSplit(arg, '=');
       // --option_name=option_value
       if (splits.size() == 2) {
@@ -295,8 +295,7 @@ LDBCommand* LDBCommand::SelectCommand(const ParsedParams& parsed_params) {
                               parsed_params.flags);
   } else if (parsed_params.cmd == CheckPointCommand::Name()) {
     return new CheckPointCommand(parsed_params.cmd_params,
-                                 parsed_params.option_map,
-                                 parsed_params.flags);
+                                 parsed_params.option_map, parsed_params.flags);
   } else if (parsed_params.cmd == RepairCommand::Name()) {
     return new RepairCommand(parsed_params.cmd_params, parsed_params.option_map,
                              parsed_params.flags);
@@ -884,7 +883,7 @@ void LDBCommand::OverrideBaseCFOptions(ColumnFamilyOptions* cf_opts) {

   int write_buffer_size;
   if (ParseIntOption(option_map_, ARG_WRITE_BUFFER_SIZE, write_buffer_size,
-        exec_state_)) {
+                     exec_state_)) {
     if (write_buffer_size > 0) {
       cf_opts->write_buffer_size = write_buffer_size;
     } else {
@@ -1284,7 +1283,7 @@ void DBLoaderCommand::DoCommand() {
     } else if (0 == line.find("Created bg thread 0x")) {
       // ignore this line
     } else {
-      bad_lines ++;
+      bad_lines++;
     }
   }

@@ -1373,7 +1372,6 @@ ManifestDumpCommand::ManifestDumpCommand(
 }

 void ManifestDumpCommand::DoCommand() {
-
   std::string manifestfile;

   if (!path_.empty()) {
@@ -1730,7 +1728,7 @@ void IncBucketCounts(std::vector& bucket_counts, int ttl_start,
   (void)num_buckets;
 #endif
   assert(time_range > 0 && timekv >= ttl_start && bucket_size > 0 &&
-      timekv < (ttl_start + time_range) && num_buckets > 1);
+         timekv < (ttl_start + time_range) && num_buckets > 1);
   int bucket = (timekv - ttl_start) / bucket_size;
   bucket_counts[bucket]++;
 }
@@ -1739,7 +1737,7 @@ void PrintBucketCounts(const std::vector& bucket_counts, int ttl_start,
                        int ttl_end, int bucket_size, int num_buckets) {
   int time_point = ttl_start;
-  for(int i = 0; i < num_buckets - 1; i++, time_point += bucket_size) {
+  for (int i = 0; i < num_buckets - 1; i++, time_point += bucket_size) {
     fprintf(stdout, "Keys in range %s to %s : %lu\n",
             TimeToHumanString(time_point).c_str(),
             TimeToHumanString(time_point + bucket_size).c_str(),
@@ -1784,10 +1782,10 @@ InternalDumpCommand::InternalDumpCommand(
   if (itr != options.end()) {
     delim_ = itr->second;
     count_delim_ = true;
-    // fprintf(stdout,"delim = %c\n",delim_[0]);
+    // fprintf(stdout,"delim = %c\n",delim_[0]);
   } else {
     count_delim_ = IsFlagPresent(flags, ARG_COUNT_DELIM);
-    delim_=".";
+    delim_ = ".";
   }

   print_stats_ = IsFlagPresent(flags, ARG_STATS);
@@ -1841,8 +1839,8 @@ void InternalDumpCommand::DoCommand() {
   }
   std::string rtype1, rtype2, row, val;
   rtype2 = "";
-  uint64_t c=0;
-  uint64_t s1=0,s2=0;
+  uint64_t c = 0;
+  uint64_t s1 = 0, s2 = 0;
   long long count = 0;

   for (auto& key_version : key_versions) {
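[Editorial note: the next hunk folds several single-statement loops onto one line. That is standard behavior for the Google-derived preset RocksDB uses (AllowShortLoopsOnASingleLine and related options) whenever the result fits in the 80-column limit. A small self-contained sketch with made-up data, not ldb code:

    #include <cstddef>
    #include <string>

    // Counts characters before the first NUL; the loop body is a single
    // statement, so clang-format keeps the whole loop on one line.
    int CountPrefix(const std::string& row) {
      int n = 0;
      for (size_t k = 0; k < row.size() && row[k] != '\0'; k++) n++;
      if (n == 0) return -1;  // short ifs without an else may be folded too
      return n;
    }
]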
@@ -1857,25 +1855,24 @@ void InternalDumpCommand::DoCommand() {
     int k;
     if (count_delim_) {
       rtype1 = "";
-      s1=0;
+      s1 = 0;
       row = ikey.Encode().ToString();
       val = key_version.value;
-      for(k=0;row[k]!='\x01' && row[k]!='\0';k++)
-        s1++;
-      for(k=0;val[k]!='\x01' && val[k]!='\0';k++)
-        s1++;
-      for(int j=0;row[j]!=delim_[0] && row[j]!='\0' && row[j]!='\x01';j++)
-        rtype1+=row[j];
-      if(rtype2.compare("") && rtype2.compare(rtype1)!=0) {
+      for (k = 0; row[k] != '\x01' && row[k] != '\0'; k++) s1++;
+      for (k = 0; val[k] != '\x01' && val[k] != '\0'; k++) s1++;
+      for (int j = 0; row[j] != delim_[0] && row[j] != '\0' && row[j] != '\x01';
+           j++)
+        rtype1 += row[j];
+      if (rtype2.compare("") && rtype2.compare(rtype1) != 0) {
         fprintf(stdout, "%s => count:%" PRIu64 "\tsize:%" PRIu64 "\n",
                 rtype2.c_str(), c, s2);
-        c=1;
-        s2=s1;
+        c = 1;
+        s2 = s1;
         rtype2 = rtype1;
       } else {
         c++;
-        s2+=s1;
-        rtype2=rtype1;
+        s2 += s1;
+        rtype2 = rtype1;
       }
     }
@@ -1901,7 +1898,7 @@ void InternalDumpCommand::DoCommand() {
     // Terminate if maximum number of keys have been dumped
     if (max_keys_ > 0 && count >= max_keys_) break;
   }
-  if(count_delim_) {
+  if (count_delim_) {
     fprintf(stdout, "%s => count:%" PRIu64 "\tsize:%" PRIu64 "\n",
             rtype2.c_str(), c, s2);
   } else {
@@ -1966,7 +1963,7 @@ DBDumperCommand::DBDumperCommand(
     count_delim_ = true;
   } else {
     count_delim_ = IsFlagPresent(flags, ARG_COUNT_DELIM);
-    delim_=".";
+    delim_ = ".";
   }

   print_stats_ = IsFlagPresent(flags, ARG_STATS);
@@ -2114,13 +2111,13 @@ void DBDumperCommand::DoDumpCommand() {
   int bucket_size;
   if (!ParseIntOption(option_map_, ARG_TTL_BUCKET, bucket_size, exec_state_) ||
       bucket_size <= 0) {
-    bucket_size = time_range; // Will have just 1 bucket by default
+    bucket_size = time_range;  // Will have just 1 bucket by default
   }
-  //cretaing variables for row count of each type
+  // cretaing variables for row count of each type
   std::string rtype1, rtype2, row, val;
   rtype2 = "";
-  uint64_t c=0;
-  uint64_t s1=0,s2=0;
+  uint64_t c = 0;
+  uint64_t s1 = 0, s2 = 0;

   // At this point, bucket_size=0 => time_range=0
   int num_buckets = (bucket_size >= time_range)
@@ -2138,11 +2135,9 @@ void DBDumperCommand::DoDumpCommand() {
   for (; iter->Valid(); iter->Next()) {
     int rawtime = 0;
     // If end marker was specified, we stop before it
-    if (!null_to_ && (iter->key().ToString() >= to_))
-      break;
+    if (!null_to_ && (iter->key().ToString() >= to_)) break;
     // Terminate if maximum number of keys have been dumped
-    if (max_keys == 0)
-      break;
+    if (max_keys == 0) break;
     if (is_db_ttl_) {
       TtlIterator* it_ttl = static_cast_with_check(iter);
       rawtime = it_ttl->ttl_timestamp();
@@ -2162,21 +2157,20 @@ void DBDumperCommand::DoDumpCommand() {
       rtype1 = "";
       row = iter->key().ToString();
       val = iter->value().ToString();
-      s1 = row.size()+val.size();
-      for(int j=0;row[j]!=delim_[0] && row[j]!='\0';j++)
-        rtype1+=row[j];
-      if(rtype2.compare("") && rtype2.compare(rtype1)!=0) {
+      s1 = row.size() + val.size();
+      for (int j = 0; row[j] != delim_[0] && row[j] != '\0'; j++)
+        rtype1 += row[j];
+      if (rtype2.compare("") && rtype2.compare(rtype1) != 0) {
         fprintf(stdout, "%s => count:%" PRIu64 "\tsize:%" PRIu64 "\n",
                 rtype2.c_str(), c, s2);
-        c=1;
-        s2=s1;
+        c = 1;
+        s2 = s1;
         rtype2 = rtype1;
       } else {
-        c++;
-        s2+=s1;
-        rtype2=rtype1;
+        c++;
+        s2 += s1;
+        rtype2 = rtype1;
       }
-
     }

     if (count_only_) {
@@ -2197,7 +2191,7 @@ void DBDumperCommand::DoDumpCommand() {
   if (num_buckets > 1 && is_db_ttl_) {
     PrintBucketCounts(bucket_counts, ttl_start, ttl_end, bucket_size,
                       num_buckets);
-  } else if(count_delim_) {
+  } else if (count_delim_) {
     fprintf(stdout, "%s => count:%" PRIu64 "\tsize:%" PRIu64 "\n",
             rtype2.c_str(), c, s2);
   } else {
@@ -2228,7 +2222,7 @@ ReduceDBLevelsCommand::ReduceDBLevelsCommand(
   ParseIntOption(option_map_, ARG_NEW_LEVELS, new_levels_, exec_state_);
   print_old_levels_ = IsFlagPresent(flags, ARG_PRINT_OLD_LEVELS);

-  if(new_levels_ <= 0) {
+  if (new_levels_ <= 0) {
     exec_state_ = LDBCommandExecuteResult::Failed(
         " Use --" + ARG_NEW_LEVELS + " to specify a new level number\n");
   }
@@ -2240,7 +2234,7 @@ std::vector ReduceDBLevelsCommand::PrepareArgs(
   ret.push_back("reduce_levels");
   ret.push_back("--" + ARG_DB + "=" + db_path);
   ret.push_back("--" + ARG_NEW_LEVELS + "=" + std::to_string(new_levels));
-  if(print_old_level) {
+  if (print_old_level) {
     ret.push_back("--" + ARG_PRINT_OLD_LEVELS);
   }
   return ret;
@@ -2265,8 +2259,7 @@ void ReduceDBLevelsCommand::OverrideBaseCFOptions(
   cf_opts->max_bytes_for_level_multiplier = 1;
 }

-Status ReduceDBLevelsCommand::GetOldNumOfLevels(Options& opt,
-                                                int* levels) {
+Status ReduceDBLevelsCommand::GetOldNumOfLevels(Options& opt, int* levels) {
   ImmutableDBOptions db_options(opt);
   EnvOptions soptions;
   std::shared_ptr tc(
@@ -2364,9 +2357,9 @@ ChangeCompactionStyleCommand::ChangeCompactionStyleCommand(
       old_compaction_style_(-1),
       new_compaction_style_(-1) {
   ParseIntOption(option_map_, ARG_OLD_COMPACTION_STYLE, old_compaction_style_,
-      exec_state_);
+                 exec_state_);
   if (old_compaction_style_ != kCompactionStyleLevel &&
-    old_compaction_style_ != kCompactionStyleUniversal) {
+      old_compaction_style_ != kCompactionStyleUniversal) {
     exec_state_ = LDBCommandExecuteResult::Failed(
         "Use --" + ARG_OLD_COMPACTION_STYLE + " to specify old compaction " +
         "style. Check ldb help for proper compaction style value.\n");
@@ -2374,9 +2367,9 @@ ChangeCompactionStyleCommand::ChangeCompactionStyleCommand(
   }
   ParseIntOption(option_map_, ARG_NEW_COMPACTION_STYLE, new_compaction_style_,
-      exec_state_);
+                 exec_state_);
   if (new_compaction_style_ != kCompactionStyleLevel &&
-    new_compaction_style_ != kCompactionStyleUniversal) {
+      new_compaction_style_ != kCompactionStyleUniversal) {
     exec_state_ = LDBCommandExecuteResult::Failed(
         "Use --" + ARG_NEW_COMPACTION_STYLE + " to specify new compaction " +
         "style. Check ldb help for proper compaction style value.\n");
   }
@@ -2716,7 +2709,6 @@ WALDumperCommand::WALDumperCommand(
     wal_file_ = itr->second;
   }

-
   print_header_ = IsFlagPresent(flags, ARG_PRINT_HEADER);
   print_values_ = IsFlagPresent(flags, ARG_PRINT_VALUE);
   is_write_committed_ = ParseBooleanOption(options, ARG_WRITE_COMMITTED, true);
@@ -2779,7 +2771,7 @@ void GetCommand::DoCommand() {
   Status st = db_->Get(ReadOptions(), GetCfHandle(), key_, &value);
   if (st.ok()) {
     fprintf(stdout, "%s\n",
-        (is_value_hex_ ? StringToHex(value) : value).c_str());
+            (is_value_hex_ ? StringToHex(value) : value).c_str());
   } else {
     std::stringstream oss;
     oss << "Get failed: " << st.ToString();
@@ -3017,9 +3009,9 @@ void ScanCommand::DoCommand() {
             TimeToHumanString(ttl_start).c_str(),
             TimeToHumanString(ttl_end).c_str());
   }
-  for ( ;
-       it->Valid() && (!end_key_specified_ || it->key().ToString() < end_key_);
-       it->Next()) {
+  for (;
+       it->Valid() && (!end_key_specified_ || it->key().ToString() < end_key_);
+       it->Next()) {
     if (is_db_ttl_) {
       TtlIterator* it_ttl = static_cast_with_check(it);
       int rawtime = it_ttl->ttl_timestamp();
@@ -3253,8 +3245,9 @@ void DBQuerierCommand::Help(std::string& ret) {
   ret.append(DBQuerierCommand::Name());
   ret.append(" [--" + ARG_TTL + "]");
   ret.append("\n");
-  ret.append(" Starts a REPL shell. Type help for list of available "
-             "commands.");
+  ret.append(
+      " Starts a REPL shell. Type help for list of available "
+      "commands.");
   ret.append("\n");
 }
@@ -3281,7 +3274,7 @@ void DBQuerierCommand::DoCommand() {
       if (pos2 == std::string::npos) {
         break;
       }
-      tokens.push_back(line.substr(pos, pos2-pos));
+      tokens.push_back(line.substr(pos, pos2 - pos));
       pos = pos2 + 1;
     }
     tokens.push_back(line.substr(pos));
@@ -3315,8 +3308,8 @@ void DBQuerierCommand::DoCommand() {
       key = (is_key_hex_ ? HexToString(tokens[1]) : tokens[1]);
       s = db_->Get(read_options, GetCfHandle(), Slice(key), &value);
       if (s.ok()) {
-        fprintf(stdout, "%s\n", PrintKeyValue(key, value,
-                is_key_hex_, is_value_hex_).c_str());
+        fprintf(stdout, "%s\n",
+                PrintKeyValue(key, value, is_key_hex_, is_value_hex_).c_str());
       } else {
         if (s.IsNotFound()) {
           fprintf(stdout, "Not found %s\n", tokens[1].c_str());
diff --git a/tools/ldb_cmd_impl.h b/tools/ldb_cmd_impl.h
index 17848f2cd..97de981b1 100644
--- a/tools/ldb_cmd_impl.h
+++ b/tools/ldb_cmd_impl.h
@@ -5,13 +5,13 @@
 #pragma once

-#include "rocksdb/utilities/ldb_cmd.h"
-
 #include 
 #include 
 #include 
 #include 

+#include "rocksdb/utilities/ldb_cmd.h"
+
 namespace ROCKSDB_NAMESPACE {

 class CompactorCommand : public LDBCommand {
@@ -85,7 +85,7 @@ class DBDumperCommand : public LDBCommand {
 /**
  * Extract file name from the full path. We handle both the forward slash (/)
  * and backslash (\) to make sure that different OS-s are supported.
- */
+ */
 static std::string GetFileNameFromPath(const std::string& s) {
   std::size_t n = s.find_last_of("/\\");
@@ -573,14 +573,15 @@ class CheckPointCommand : public LDBCommand {
   static std::string Name() { return "checkpoint"; }

   CheckPointCommand(const std::vector& params,
-                   const std::map& options,
-                   const std::vector& flags);
+                    const std::map& options,
+                    const std::vector& flags);

   void DoCommand() override;

   static void Help(std::string& ret);

   std::string checkpoint_dir_;

+ private:
   static const std::string ARG_CHECKPOINT_DIR;
 };
diff --git a/tools/ldb_cmd_test.cc b/tools/ldb_cmd_test.cc
index 5f9e05bb6..5d83a6cd9 100644
--- a/tools/ldb_cmd_test.cc
+++ b/tools/ldb_cmd_test.cc
@@ -26,9 +26,9 @@
 #include "util/file_checksum_helper.h"
 #include "util/random.h"

+using std::map;
 using std::string;
 using std::vector;
-using std::map;

 namespace ROCKSDB_NAMESPACE {
@@ -70,7 +70,7 @@ TEST_F(LdbCmdTest, HexToString) {
     auto actual = ROCKSDB_NAMESPACE::LDBCommand::HexToString(inPair.first);
     auto expected = inPair.second;
     for (unsigned int i = 0; i < actual.length(); i++) {
-      EXPECT_EQ(expected[i], static_cast((signed char) actual[i]));
+      EXPECT_EQ(expected[i], static_cast((signed char)actual[i]));
     }
     auto reverse = ROCKSDB_NAMESPACE::LDBCommand::StringToHex(actual);
     EXPECT_STRCASEEQ(inPair.first.c_str(), reverse.c_str());
diff --git a/tools/ldb_tool.cc b/tools/ldb_tool.cc
index 402516419..eadb6a095 100644
--- a/tools/ldb_tool.cc
+++ b/tools/ldb_tool.cc
@@ -5,6 +5,7 @@
 //
 #ifndef ROCKSDB_LITE
 #include "rocksdb/ldb_tool.h"
+
 #include "rocksdb/utilities/ldb_cmd.h"
 #include "tools/ldb_cmd_impl.h"
diff --git a/tools/reduce_levels_test.cc b/tools/reduce_levels_test.cc
index c538554a0..c8604bf43 100644
--- a/tools/reduce_levels_test.cc
+++ b/tools/reduce_levels_test.cc
@@ -19,7 +19,7 @@ namespace ROCKSDB_NAMESPACE {

 class ReduceLevelTest : public testing::Test {
-public:
+ public:
   ReduceLevelTest() {
     dbname_ = test::PerThreadDBPath("db_reduce_levels_test");
     EXPECT_OK(DestroyDB(dbname_, Options()));
@@ -75,7 +75,7 @@ public:
     return atoi(property.c_str());
   }

-private:
+ private:
   std::string dbname_;
   DB* db_;
 };
diff --git a/tools/simulated_hybrid_file_system.cc b/tools/simulated_hybrid_file_system.cc
index 675d2593f..a474417c7 100644
--- a/tools/simulated_hybrid_file_system.cc
+++ b/tools/simulated_hybrid_file_system.cc
@@ -6,13 +6,12 @@
 #include "util/stop_watch.h"

 #ifndef ROCKSDB_LITE

-#include "tools/simulated_hybrid_file_system.h"
-
 #include 
 #include 
 #include 

 #include "rocksdb/rate_limiter.h"
+#include "tools/simulated_hybrid_file_system.h"

 namespace ROCKSDB_NAMESPACE {
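[Editorial note: two of the hunks above (CheckPointCommand in ldb_cmd_impl.h and ReduceLevelTest in reduce_levels_test.cc) re-indent access specifiers to a single leading space. That follows from the Google preset's AccessModifierOffset of -1 against a 2-space IndentWidth; a sketch with a hypothetical class:

    class Example {
     public:  // 2-space member indent plus offset -1 = one leading space
      Example() = default;

     private:
      int member_ = 0;
    };
]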
diff --git a/tools/sst_dump_tool.cc b/tools/sst_dump_tool.cc
index 7053366e7..0a2c28280 100644
--- a/tools/sst_dump_tool.cc
+++ b/tools/sst_dump_tool.cc
@@ -259,9 +259,9 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) {
         try {
           in_key = ROCKSDB_NAMESPACE::LDBCommand::HexToString(in_key);
         } catch (...) {
-          std::cerr << "ERROR: Invalid key input '"
-              << in_key
-              << "' Use 0x{hex representation of internal rocksdb key}" << std::endl;
+          std::cerr << "ERROR: Invalid key input '" << in_key
+                    << "' Use 0x{hex representation of internal rocksdb key}"
+                    << std::endl;
           return -1;
         }
         Slice sl_key = ROCKSDB_NAMESPACE::Slice(in_key);
@@ -331,14 +331,15 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) {
     }
   }

-  if(has_compression_level_from && has_compression_level_to) {
-    if(!has_specified_compression_types || compression_types.size() != 1) {
+  if (has_compression_level_from && has_compression_level_to) {
+    if (!has_specified_compression_types || compression_types.size() != 1) {
       fprintf(stderr, "Specify one compression type.\n\n");
       exit(1);
     }
-  } else if(has_compression_level_from || has_compression_level_to) {
-    fprintf(stderr, "Specify both --compression_level_from and "
-            "--compression_level_to.\n\n");
+  } else if (has_compression_level_from || has_compression_level_to) {
+    fprintf(stderr,
+            "Specify both --compression_level_from and "
+            "--compression_level_to.\n\n");
     exit(1);
   }
@@ -476,8 +477,7 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) {
                        has_from || use_from_as_prefix, from_key, has_to, to_key,
                        use_from_as_prefix);
       if (!st.ok()) {
-        fprintf(stderr, "%s: %s\n", filename.c_str(),
-                st.ToString().c_str());
+        fprintf(stderr, "%s: %s\n", filename.c_str(), st.ToString().c_str());
       }
       total_read += dumper.GetReadNumber();
       if (read_num > 0 && total_read > read_num) {
diff --git a/tools/trace_analyzer_test.cc b/tools/trace_analyzer_test.cc
index 146c0c6f4..d7f9e4da8 100644
--- a/tools/trace_analyzer_test.cc
+++ b/tools/trace_analyzer_test.cc
@@ -111,7 +111,7 @@ class TraceAnalyzerTest : public testing::Test {
     single_iter->SeekForPrev("b");
     ASSERT_OK(single_iter->status());
     delete single_iter;
-    std::this_thread::sleep_for (std::chrono::seconds(1));
+    std::this_thread::sleep_for(std::chrono::seconds(1));

     db_->Get(ro, "g", &value).PermitUncheckedError();
diff --git a/tools/write_stress.cc b/tools/write_stress.cc
index 31161ce1c..ba5bd3f4f 100644
--- a/tools/write_stress.cc
+++ b/tools/write_stress.cc
@@ -208,13 +208,16 @@ class WriteStress {
       SystemClock::Default()->SleepForMicroseconds(
           static_cast(FLAGS_prefix_mutate_period_sec * 1000 * 1000LL));
       if (dist(rng) < FLAGS_first_char_mutate_probability) {
-        key_prefix_[0].store(static_cast(char_dist(rng)), std::memory_order_relaxed);
+        key_prefix_[0].store(static_cast(char_dist(rng)),
+                             std::memory_order_relaxed);
       }
       if (dist(rng) < FLAGS_second_char_mutate_probability) {
-        key_prefix_[1].store(static_cast(char_dist(rng)), std::memory_order_relaxed);
+        key_prefix_[1].store(static_cast(char_dist(rng)),
+                             std::memory_order_relaxed);
       }
       if (dist(rng) < FLAGS_third_char_mutate_probability) {
-        key_prefix_[2].store(static_cast(char_dist(rng)), std::memory_order_relaxed);
+        key_prefix_[2].store(static_cast(char_dist(rng)),
+                             std::memory_order_relaxed);
       }
     }
   }
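[Editorial note: to reproduce a formatting pass like this locally, something along the following lines should work, assuming the .clang-format file checked in at the repository root and a shell that expands the globs; the exact invocation used to generate this change is not recorded in the patch, and tools/ also has subdirectories that a flat glob would miss:

    clang-format -i --style=file db_stress_tool/*.cc db_stress_tool/*.h tools/*.cc tools/*.h

Since clang-format rewrites only whitespace, line breaks, and comment placement, the patch is behavior-neutral by construction.]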