Run clang format against files under tools/ and db_stress_tool/ (#10868)

Summary:
Some lines of .h and .cc files are not properly formatted. Clear them up with clang format.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/10868

Test Plan: Watch existing CI to pass

Reviewed By: ajkr

Differential Revision: D40683485

fbshipit-source-id: 491fbb78b2cdcb948164f306829909ad816d5d0b
main
sdong 2 years ago committed by Facebook GitHub Bot
parent 95a1935cb1
commit 48fe921754
  1. 4
      db_stress_tool/batched_ops_stress.cc
  2. 4
      db_stress_tool/db_stress_common.h
  3. 4
      db_stress_tool/db_stress_shared_state.h
  4. 1
      tools/blob_dump.cc
  5. 14
      tools/block_cache_analyzer/block_cache_trace_analyzer.cc
  6. 3
      tools/block_cache_analyzer/block_cache_trace_analyzer.h
  7. 488
      tools/db_bench_tool.cc
  8. 12
      tools/db_sanity_test.cc
  9. 3
      tools/dump/db_dump_tool.cc
  10. 33
      tools/ldb_cmd.cc
  11. 5
      tools/ldb_cmd_impl.h
  12. 2
      tools/ldb_cmd_test.cc
  13. 1
      tools/ldb_tool.cc
  14. 3
      tools/simulated_hybrid_file_system.cc
  15. 12
      tools/sst_dump_tool.cc
  16. 9
      tools/write_stress.cc

@ -188,8 +188,8 @@ class BatchedOpsStressTest : public StressTest {
const std::vector<int64_t>& rand_keys) override { const std::vector<int64_t>& rand_keys) override {
size_t num_keys = rand_keys.size(); size_t num_keys = rand_keys.size();
std::vector<Status> ret_status(num_keys); std::vector<Status> ret_status(num_keys);
std::array<std::string, 10> keys = {{"0", "1", "2", "3", "4", std::array<std::string, 10> keys = {
"5", "6", "7", "8", "9"}}; {"0", "1", "2", "3", "4", "5", "6", "7", "8", "9"}};
size_t num_prefixes = keys.size(); size_t num_prefixes = keys.size();
for (size_t rand_key = 0; rand_key < num_keys; ++rand_key) { for (size_t rand_key = 0; rand_key < num_keys; ++rand_key) {
std::vector<Slice> key_slices; std::vector<Slice> key_slices;

@ -509,8 +509,8 @@ extern inline std::string Key(int64_t val) {
if (offset < weight) { if (offset < weight) {
// Use the bottom 3 bits of offset as the number of trailing 'x's in the // Use the bottom 3 bits of offset as the number of trailing 'x's in the
// key. If the next key is going to be of the next level, then skip the // key. If the next key is going to be of the next level, then skip the
// trailer as it would break ordering. If the key length is already at max, // trailer as it would break ordering. If the key length is already at
// skip the trailer. // max, skip the trailer.
if (offset < weight - 1 && level < levels - 1) { if (offset < weight - 1 && level < levels - 1) {
size_t trailer_len = offset & 0x7; size_t trailer_len = offset & 0x7;
key.append(trailer_len, 'x'); key.append(trailer_len, 'x');

@ -333,9 +333,7 @@ class SharedState {
uint64_t GetStartTimestamp() const { return start_timestamp_; } uint64_t GetStartTimestamp() const { return start_timestamp_; }
private: private:
static void IgnoreReadErrorCallback(void*) { static void IgnoreReadErrorCallback(void*) { ignore_read_error = true; }
ignore_read_error = true;
}
// Pick random keys in each column family that will not experience overwrite. // Pick random keys in each column family that will not experience overwrite.
std::unordered_set<int64_t> GenerateNoOverwriteIds() const { std::unordered_set<int64_t> GenerateNoOverwriteIds() const {

@ -5,6 +5,7 @@
#ifndef ROCKSDB_LITE #ifndef ROCKSDB_LITE
#include <getopt.h> #include <getopt.h>
#include <cstdio> #include <cstdio>
#include <string> #include <string>
#include <unordered_map> #include <unordered_map>

@ -1175,7 +1175,8 @@ void BlockCacheTraceAnalyzer::WriteReuseLifetime(
} }
void BlockCacheTraceAnalyzer::WriteBlockReuseTimeline( void BlockCacheTraceAnalyzer::WriteBlockReuseTimeline(
const uint64_t reuse_window, bool user_access_only, TraceType block_type) const { const uint64_t reuse_window, bool user_access_only,
TraceType block_type) const {
// A map from block key to an array of bools that states whether a block is // A map from block key to an array of bools that states whether a block is
// accessed in a time window. // accessed in a time window.
std::map<uint64_t, std::vector<bool>> block_accessed; std::map<uint64_t, std::vector<bool>> block_accessed;
@ -1214,7 +1215,8 @@ void BlockCacheTraceAnalyzer::WriteBlockReuseTimeline(
TraverseBlocks(block_callback); TraverseBlocks(block_callback);
// A cell is the number of blocks accessed in a reuse window. // A cell is the number of blocks accessed in a reuse window.
std::unique_ptr<uint64_t[]> reuse_table(new uint64_t[reuse_vector_size * reuse_vector_size]); std::unique_ptr<uint64_t[]> reuse_table(
new uint64_t[reuse_vector_size * reuse_vector_size]);
for (uint64_t start_time = 0; start_time < reuse_vector_size; start_time++) { for (uint64_t start_time = 0; start_time < reuse_vector_size; start_time++) {
// Initialize the reuse_table. // Initialize the reuse_table.
for (uint64_t i = 0; i < reuse_vector_size; i++) { for (uint64_t i = 0; i < reuse_vector_size; i++) {
@ -1255,7 +1257,8 @@ void BlockCacheTraceAnalyzer::WriteBlockReuseTimeline(
if (j < start_time) { if (j < start_time) {
row += "100.0"; row += "100.0";
} else { } else {
row += std::to_string(percent(reuse_table[start_time * reuse_vector_size + j], row += std::to_string(
percent(reuse_table[start_time * reuse_vector_size + j],
reuse_table[start_time * reuse_vector_size + start_time])); reuse_table[start_time * reuse_vector_size + start_time]));
} }
} }
@ -1811,8 +1814,9 @@ void BlockCacheTraceAnalyzer::PrintDataBlockAccessStats() const {
return; return;
} }
// Use four decimal points. // Use four decimal points.
uint64_t percent_referenced_for_existing_keys = (uint64_t)( uint64_t percent_referenced_for_existing_keys =
((double)block.key_num_access_map.size() / (double)block.num_keys) * (uint64_t)(((double)block.key_num_access_map.size() /
(double)block.num_keys) *
10000.0); 10000.0);
uint64_t percent_referenced_for_non_existing_keys = uint64_t percent_referenced_for_non_existing_keys =
(uint64_t)(((double)block.non_exist_key_num_access_map.size() / (uint64_t)(((double)block.non_exist_key_num_access_map.size() /

@ -292,7 +292,8 @@ class BlockCacheTraceAnalyzer {
// The file is named // The file is named
// "block_type_user_access_only_reuse_window_reuse_timeline". The file format // "block_type_user_access_only_reuse_window_reuse_timeline". The file format
// is start_time,0,1,...,N where N equals trace_duration / reuse_window. // is start_time,0,1,...,N where N equals trace_duration / reuse_window.
void WriteBlockReuseTimeline(const uint64_t reuse_window, bool user_access_only, void WriteBlockReuseTimeline(const uint64_t reuse_window,
bool user_access_only,
TraceType block_type) const; TraceType block_type) const;
// Write the Get spatical locality into csv files saved in 'output_dir'. // Write the Get spatical locality into csv files saved in 'output_dir'.

@ -289,10 +289,12 @@ DEFINE_string(column_family_distribution, "",
"and `num_hot_column_families=0`, a valid list could be " "and `num_hot_column_families=0`, a valid list could be "
"\"10,20,30,40\"."); "\"10,20,30,40\".");
DEFINE_int64(reads, -1, "Number of read operations to do. " DEFINE_int64(reads, -1,
"Number of read operations to do. "
"If negative, do FLAGS_num reads."); "If negative, do FLAGS_num reads.");
DEFINE_int64(deletes, -1, "Number of delete operations to do. " DEFINE_int64(deletes, -1,
"Number of delete operations to do. "
"If negative, do FLAGS_num deletions."); "If negative, do FLAGS_num deletions.");
DEFINE_int32(bloom_locality, 0, "Control bloom filter probes locality"); DEFINE_int32(bloom_locality, 0, "Control bloom filter probes locality");
@ -304,7 +306,8 @@ static int64_t seed_base;
DEFINE_int32(threads, 1, "Number of concurrent threads to run."); DEFINE_int32(threads, 1, "Number of concurrent threads to run.");
DEFINE_int32(duration, 0, "Time in seconds for the random-ops tests to run." DEFINE_int32(duration, 0,
"Time in seconds for the random-ops tests to run."
" When 0 then num & reads determine the test duration"); " When 0 then num & reads determine the test duration");
DEFINE_string(value_size_distribution_type, "fixed", DEFINE_string(value_size_distribution_type, "fixed",
@ -357,8 +360,9 @@ DEFINE_int32(user_timestamp_size, 0,
DEFINE_int32(num_multi_db, 0, DEFINE_int32(num_multi_db, 0,
"Number of DBs used in the benchmark. 0 means single DB."); "Number of DBs used in the benchmark. 0 means single DB.");
DEFINE_double(compression_ratio, 0.5, "Arrange to generate values that shrink" DEFINE_double(compression_ratio, 0.5,
" to this fraction of their original size after compression"); "Arrange to generate values that shrink to this fraction of "
"their original size after compression");
DEFINE_double( DEFINE_double(
overwrite_probability, 0.0, overwrite_probability, 0.0,
@ -514,9 +518,8 @@ DEFINE_int32(max_background_compactions,
DEFINE_uint64(subcompactions, 1, DEFINE_uint64(subcompactions, 1,
"Maximum number of subcompactions to divide L0-L1 compactions " "Maximum number of subcompactions to divide L0-L1 compactions "
"into."); "into.");
static const bool FLAGS_subcompactions_dummy static const bool FLAGS_subcompactions_dummy __attribute__((__unused__)) =
__attribute__((__unused__)) = RegisterFlagValidator(&FLAGS_subcompactions, RegisterFlagValidator(&FLAGS_subcompactions, &ValidateUint32Range);
&ValidateUint32Range);
DEFINE_int32(max_background_flushes, DEFINE_int32(max_background_flushes,
ROCKSDB_NAMESPACE::Options().max_background_flushes, ROCKSDB_NAMESPACE::Options().max_background_flushes,
@ -537,11 +540,13 @@ DEFINE_int32(universal_size_ratio, 0,
"Percentage flexibility while comparing file size " "Percentage flexibility while comparing file size "
"(for universal compaction only)."); "(for universal compaction only).");
DEFINE_int32(universal_min_merge_width, 0, "The minimum number of files in a" DEFINE_int32(universal_min_merge_width, 0,
" single compaction run (for universal compaction only)."); "The minimum number of files in a single compaction run "
"(for universal compaction only).");
DEFINE_int32(universal_max_merge_width, 0, "The max number of files to compact" DEFINE_int32(universal_max_merge_width, 0,
" in universal style compaction"); "The max number of files to compact in universal style "
"compaction");
DEFINE_int32(universal_max_size_amplification_percent, 0, DEFINE_int32(universal_max_size_amplification_percent, 0,
"The max size amplification for universal style compaction"); "The max size amplification for universal style compaction");
@ -747,9 +752,10 @@ DEFINE_bool(whole_key_filtering,
ROCKSDB_NAMESPACE::BlockBasedTableOptions().whole_key_filtering, ROCKSDB_NAMESPACE::BlockBasedTableOptions().whole_key_filtering,
"Use whole keys (in addition to prefixes) in SST bloom filter."); "Use whole keys (in addition to prefixes) in SST bloom filter.");
DEFINE_bool(use_existing_db, false, "If true, do not destroy the existing" DEFINE_bool(use_existing_db, false,
" database. If you set this flag and also specify a benchmark that" "If true, do not destroy the existing database. If you set this "
" wants a fresh database, that benchmark will fail."); "flag and also specify a benchmark that wants a fresh database, "
"that benchmark will fail.");
DEFINE_bool(use_existing_keys, false, DEFINE_bool(use_existing_keys, false,
"If true, uses existing keys in the DB, " "If true, uses existing keys in the DB, "
@ -787,16 +793,15 @@ DEFINE_bool(use_keep_filter, false, "Whether to use a noop compaction filter");
static bool ValidateCacheNumshardbits(const char* flagname, int32_t value) { static bool ValidateCacheNumshardbits(const char* flagname, int32_t value) {
if (value >= 20) { if (value >= 20) {
fprintf(stderr, "Invalid value for --%s: %d, must be < 20\n", fprintf(stderr, "Invalid value for --%s: %d, must be < 20\n", flagname,
flagname, value); value);
return false; return false;
} }
return true; return true;
} }
DEFINE_bool(verify_checksum, true, DEFINE_bool(verify_checksum, true,
"Verify checksum for every block read" "Verify checksum for every block read from storage");
" from storage");
DEFINE_int32(checksum_type, DEFINE_int32(checksum_type,
ROCKSDB_NAMESPACE::BlockBasedTableOptions().checksum, ROCKSDB_NAMESPACE::BlockBasedTableOptions().checksum,
@ -808,10 +813,11 @@ DEFINE_int32(stats_level, ROCKSDB_NAMESPACE::StatsLevel::kExceptDetailedTimers,
DEFINE_string(statistics_string, "", "Serialized statistics string"); DEFINE_string(statistics_string, "", "Serialized statistics string");
static class std::shared_ptr<ROCKSDB_NAMESPACE::Statistics> dbstats; static class std::shared_ptr<ROCKSDB_NAMESPACE::Statistics> dbstats;
DEFINE_int64(writes, -1, "Number of write operations to do. If negative, do" DEFINE_int64(writes, -1,
" --num reads."); "Number of write operations to do. If negative, do --num reads.");
DEFINE_bool(finish_after_writes, false, "Write thread terminates after all writes are finished"); DEFINE_bool(finish_after_writes, false,
"Write thread terminates after all writes are finished");
DEFINE_bool(sync, false, "Sync all writes to disk"); DEFINE_bool(sync, false, "Sync all writes to disk");
@ -877,24 +883,27 @@ DEFINE_uint64(ttl_seconds, ROCKSDB_NAMESPACE::Options().ttl, "Set options.ttl");
static bool ValidateInt32Percent(const char* flagname, int32_t value) { static bool ValidateInt32Percent(const char* flagname, int32_t value) {
if (value <= 0 || value >= 100) { if (value <= 0 || value >= 100) {
fprintf(stderr, "Invalid value for --%s: %d, 0< pct <100 \n", fprintf(stderr, "Invalid value for --%s: %d, 0< pct <100 \n", flagname,
flagname, value); value);
return false; return false;
} }
return true; return true;
} }
DEFINE_int32(readwritepercent, 90, "Ratio of reads to reads/writes (expressed" DEFINE_int32(readwritepercent, 90,
" as percentage) for the ReadRandomWriteRandom workload. The " "Ratio of reads to reads/writes (expressed as percentage) for "
"default value 90 means 90% operations out of all reads and writes" "the ReadRandomWriteRandom workload. The default value 90 means "
" operations are reads. In other words, 9 gets for every 1 put."); "90% operations out of all reads and writes operations are "
"reads. In other words, 9 gets for every 1 put.");
DEFINE_int32(mergereadpercent, 70, "Ratio of merges to merges&reads (expressed"
" as percentage) for the ReadRandomMergeRandom workload. The" DEFINE_int32(mergereadpercent, 70,
" default value 70 means 70% out of all read and merge operations" "Ratio of merges to merges&reads (expressed as percentage) for "
" are merges. In other words, 7 merges for every 3 gets."); "the ReadRandomMergeRandom workload. The default value 70 means "
"70% out of all read and merge operations are merges. In other "
DEFINE_int32(deletepercent, 2, "Percentage of deletes out of reads/writes/" "words, 7 merges for every 3 gets.");
"deletes (used in RandomWithVerify only). RandomWithVerify "
DEFINE_int32(deletepercent, 2,
"Percentage of deletes out of reads/writes/deletes (used in "
"RandomWithVerify only). RandomWithVerify "
"calculates writepercent as (100 - FLAGS_readwritepercent - " "calculates writepercent as (100 - FLAGS_readwritepercent - "
"deletepercent), so deletepercent must be smaller than (100 - " "deletepercent), so deletepercent must be smaller than (100 - "
"FLAGS_readwritepercent)"); "FLAGS_readwritepercent)");
@ -1304,7 +1313,8 @@ DEFINE_int32(compression_zstd_max_train_bytes,
"Maximum size of training data passed to zstd's dictionary " "Maximum size of training data passed to zstd's dictionary "
"trainer."); "trainer.");
DEFINE_int32(min_level_to_compress, -1, "If non-negative, compression starts" DEFINE_int32(min_level_to_compress, -1,
"If non-negative, compression starts"
" from this level. Levels with number < min_level_to_compress are" " from this level. Levels with number < min_level_to_compress are"
" not compressed. Otherwise, apply compression_type to " " not compressed. Otherwise, apply compression_type to "
"all levels."); "all levels.");
@ -1342,8 +1352,8 @@ DEFINE_string(fs_uri, "",
#endif // ROCKSDB_LITE #endif // ROCKSDB_LITE
DEFINE_string(simulate_hybrid_fs_file, "", DEFINE_string(simulate_hybrid_fs_file, "",
"File for Store Metadata for Simulate hybrid FS. Empty means " "File for Store Metadata for Simulate hybrid FS. Empty means "
"disable the feature. Now, if it is set, " "disable the feature. Now, if it is set, last_level_temperature "
"last_level_temperature is set to kWarm."); "is set to kWarm.");
DEFINE_int32(simulate_hybrid_hdd_multipliers, 1, DEFINE_int32(simulate_hybrid_hdd_multipliers, 1,
"In simulate_hybrid_fs_file or simulate_hdd mode, how many HDDs " "In simulate_hybrid_fs_file or simulate_hdd mode, how many HDDs "
"are simulated."); "are simulated.");
@ -1360,18 +1370,21 @@ static std::shared_ptr<ROCKSDB_NAMESPACE::Env> env_guard;
static ROCKSDB_NAMESPACE::Env* FLAGS_env = ROCKSDB_NAMESPACE::Env::Default(); static ROCKSDB_NAMESPACE::Env* FLAGS_env = ROCKSDB_NAMESPACE::Env::Default();
DEFINE_int64(stats_interval, 0, "Stats are reported every N operations when " DEFINE_int64(stats_interval, 0,
"this is greater than zero. When 0 the interval grows over time."); "Stats are reported every N operations when this is greater than "
"zero. When 0 the interval grows over time.");
DEFINE_int64(stats_interval_seconds, 0, "Report stats every N seconds. This " DEFINE_int64(stats_interval_seconds, 0,
"overrides stats_interval when both are > 0."); "Report stats every N seconds. This overrides stats_interval when"
" both are > 0.");
DEFINE_int32(stats_per_interval, 0, "Reports additional stats per interval when" DEFINE_int32(stats_per_interval, 0,
" this is greater than 0."); "Reports additional stats per interval when this is greater than "
"0.");
DEFINE_uint64(slow_usecs, 1000000, DEFINE_uint64(slow_usecs, 1000000,
"A message is printed for operations that " "A message is printed for operations that take at least this "
"take at least this many microseconds."); "many microseconds.");
DEFINE_int64(report_interval_seconds, 0, DEFINE_int64(report_interval_seconds, 0,
"If greater than zero, it will write simple stats in CSV format " "If greater than zero, it will write simple stats in CSV format "
@ -1441,24 +1454,19 @@ DEFINE_bool(rate_limiter_auto_tuned, false,
"Enable dynamic adjustment of rate limit according to demand for " "Enable dynamic adjustment of rate limit according to demand for "
"background I/O"); "background I/O");
DEFINE_bool(sine_write_rate, false, "Use a sine wave write_rate_limit");
DEFINE_bool(sine_write_rate, false, DEFINE_uint64(
"Use a sine wave write_rate_limit"); sine_write_rate_interval_milliseconds, 10000,
DEFINE_uint64(sine_write_rate_interval_milliseconds, 10000,
"Interval of which the sine wave write_rate_limit is recalculated"); "Interval of which the sine wave write_rate_limit is recalculated");
DEFINE_double(sine_a, 1, DEFINE_double(sine_a, 1, "A in f(x) = A sin(bx + c) + d");
"A in f(x) = A sin(bx + c) + d");
DEFINE_double(sine_b, 1, DEFINE_double(sine_b, 1, "B in f(x) = A sin(bx + c) + d");
"B in f(x) = A sin(bx + c) + d");
DEFINE_double(sine_c, 0, DEFINE_double(sine_c, 0, "C in f(x) = A sin(bx + c) + d");
"C in f(x) = A sin(bx + c) + d");
DEFINE_double(sine_d, 1, DEFINE_double(sine_d, 1, "D in f(x) = A sin(bx + c) + d");
"D in f(x) = A sin(bx + c) + d");
DEFINE_bool(rate_limit_bg_reads, false, DEFINE_bool(rate_limit_bg_reads, false,
"Use options.rate_limiter on compaction reads"); "Use options.rate_limiter on compaction reads");
@ -1548,8 +1556,8 @@ DEFINE_bool(print_malloc_stats, false,
DEFINE_bool(disable_auto_compactions, false, "Do not auto trigger compactions"); DEFINE_bool(disable_auto_compactions, false, "Do not auto trigger compactions");
DEFINE_uint64(wal_ttl_seconds, 0, "Set the TTL for the WAL Files in seconds."); DEFINE_uint64(wal_ttl_seconds, 0, "Set the TTL for the WAL Files in seconds.");
DEFINE_uint64(wal_size_limit_MB, 0, "Set the size limit for the WAL Files" DEFINE_uint64(wal_size_limit_MB, 0,
" in MB."); "Set the size limit for the WAL Files in MB.");
DEFINE_uint64(max_total_wal_size, 0, "Set total max WAL size"); DEFINE_uint64(max_total_wal_size, 0, "Set total max WAL size");
DEFINE_bool(mmap_read, ROCKSDB_NAMESPACE::Options().allow_mmap_reads, DEFINE_bool(mmap_read, ROCKSDB_NAMESPACE::Options().allow_mmap_reads,
@ -1616,8 +1624,9 @@ DEFINE_int32(num_deletion_threads, 1,
"Number of threads to do deletion (used in TimeSeries and delete " "Number of threads to do deletion (used in TimeSeries and delete "
"expire_style only)."); "expire_style only).");
DEFINE_int32(max_successive_merges, 0, "Maximum number of successive merge" DEFINE_int32(max_successive_merges, 0,
" operations on a key in the memtable"); "Maximum number of successive merge operations on a key in the "
"memtable");
static bool ValidatePrefixSize(const char* flagname, int32_t value) { static bool ValidatePrefixSize(const char* flagname, int32_t value) {
if (value < 0 || value >= 2000000000) { if (value < 0 || value >= 2000000000) {
@ -1628,11 +1637,12 @@ static bool ValidatePrefixSize(const char* flagname, int32_t value) {
return true; return true;
} }
DEFINE_int32(prefix_size, 0, "control the prefix size for HashSkipList and " DEFINE_int32(prefix_size, 0,
"plain table"); "control the prefix size for HashSkipList and plain table");
DEFINE_int64(keys_per_prefix, 0, "control average number of keys generated " DEFINE_int64(keys_per_prefix, 0,
"per prefix, 0 means no special handling of the prefix, " "control average number of keys generated per prefix, 0 means no "
"i.e. use the prefix comes with the generated random number."); "special handling of the prefix, i.e. use the prefix comes with "
"the generated random number.");
DEFINE_bool(total_order_seek, false, DEFINE_bool(total_order_seek, false,
"Enable total order seek regardless of index format."); "Enable total order seek regardless of index format.");
DEFINE_bool(prefix_same_as_start, false, DEFINE_bool(prefix_same_as_start, false,
@ -1644,13 +1654,13 @@ DEFINE_bool(
DEFINE_int32(memtable_insert_with_hint_prefix_size, 0, DEFINE_int32(memtable_insert_with_hint_prefix_size, 0,
"If non-zero, enable " "If non-zero, enable "
"memtable insert with hint with the given prefix size."); "memtable insert with hint with the given prefix size.");
DEFINE_bool(enable_io_prio, false, "Lower the background flush/compaction " DEFINE_bool(enable_io_prio, false,
"threads' IO priority"); "Lower the background flush/compaction threads' IO priority");
DEFINE_bool(enable_cpu_prio, false, "Lower the background flush/compaction " DEFINE_bool(enable_cpu_prio, false,
"threads' CPU priority"); "Lower the background flush/compaction threads' CPU priority");
DEFINE_bool(identity_as_first_hash, false, "the first hash function of cuckoo " DEFINE_bool(identity_as_first_hash, false,
"table becomes an identity function. This is only valid when key " "the first hash function of cuckoo table becomes an identity "
"is 8 bytes"); "function. This is only valid when key is 8 bytes");
DEFINE_bool(dump_malloc_stats, true, "Dump malloc stats in LOG "); DEFINE_bool(dump_malloc_stats, true, "Dump malloc stats in LOG ");
DEFINE_uint64(stats_dump_period_sec, DEFINE_uint64(stats_dump_period_sec,
ROCKSDB_NAMESPACE::Options().stats_dump_period_sec, ROCKSDB_NAMESPACE::Options().stats_dump_period_sec,
@ -1673,22 +1683,23 @@ DEFINE_bool(multiread_batched, false, "Use the new MultiGet API");
DEFINE_string(memtablerep, "skip_list", ""); DEFINE_string(memtablerep, "skip_list", "");
DEFINE_int64(hash_bucket_count, 1024 * 1024, "hash bucket count"); DEFINE_int64(hash_bucket_count, 1024 * 1024, "hash bucket count");
DEFINE_bool(use_plain_table, false, "if use plain table " DEFINE_bool(use_plain_table, false,
"instead of block-based table format"); "if use plain table instead of block-based table format");
DEFINE_bool(use_cuckoo_table, false, "if use cuckoo table format"); DEFINE_bool(use_cuckoo_table, false, "if use cuckoo table format");
DEFINE_double(cuckoo_hash_ratio, 0.9, "Hash ratio for Cuckoo SST table."); DEFINE_double(cuckoo_hash_ratio, 0.9, "Hash ratio for Cuckoo SST table.");
DEFINE_bool(use_hash_search, false, "if use kHashSearch " DEFINE_bool(use_hash_search, false,
"instead of kBinarySearch. " "if use kHashSearch instead of kBinarySearch. "
"This is valid if only we use BlockTable"); "This is valid if only we use BlockTable");
DEFINE_string(merge_operator, "", "The merge operator to use with the database." DEFINE_string(merge_operator, "",
"The merge operator to use with the database."
"If a new merge operator is specified, be sure to use fresh" "If a new merge operator is specified, be sure to use fresh"
" database The possible merge operators are defined in" " database The possible merge operators are defined in"
" utilities/merge_operators.h"); " utilities/merge_operators.h");
DEFINE_int32(skip_list_lookahead, 0, "Used with skip_list memtablerep; try " DEFINE_int32(skip_list_lookahead, 0,
"linear search first for this many steps from the previous " "Used with skip_list memtablerep; try linear search first for "
"position"); "this many steps from the previous position");
DEFINE_bool(report_file_operations, false, "if report number of file " DEFINE_bool(report_file_operations, false,
"operations"); "if report number of file operations");
DEFINE_bool(report_open_timing, false, "if report open timing"); DEFINE_bool(report_open_timing, false, "if report open timing");
DEFINE_int32(readahead_size, 0, "Iterator readahead size"); DEFINE_int32(readahead_size, 0, "Iterator readahead size");
@ -1724,9 +1735,9 @@ DEFINE_bool(allow_data_in_errors,
static const bool FLAGS_deletepercent_dummy __attribute__((__unused__)) = static const bool FLAGS_deletepercent_dummy __attribute__((__unused__)) =
RegisterFlagValidator(&FLAGS_deletepercent, &ValidateInt32Percent); RegisterFlagValidator(&FLAGS_deletepercent, &ValidateInt32Percent);
static const bool FLAGS_table_cache_numshardbits_dummy __attribute__((__unused__)) = static const bool FLAGS_table_cache_numshardbits_dummy
RegisterFlagValidator(&FLAGS_table_cache_numshardbits, __attribute__((__unused__)) = RegisterFlagValidator(
&ValidateTableCacheNumshardbits); &FLAGS_table_cache_numshardbits, &ValidateTableCacheNumshardbits);
DEFINE_uint32(write_batch_protection_bytes_per_key, 0, DEFINE_uint32(write_batch_protection_bytes_per_key, 0,
"Size of per-key-value checksum in each write batch. Currently " "Size of per-key-value checksum in each write batch. Currently "
@ -1775,11 +1786,7 @@ static Status CreateMemTableRepFactory(
} // namespace } // namespace
enum DistributionType : unsigned char { enum DistributionType : unsigned char { kFixed = 0, kUniform, kNormal };
kFixed = 0,
kUniform,
kNormal
};
static enum DistributionType FLAGS_value_size_distribution_type_e = kFixed; static enum DistributionType FLAGS_value_size_distribution_type_e = kFixed;
@ -1811,33 +1818,27 @@ class BaseDistribution {
} }
return val; return val;
} }
private: private:
virtual unsigned int Get() = 0; virtual unsigned int Get() = 0;
virtual bool NeedTruncate() { virtual bool NeedTruncate() { return true; }
return true;
}
unsigned int min_value_size_; unsigned int min_value_size_;
unsigned int max_value_size_; unsigned int max_value_size_;
}; };
class FixedDistribution : public BaseDistribution class FixedDistribution : public BaseDistribution {
{
public: public:
FixedDistribution(unsigned int size) : FixedDistribution(unsigned int size)
BaseDistribution(size, size), : BaseDistribution(size, size), size_(size) {}
size_(size) {}
private: private:
virtual unsigned int Get() override { virtual unsigned int Get() override { return size_; }
return size_; virtual bool NeedTruncate() override { return false; }
}
virtual bool NeedTruncate() override {
return false;
}
unsigned int size_; unsigned int size_;
}; };
class NormalDistribution class NormalDistribution : public BaseDistribution,
: public BaseDistribution, public std::normal_distribution<double> { public std::normal_distribution<double> {
public: public:
NormalDistribution(unsigned int _min, unsigned int _max) NormalDistribution(unsigned int _min, unsigned int _max)
: BaseDistribution(_min, _max), : BaseDistribution(_min, _max),
@ -1855,8 +1856,7 @@ class NormalDistribution
std::mt19937 gen_; std::mt19937 gen_;
}; };
class UniformDistribution class UniformDistribution : public BaseDistribution,
: public BaseDistribution,
public std::uniform_int_distribution<unsigned int> { public std::uniform_int_distribution<unsigned int> {
public: public:
UniformDistribution(unsigned int _min, unsigned int _max) UniformDistribution(unsigned int _min, unsigned int _max)
@ -1865,12 +1865,8 @@ class UniformDistribution
gen_(rd_()) {} gen_(rd_()) {}
private: private:
virtual unsigned int Get() override { virtual unsigned int Get() override { return (*this)(gen_); }
return (*this)(gen_); virtual bool NeedTruncate() override { return false; }
}
virtual bool NeedTruncate() override {
return false;
}
std::random_device rd_; std::random_device rd_;
std::mt19937 gen_; std::mt19937 gen_;
}; };
@ -1883,7 +1879,6 @@ class RandomGenerator {
std::unique_ptr<BaseDistribution> dist_; std::unique_ptr<BaseDistribution> dist_;
public: public:
RandomGenerator() { RandomGenerator() {
auto max_value_size = FLAGS_value_size_max; auto max_value_size = FLAGS_value_size_max;
switch (FLAGS_value_size_distribution_type_e) { switch (FLAGS_value_size_distribution_type_e) {
@ -1892,8 +1887,8 @@ class RandomGenerator {
FLAGS_value_size_max)); FLAGS_value_size_max));
break; break;
case kNormal: case kNormal:
dist_.reset(new NormalDistribution(FLAGS_value_size_min, dist_.reset(
FLAGS_value_size_max)); new NormalDistribution(FLAGS_value_size_min, FLAGS_value_size_max));
break; break;
case kFixed: case kFixed:
default: default:
@ -1955,7 +1950,8 @@ struct DBWithColumnFamilies {
DBWithColumnFamilies() DBWithColumnFamilies()
: db(nullptr) : db(nullptr)
#ifndef ROCKSDB_LITE #ifndef ROCKSDB_LITE
, opt_txn_db(nullptr) ,
opt_txn_db(nullptr)
#endif // ROCKSDB_LITE #endif // ROCKSDB_LITE
{ {
cfh.clear(); cfh.clear();
@ -2138,19 +2134,12 @@ enum OperationType : unsigned char {
}; };
static std::unordered_map<OperationType, std::string, std::hash<unsigned char>> static std::unordered_map<OperationType, std::string, std::hash<unsigned char>>
OperationTypeString = { OperationTypeString = {{kRead, "read"}, {kWrite, "write"},
{kRead, "read"}, {kDelete, "delete"}, {kSeek, "seek"},
{kWrite, "write"}, {kMerge, "merge"}, {kUpdate, "update"},
{kDelete, "delete"}, {kCompress, "compress"}, {kCompress, "uncompress"},
{kSeek, "seek"}, {kCrc, "crc"}, {kHash, "hash"},
{kMerge, "merge"}, {kOthers, "op"}};
{kUpdate, "update"},
{kCompress, "compress"},
{kCompress, "uncompress"},
{kCrc, "crc"},
{kHash, "hash"},
{kOthers, "op"}
};
class CombinedStats; class CombinedStats;
class Stats { class Stats {
@ -2168,7 +2157,8 @@ class Stats {
uint64_t last_op_finish_; uint64_t last_op_finish_;
uint64_t last_report_finish_; uint64_t last_report_finish_;
std::unordered_map<OperationType, std::shared_ptr<HistogramImpl>, std::unordered_map<OperationType, std::shared_ptr<HistogramImpl>,
std::hash<unsigned char>> hist_; std::hash<unsigned char>>
hist_;
std::string message_; std::string message_;
bool exclude_from_merge_; bool exclude_from_merge_;
ReporterAgent* reporter_agent_; // does not own ReporterAgent* reporter_agent_; // does not own
@ -2200,8 +2190,7 @@ class Stats {
} }
void Merge(const Stats& other) { void Merge(const Stats& other) {
if (other.exclude_from_merge_) if (other.exclude_from_merge_) return;
return;
for (auto it = other.hist_.begin(); it != other.hist_.end(); ++it) { for (auto it = other.hist_.begin(); it != other.hist_.end(); ++it) {
auto this_it = hist_.find(it->first); auto this_it = hist_.find(it->first);
@ -2227,9 +2216,7 @@ class Stats {
seconds_ = (finish_ - start_) * 1e-6; seconds_ = (finish_ - start_) * 1e-6;
} }
void AddMessage(Slice msg) { void AddMessage(Slice msg) { AppendWithSpace(&message_, msg); }
AppendWithSpace(&message_, msg);
}
void SetId(int id) { id_ = id; } void SetId(int id) { id_ = id; }
void SetExcludeFromMerge() { exclude_from_merge_ = true; } void SetExcludeFromMerge() { exclude_from_merge_ = true; }
@ -2238,9 +2225,9 @@ class Stats {
std::vector<ThreadStatus> thread_list; std::vector<ThreadStatus> thread_list;
FLAGS_env->GetThreadList(&thread_list); FLAGS_env->GetThreadList(&thread_list);
fprintf(stderr, "\n%18s %10s %12s %20s %13s %45s %12s %s\n", fprintf(stderr, "\n%18s %10s %12s %20s %13s %45s %12s %s\n", "ThreadID",
"ThreadID", "ThreadType", "cfName", "Operation", "ThreadType", "cfName", "Operation", "ElapsedTime", "Stage",
"ElapsedTime", "Stage", "State", "OperationProperties"); "State", "OperationProperties");
int64_t current_time = 0; int64_t current_time = 0;
clock_->GetCurrentTime(&current_time).PermitUncheckedError(); clock_->GetCurrentTime(&current_time).PermitUncheckedError();
@ -2257,8 +2244,8 @@ class Stats {
auto op_properties = ThreadStatus::InterpretOperationProperties( auto op_properties = ThreadStatus::InterpretOperationProperties(
ts.operation_type, ts.op_properties); ts.operation_type, ts.op_properties);
for (const auto& op_prop : op_properties) { for (const auto& op_prop : op_properties) {
fprintf(stderr, " %s %" PRIu64" |", fprintf(stderr, " %s %" PRIu64 " |", op_prop.first.c_str(),
op_prop.first.c_str(), op_prop.second); op_prop.second);
} }
fprintf(stderr, "\n"); fprintf(stderr, "\n");
} }
@ -2266,13 +2253,9 @@ class Stats {
void ResetSineInterval() { sine_interval_ = clock_->NowMicros(); } void ResetSineInterval() { sine_interval_ = clock_->NowMicros(); }
uint64_t GetSineInterval() { uint64_t GetSineInterval() { return sine_interval_; }
return sine_interval_;
}
uint64_t GetStart() { uint64_t GetStart() { return start_; }
return start_;
}
void ResetLastOpTime() { void ResetLastOpTime() {
// Set to now to avoid latency from calls to SleepForMicroseconds. // Set to now to avoid latency from calls to SleepForMicroseconds.
@ -2288,8 +2271,7 @@ class Stats {
uint64_t now = clock_->NowMicros(); uint64_t now = clock_->NowMicros();
uint64_t micros = now - last_op_finish_; uint64_t micros = now - last_op_finish_;
if (hist_.find(op_type) == hist_.end()) if (hist_.find(op_type) == hist_.end()) {
{
auto hist_temp = std::make_shared<HistogramImpl>(); auto hist_temp = std::make_shared<HistogramImpl>();
hist_.insert({op_type, std::move(hist_temp)}); hist_.insert({op_type, std::move(hist_temp)});
} }
@ -2305,13 +2287,20 @@ class Stats {
done_ += num_ops; done_ += num_ops;
if (done_ >= next_report_ && FLAGS_progress_reports) { if (done_ >= next_report_ && FLAGS_progress_reports) {
if (!FLAGS_stats_interval) { if (!FLAGS_stats_interval) {
if (next_report_ < 1000) next_report_ += 100; if (next_report_ < 1000)
else if (next_report_ < 5000) next_report_ += 500; next_report_ += 100;
else if (next_report_ < 10000) next_report_ += 1000; else if (next_report_ < 5000)
else if (next_report_ < 50000) next_report_ += 5000; next_report_ += 500;
else if (next_report_ < 100000) next_report_ += 10000; else if (next_report_ < 10000)
else if (next_report_ < 500000) next_report_ += 50000; next_report_ += 1000;
else next_report_ += 100000; else if (next_report_ < 50000)
next_report_ += 5000;
else if (next_report_ < 100000)
next_report_ += 10000;
else if (next_report_ < 500000)
next_report_ += 50000;
else
next_report_ += 100000;
fprintf(stderr, "... finished %" PRIu64 " ops%30s\r", done_, ""); fprintf(stderr, "... finished %" PRIu64 " ops%30s\r", done_, "");
} else { } else {
uint64_t now = clock_->NowMicros(); uint64_t now = clock_->NowMicros();
@ -2397,9 +2386,7 @@ class Stats {
} }
} }
void AddBytes(int64_t n) { void AddBytes(int64_t n) { bytes_ += n; }
bytes_ += n;
}
void Report(const Slice& name) { void Report(const Slice& name) {
// Pretend at least one op was done in case we are running a benchmark // Pretend at least one op was done in case we are running a benchmark
@ -2810,28 +2797,30 @@ class Benchmark {
FLAGS_key_size, FLAGS_user_timestamp_size); FLAGS_key_size, FLAGS_user_timestamp_size);
auto avg_value_size = FLAGS_value_size; auto avg_value_size = FLAGS_value_size;
if (FLAGS_value_size_distribution_type_e == kFixed) { if (FLAGS_value_size_distribution_type_e == kFixed) {
fprintf(stdout, "Values: %d bytes each (%d bytes after compression)\n", fprintf(stdout,
"Values: %d bytes each (%d bytes after compression)\n",
avg_value_size, avg_value_size,
static_cast<int>(avg_value_size * FLAGS_compression_ratio + 0.5)); static_cast<int>(avg_value_size * FLAGS_compression_ratio + 0.5));
} else { } else {
avg_value_size = (FLAGS_value_size_min + FLAGS_value_size_max) / 2; avg_value_size = (FLAGS_value_size_min + FLAGS_value_size_max) / 2;
fprintf(stdout, "Values: %d avg bytes each (%d bytes after compression)\n", fprintf(stdout,
"Values: %d avg bytes each (%d bytes after compression)\n",
avg_value_size, avg_value_size,
static_cast<int>(avg_value_size * FLAGS_compression_ratio + 0.5)); static_cast<int>(avg_value_size * FLAGS_compression_ratio + 0.5));
fprintf(stdout, "Values Distribution: %s (min: %d, max: %d)\n", fprintf(stdout, "Values Distribution: %s (min: %d, max: %d)\n",
FLAGS_value_size_distribution_type.c_str(), FLAGS_value_size_distribution_type.c_str(), FLAGS_value_size_min,
FLAGS_value_size_min, FLAGS_value_size_max); FLAGS_value_size_max);
} }
fprintf(stdout, "Entries: %" PRIu64 "\n", num_); fprintf(stdout, "Entries: %" PRIu64 "\n", num_);
fprintf(stdout, "Prefix: %d bytes\n", FLAGS_prefix_size); fprintf(stdout, "Prefix: %d bytes\n", FLAGS_prefix_size);
fprintf(stdout, "Keys per prefix: %" PRIu64 "\n", keys_per_prefix_); fprintf(stdout, "Keys per prefix: %" PRIu64 "\n", keys_per_prefix_);
fprintf(stdout, "RawSize: %.1f MB (estimated)\n", fprintf(stdout, "RawSize: %.1f MB (estimated)\n",
((static_cast<int64_t>(FLAGS_key_size + avg_value_size) * num_) ((static_cast<int64_t>(FLAGS_key_size + avg_value_size) * num_) /
/ 1048576.0)); 1048576.0));
fprintf(stdout, "FileSize: %.1f MB (estimated)\n", fprintf(
(((FLAGS_key_size + avg_value_size * FLAGS_compression_ratio) stdout, "FileSize: %.1f MB (estimated)\n",
* num_) (((FLAGS_key_size + avg_value_size * FLAGS_compression_ratio) * num_) /
/ 1048576.0)); 1048576.0));
fprintf(stdout, "Write rate: %" PRIu64 " bytes/second\n", fprintf(stdout, "Write rate: %" PRIu64 " bytes/second\n",
FLAGS_benchmark_write_rate_limit); FLAGS_benchmark_write_rate_limit);
fprintf(stdout, "Read rate: %" PRIu64 " ops/second\n", fprintf(stdout, "Read rate: %" PRIu64 " ops/second\n",
@ -2865,9 +2854,9 @@ class Benchmark {
void PrintWarnings(const char* compression) { void PrintWarnings(const char* compression) {
#if defined(__GNUC__) && !defined(__OPTIMIZE__) #if defined(__GNUC__) && !defined(__OPTIMIZE__)
fprintf(stdout, fprintf(
"WARNING: Optimization is disabled: benchmarks unnecessarily slow\n" stdout,
); "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
#endif #endif
#ifndef NDEBUG #ifndef NDEBUG
fprintf(stdout, fprintf(stdout,
@ -4230,7 +4219,8 @@ class Benchmark {
} else if ((FLAGS_prefix_size == 0) && } else if ((FLAGS_prefix_size == 0) &&
(options.memtable_factory->IsInstanceOf("prefix_hash") || (options.memtable_factory->IsInstanceOf("prefix_hash") ||
options.memtable_factory->IsInstanceOf("hash_linkedlist"))) { options.memtable_factory->IsInstanceOf("hash_linkedlist"))) {
fprintf(stderr, "prefix_size should be non-zero if PrefixHash or " fprintf(stderr,
"prefix_size should be non-zero if PrefixHash or "
"HashLinkedList memtablerep is used\n"); "HashLinkedList memtablerep is used\n");
exit(1); exit(1);
} }
@ -4272,8 +4262,8 @@ class Benchmark {
ROCKSDB_NAMESPACE::CuckooTableOptions table_options; ROCKSDB_NAMESPACE::CuckooTableOptions table_options;
table_options.hash_table_ratio = FLAGS_cuckoo_hash_ratio; table_options.hash_table_ratio = FLAGS_cuckoo_hash_ratio;
table_options.identity_as_first_hash = FLAGS_identity_as_first_hash; table_options.identity_as_first_hash = FLAGS_identity_as_first_hash;
options.table_factory = std::shared_ptr<TableFactory>( options.table_factory =
NewCuckooTableFactory(table_options)); std::shared_ptr<TableFactory>(NewCuckooTableFactory(table_options));
#else #else
fprintf(stderr, "Cuckoo table is not supported in lite mode\n"); fprintf(stderr, "Cuckoo table is not supported in lite mode\n");
exit(1); exit(1);
@ -4546,8 +4536,7 @@ class Benchmark {
for (int i = 0; i < FLAGS_min_level_to_compress; i++) { for (int i = 0; i < FLAGS_min_level_to_compress; i++) {
options.compression_per_level[i] = kNoCompression; options.compression_per_level[i] = kNoCompression;
} }
for (int i = FLAGS_min_level_to_compress; for (int i = FLAGS_min_level_to_compress; i < FLAGS_num_levels; i++) {
i < FLAGS_num_levels; i++) {
options.compression_per_level[i] = FLAGS_compression_type_e; options.compression_per_level[i] = FLAGS_compression_type_e;
} }
} }
@ -4848,8 +4837,8 @@ class Benchmark {
} }
#ifndef ROCKSDB_LITE #ifndef ROCKSDB_LITE
if (FLAGS_readonly) { if (FLAGS_readonly) {
s = DB::OpenForReadOnly(options, db_name, column_families, s = DB::OpenForReadOnly(options, db_name, column_families, &db->cfh,
&db->cfh, &db->db); &db->db);
} else if (FLAGS_optimistic_transaction_db) { } else if (FLAGS_optimistic_transaction_db) {
s = OptimisticTransactionDB::Open(options, db_name, column_families, s = OptimisticTransactionDB::Open(options, db_name, column_families,
&db->cfh, &db->opt_txn_db); &db->cfh, &db->opt_txn_db);
@ -4960,9 +4949,7 @@ class Benchmark {
} }
} }
enum WriteMode { enum WriteMode { RANDOM, SEQUENTIAL, UNIQUE_RANDOM };
RANDOM, SEQUENTIAL, UNIQUE_RANDOM
};
void WriteSeqDeterministic(ThreadState* thread) { void WriteSeqDeterministic(ThreadState* thread) {
DoDeterministicCompact(thread, open_options_.compaction_style, SEQUENTIAL); DoDeterministicCompact(thread, open_options_.compaction_style, SEQUENTIAL);
@ -4973,13 +4960,9 @@ class Benchmark {
UNIQUE_RANDOM); UNIQUE_RANDOM);
} }
void WriteSeq(ThreadState* thread) { void WriteSeq(ThreadState* thread) { DoWrite(thread, SEQUENTIAL); }
DoWrite(thread, SEQUENTIAL);
}
void WriteRandom(ThreadState* thread) { void WriteRandom(ThreadState* thread) { DoWrite(thread, RANDOM); }
DoWrite(thread, RANDOM);
}
void WriteUniqueRandom(ThreadState* thread) { void WriteUniqueRandom(ThreadState* thread) {
DoWrite(thread, UNIQUE_RANDOM); DoWrite(thread, UNIQUE_RANDOM);
@ -5033,9 +5016,7 @@ class Benchmark {
std::vector<uint64_t> values_; std::vector<uint64_t> values_;
}; };
DB* SelectDB(ThreadState* thread) { DB* SelectDB(ThreadState* thread) { return SelectDBWithCfh(thread)->db; }
return SelectDBWithCfh(thread)->db;
}
DBWithColumnFamilies* SelectDBWithCfh(ThreadState* thread) { DBWithColumnFamilies* SelectDBWithCfh(ThreadState* thread) {
return SelectDBWithCfh(thread->rand.Next()); return SelectDBWithCfh(thread->rand.Next());
@ -5354,8 +5335,7 @@ class Benchmark {
// We use same rand_num as seed for key and column family so that we // We use same rand_num as seed for key and column family so that we
// can deterministically find the cfh corresponding to a particular // can deterministically find the cfh corresponding to a particular
// key while reading the key. // key while reading the key.
batch.Put(db_with_cfh->GetCfh(rand_num), key, batch.Put(db_with_cfh->GetCfh(rand_num), key, val);
val);
} }
batch_bytes += val.size() + key_size_ + user_timestamp_size_; batch_bytes += val.size() + key_size_ + user_timestamp_size_;
bytes += val.size() + key_size_ + user_timestamp_size_; bytes += val.size() + key_size_ + user_timestamp_size_;
@ -5427,8 +5407,8 @@ class Benchmark {
} }
if (thread->shared->write_rate_limiter.get() != nullptr) { if (thread->shared->write_rate_limiter.get() != nullptr) {
thread->shared->write_rate_limiter->Request( thread->shared->write_rate_limiter->Request(
batch_bytes, Env::IO_HIGH, batch_bytes, Env::IO_HIGH, nullptr /* stats */,
nullptr /* stats */, RateLimiter::OpType::kWrite); RateLimiter::OpType::kWrite);
// Set time at which last op finished to Now() to hide latency and // Set time at which last op finished to Now() to hide latency and
// sleep from rate limiter. Also, do the check once per batch, not // sleep from rate limiter. Also, do the check once per batch, not
// once per write. // once per write.
@ -5564,11 +5544,13 @@ class Benchmark {
continue; continue;
} }
} }
writes_ /= static_cast<int64_t>(open_options_.max_bytes_for_level_multiplier); writes_ /=
static_cast<int64_t>(open_options_.max_bytes_for_level_multiplier);
} }
for (size_t i = 0; i < num_db; i++) { for (size_t i = 0; i < num_db; i++) {
if (sorted_runs[i].size() < num_levels - 1) { if (sorted_runs[i].size() < num_levels - 1) {
fprintf(stderr, "n is too small to fill %" ROCKSDB_PRIszt " levels\n", num_levels); fprintf(stderr, "n is too small to fill %" ROCKSDB_PRIszt " levels\n",
num_levels);
exit(1); exit(1);
} }
} }
@ -5579,12 +5561,13 @@ class Benchmark {
auto options = db->GetOptions(); auto options = db->GetOptions();
MutableCFOptions mutable_cf_options(options); MutableCFOptions mutable_cf_options(options);
for (size_t j = 0; j < sorted_runs[i].size(); j++) { for (size_t j = 0; j < sorted_runs[i].size(); j++) {
compactionOptions.output_file_size_limit = compactionOptions.output_file_size_limit = MaxFileSizeForLevel(
MaxFileSizeForLevel(mutable_cf_options, mutable_cf_options, static_cast<int>(output_level),
static_cast<int>(output_level), compaction_style); compaction_style);
std::cout << sorted_runs[i][j].size() << std::endl; std::cout << sorted_runs[i][j].size() << std::endl;
db->CompactFiles(compactionOptions, {sorted_runs[i][j].back().name, db->CompactFiles(
sorted_runs[i][j].front().name}, compactionOptions,
{sorted_runs[i][j].back().name, sorted_runs[i][j].front().name},
static_cast<int>(output_level - j) /*level*/); static_cast<int>(output_level - j) /*level*/);
} }
} }
@ -5616,11 +5599,13 @@ class Benchmark {
} }
num_files_at_level0[i] = meta.levels[0].files.size(); num_files_at_level0[i] = meta.levels[0].files.size();
} }
writes_ = static_cast<int64_t>(writes_* static_cast<double>(100) / (ratio + 200)); writes_ = static_cast<int64_t>(writes_ * static_cast<double>(100) /
(ratio + 200));
} }
for (size_t i = 0; i < num_db; i++) { for (size_t i = 0; i < num_db; i++) {
if (sorted_runs[i].size() < num_levels) { if (sorted_runs[i].size() < num_levels) {
fprintf(stderr, "n is too small to fill %" ROCKSDB_PRIszt " levels\n", num_levels); fprintf(stderr, "n is too small to fill %" ROCKSDB_PRIszt " levels\n",
num_levels);
exit(1); exit(1);
} }
} }
@ -5631,9 +5616,9 @@ class Benchmark {
auto options = db->GetOptions(); auto options = db->GetOptions();
MutableCFOptions mutable_cf_options(options); MutableCFOptions mutable_cf_options(options);
for (size_t j = 0; j < sorted_runs[i].size(); j++) { for (size_t j = 0; j < sorted_runs[i].size(); j++) {
compactionOptions.output_file_size_limit = compactionOptions.output_file_size_limit = MaxFileSizeForLevel(
MaxFileSizeForLevel(mutable_cf_options, mutable_cf_options, static_cast<int>(output_level),
static_cast<int>(output_level), compaction_style); compaction_style);
db->CompactFiles( db->CompactFiles(
compactionOptions, compactionOptions,
{sorted_runs[i][j].back().name, sorted_runs[i][j].front().name}, {sorted_runs[i][j].back().name, sorted_runs[i][j].front().name},
@ -5765,7 +5750,9 @@ class Benchmark {
for (size_t k = 0; k < num_db; k++) { for (size_t k = 0; k < num_db; k++) {
auto db = db_list[k]; auto db = db_list[k];
fprintf(stdout, fprintf(stdout,
"---------------------- DB %" ROCKSDB_PRIszt " LSM ---------------------\n", k); "---------------------- DB %" ROCKSDB_PRIszt
" LSM ---------------------\n",
k);
db->GetColumnFamilyMetaData(&meta); db->GetColumnFamilyMetaData(&meta);
for (auto& levelMeta : meta.levels) { for (auto& levelMeta : meta.levels) {
if (levelMeta.files.empty()) { if (levelMeta.files.empty()) {
@ -5983,7 +5970,9 @@ class Benchmark {
} while (!duration.Done(100)); } while (!duration.Done(100));
char msg[100]; char msg[100];
snprintf(msg, sizeof(msg), "(%" PRIu64 " of %" PRIu64 " found, " snprintf(msg, sizeof(msg),
"(%" PRIu64 " of %" PRIu64
" found, "
"issued %" PRIu64 " non-exist keys)\n", "issued %" PRIu64 " non-exist keys)\n",
found, read, nonexist); found, read, nonexist);
@ -6119,8 +6108,8 @@ class Benchmark {
} }
char msg[100]; char msg[100];
snprintf(msg, sizeof(msg), "(%" PRIu64 " of %" PRIu64 " found)\n", snprintf(msg, sizeof(msg), "(%" PRIu64 " of %" PRIu64 " found)\n", found,
found, read); read);
thread->stats.AddBytes(bytes); thread->stats.AddBytes(bytes);
thread->stats.AddMessage(msg); thread->stats.AddMessage(msg);
@ -6219,8 +6208,8 @@ class Benchmark {
} }
char msg[100]; char msg[100];
snprintf(msg, sizeof(msg), "(%" PRIu64 " of %" PRIu64 " found)", snprintf(msg, sizeof(msg), "(%" PRIu64 " of %" PRIu64 " found)", found,
found, read); read);
thread->stats.AddBytes(bytes); thread->stats.AddBytes(bytes);
thread->stats.AddMessage(msg); thread->stats.AddMessage(msg);
} }
@ -6622,8 +6611,8 @@ class Benchmark {
} else if (query_type == 1) { } else if (query_type == 1) {
// the Put query // the Put query
puts++; puts++;
int64_t val_size = ParetoCdfInversion( int64_t val_size = ParetoCdfInversion(u, FLAGS_value_theta,
u, FLAGS_value_theta, FLAGS_value_k, FLAGS_value_sigma); FLAGS_value_k, FLAGS_value_sigma);
if (val_size < 10) { if (val_size < 10) {
val_size = 10; val_size = 10;
} else if (val_size > value_max) { } else if (val_size > value_max) {
@ -6830,8 +6819,8 @@ class Benchmark {
} }
char msg[100]; char msg[100];
snprintf(msg, sizeof(msg), "(%" PRIu64 " of %" PRIu64 " found)\n", snprintf(msg, sizeof(msg), "(%" PRIu64 " of %" PRIu64 " found)\n", found,
found, read); read);
thread->stats.AddBytes(bytes); thread->stats.AddBytes(bytes);
thread->stats.AddMessage(msg); thread->stats.AddMessage(msg);
} }
@ -6894,13 +6883,9 @@ class Benchmark {
} }
} }
void DeleteSeq(ThreadState* thread) { void DeleteSeq(ThreadState* thread) { DoDelete(thread, true); }
DoDelete(thread, true);
}
void DeleteRandom(ThreadState* thread) { void DeleteRandom(ThreadState* thread) { DoDelete(thread, false); }
DoDelete(thread, false);
}
void ReadWhileWriting(ThreadState* thread) { void ReadWhileWriting(ThreadState* thread) {
if (thread->tid > 0) { if (thread->tid > 0) {
@ -7006,9 +6991,9 @@ class Benchmark {
thread->stats.FinishedOps(&db_, db_.db, 1, kWrite); thread->stats.FinishedOps(&db_, db_.db, 1, kWrite);
if (FLAGS_benchmark_write_rate_limit > 0) { if (FLAGS_benchmark_write_rate_limit > 0) {
write_rate_limiter->Request( write_rate_limiter->Request(key.size() + val.size(), Env::IO_HIGH,
key.size() + val.size(), Env::IO_HIGH, nullptr /* stats */,
nullptr /* stats */, RateLimiter::OpType::kWrite); RateLimiter::OpType::kWrite);
} }
if (writes_per_range_tombstone_ > 0 && if (writes_per_range_tombstone_ > 0 &&
@ -7132,7 +7117,6 @@ class Benchmark {
return s; return s;
} }
// Given a key K, this deletes (K+"0", V), (K+"1", V), (K+"2", V) // Given a key K, this deletes (K+"0", V), (K+"1", V), (K+"2", V)
// in DB atomically i.e in a single batch. Also refer GetMany. // in DB atomically i.e in a single batch. Also refer GetMany.
Status DeleteMany(DB* db, const WriteOptions& writeoptions, Status DeleteMany(DB* db, const WriteOptions& writeoptions,
@ -7282,8 +7266,8 @@ class Benchmark {
} }
char msg[128]; char msg[128];
snprintf(msg, sizeof(msg), snprintf(msg, sizeof(msg),
"( get:%" PRIu64 " put:%" PRIu64 " del:%" PRIu64 " total:%" \ "( get:%" PRIu64 " put:%" PRIu64 " del:%" PRIu64 " total:%" PRIu64
PRIu64 " found:%" PRIu64 ")", " found:%" PRIu64 ")",
gets_done, puts_done, deletes_done, readwrites_, found); gets_done, puts_done, deletes_done, readwrites_, found);
thread->stats.AddMessage(msg); thread->stats.AddMessage(msg);
} }
@ -7357,8 +7341,9 @@ class Benchmark {
} }
} }
char msg[100]; char msg[100];
snprintf(msg, sizeof(msg), "( reads:%" PRIu64 " writes:%" PRIu64 \ snprintf(msg, sizeof(msg),
" total:%" PRIu64 " found:%" PRIu64 ")", "( reads:%" PRIu64 " writes:%" PRIu64 " total:%" PRIu64
" found:%" PRIu64 ")",
reads_done, writes_done, readwrites_, found); reads_done, writes_done, readwrites_, found);
thread->stats.AddMessage(msg); thread->stats.AddMessage(msg);
} }
@ -7422,8 +7407,8 @@ class Benchmark {
thread->stats.FinishedOps(nullptr, db, 1, kUpdate); thread->stats.FinishedOps(nullptr, db, 1, kUpdate);
} }
char msg[100]; char msg[100];
snprintf(msg, sizeof(msg), snprintf(msg, sizeof(msg), "( updates:%" PRIu64 " found:%" PRIu64 ")",
"( updates:%" PRIu64 " found:%" PRIu64 ")", readwrites_, found); readwrites_, found);
thread->stats.AddBytes(bytes); thread->stats.AddBytes(bytes);
thread->stats.AddMessage(msg); thread->stats.AddMessage(msg);
} }
@ -7466,7 +7451,8 @@ class Benchmark {
exit(1); exit(1);
} }
Slice value = gen.Generate(static_cast<unsigned int>(existing_value.size())); Slice value =
gen.Generate(static_cast<unsigned int>(existing_value.size()));
std::string new_value; std::string new_value;
if (status.ok()) { if (status.ok()) {
@ -7490,8 +7476,8 @@ class Benchmark {
thread->stats.FinishedOps(nullptr, db, 1); thread->stats.FinishedOps(nullptr, db, 1);
} }
char msg[100]; char msg[100];
snprintf(msg, sizeof(msg), snprintf(msg, sizeof(msg), "( updates:%" PRIu64 " found:%" PRIu64 ")",
"( updates:%" PRIu64 " found:%" PRIu64 ")", readwrites_, found); readwrites_, found);
thread->stats.AddMessage(msg); thread->stats.AddMessage(msg);
} }
@ -7592,12 +7578,10 @@ class Benchmark {
Slice val = gen.Generate(); Slice val = gen.Generate();
if (FLAGS_num_column_families > 1) { if (FLAGS_num_column_families > 1) {
s = db_with_cfh->db->Merge(write_options_, s = db_with_cfh->db->Merge(write_options_,
db_with_cfh->GetCfh(key_rand), key, db_with_cfh->GetCfh(key_rand), key, val);
val);
} else { } else {
s = db_with_cfh->db->Merge(write_options_, s = db_with_cfh->db->Merge(
db_with_cfh->db->DefaultColumnFamily(), key, write_options_, db_with_cfh->db->DefaultColumnFamily(), key, val);
val);
} }
if (!s.ok()) { if (!s.ok()) {
@ -7650,8 +7634,7 @@ class Benchmark {
thread->stats.FinishedOps(nullptr, db, 1, kMerge); thread->stats.FinishedOps(nullptr, db, 1, kMerge);
} else { } else {
Status s = db->Get(read_options_, key, &value); Status s = db->Get(read_options_, key, &value);
if (value.length() > max_length) if (value.length() > max_length) max_length = value.length();
max_length = value.length();
if (!s.ok() && !s.IsNotFound()) { if (!s.ok() && !s.IsNotFound()) {
fprintf(stderr, "get error: %s\n", s.ToString().c_str()); fprintf(stderr, "get error: %s\n", s.ToString().c_str());
@ -7920,9 +7903,8 @@ class Benchmark {
return; return;
} }
Status s = Status s = RandomTransactionInserter::Verify(
RandomTransactionInserter::Verify(db_.db, db_.db, static_cast<uint16_t>(FLAGS_transaction_sets));
static_cast<uint16_t>(FLAGS_transaction_sets));
if (s.ok()) { if (s.ok()) {
fprintf(stdout, "RandomTransactionVerify Success.\n"); fprintf(stdout, "RandomTransactionVerify Success.\n");
@ -8142,9 +8124,9 @@ class Benchmark {
thread->stats.AddBytes(bytes); thread->stats.AddBytes(bytes);
if (FLAGS_benchmark_write_rate_limit > 0) { if (FLAGS_benchmark_write_rate_limit > 0) {
write_rate_limiter->Request( write_rate_limiter->Request(key.size() + val.size(), Env::IO_HIGH,
key.size() + val.size(), Env::IO_HIGH, nullptr /* stats */,
nullptr /* stats */, RateLimiter::OpType::kWrite); RateLimiter::OpType::kWrite);
} }
} }
} }

@ -5,19 +5,19 @@
#include <cstdio> #include <cstdio>
#include <cstdlib> #include <cstdlib>
#include <vector>
#include <memory> #include <memory>
#include <vector>
#include "port/port.h"
#include "rocksdb/comparator.h"
#include "rocksdb/db.h" #include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "rocksdb/env.h" #include "rocksdb/env.h"
#include "rocksdb/filter_policy.h"
#include "rocksdb/options.h"
#include "rocksdb/slice.h" #include "rocksdb/slice.h"
#include "rocksdb/slice_transform.h"
#include "rocksdb/status.h" #include "rocksdb/status.h"
#include "rocksdb/comparator.h"
#include "rocksdb/table.h" #include "rocksdb/table.h"
#include "rocksdb/slice_transform.h"
#include "rocksdb/filter_policy.h"
#include "port/port.h"
#include "util/string_util.h" #include "util/string_util.h"
namespace ROCKSDB_NAMESPACE { namespace ROCKSDB_NAMESPACE {

@ -5,11 +5,12 @@
#ifndef ROCKSDB_LITE #ifndef ROCKSDB_LITE
#include "rocksdb/db_dump_tool.h"
#include <cinttypes> #include <cinttypes>
#include <iostream> #include <iostream>
#include "rocksdb/db.h" #include "rocksdb/db.h"
#include "rocksdb/db_dump_tool.h"
#include "rocksdb/env.h" #include "rocksdb/env.h"
#include "util/coding.h" #include "util/coding.h"

@ -122,7 +122,7 @@ void DumpSstFile(Options options, std::string filename, bool output_hex,
void DumpBlobFile(const std::string& filename, bool is_key_hex, void DumpBlobFile(const std::string& filename, bool is_key_hex,
bool is_value_hex, bool dump_uncompressed_blobs); bool is_value_hex, bool dump_uncompressed_blobs);
}; }; // namespace
LDBCommand* LDBCommand::InitFromCmdLineArgs( LDBCommand* LDBCommand::InitFromCmdLineArgs(
int argc, char const* const* argv, const Options& options, int argc, char const* const* argv, const Options& options,
@ -295,8 +295,7 @@ LDBCommand* LDBCommand::SelectCommand(const ParsedParams& parsed_params) {
parsed_params.flags); parsed_params.flags);
} else if (parsed_params.cmd == CheckPointCommand::Name()) { } else if (parsed_params.cmd == CheckPointCommand::Name()) {
return new CheckPointCommand(parsed_params.cmd_params, return new CheckPointCommand(parsed_params.cmd_params,
parsed_params.option_map, parsed_params.option_map, parsed_params.flags);
parsed_params.flags);
} else if (parsed_params.cmd == RepairCommand::Name()) { } else if (parsed_params.cmd == RepairCommand::Name()) {
return new RepairCommand(parsed_params.cmd_params, parsed_params.option_map, return new RepairCommand(parsed_params.cmd_params, parsed_params.option_map,
parsed_params.flags); parsed_params.flags);
@ -1373,7 +1372,6 @@ ManifestDumpCommand::ManifestDumpCommand(
} }
void ManifestDumpCommand::DoCommand() { void ManifestDumpCommand::DoCommand() {
std::string manifestfile; std::string manifestfile;
if (!path_.empty()) { if (!path_.empty()) {
@ -1860,11 +1858,10 @@ void InternalDumpCommand::DoCommand() {
s1 = 0; s1 = 0;
row = ikey.Encode().ToString(); row = ikey.Encode().ToString();
val = key_version.value; val = key_version.value;
for(k=0;row[k]!='\x01' && row[k]!='\0';k++) for (k = 0; row[k] != '\x01' && row[k] != '\0'; k++) s1++;
s1++; for (k = 0; val[k] != '\x01' && val[k] != '\0'; k++) s1++;
for(k=0;val[k]!='\x01' && val[k]!='\0';k++) for (int j = 0; row[j] != delim_[0] && row[j] != '\0' && row[j] != '\x01';
s1++; j++)
for(int j=0;row[j]!=delim_[0] && row[j]!='\0' && row[j]!='\x01';j++)
rtype1 += row[j]; rtype1 += row[j];
if (rtype2.compare("") && rtype2.compare(rtype1) != 0) { if (rtype2.compare("") && rtype2.compare(rtype1) != 0) {
fprintf(stdout, "%s => count:%" PRIu64 "\tsize:%" PRIu64 "\n", fprintf(stdout, "%s => count:%" PRIu64 "\tsize:%" PRIu64 "\n",
@ -2138,11 +2135,9 @@ void DBDumperCommand::DoDumpCommand() {
for (; iter->Valid(); iter->Next()) { for (; iter->Valid(); iter->Next()) {
int rawtime = 0; int rawtime = 0;
// If end marker was specified, we stop before it // If end marker was specified, we stop before it
if (!null_to_ && (iter->key().ToString() >= to_)) if (!null_to_ && (iter->key().ToString() >= to_)) break;
break;
// Terminate if maximum number of keys have been dumped // Terminate if maximum number of keys have been dumped
if (max_keys == 0) if (max_keys == 0) break;
break;
if (is_db_ttl_) { if (is_db_ttl_) {
TtlIterator* it_ttl = static_cast_with_check<TtlIterator>(iter); TtlIterator* it_ttl = static_cast_with_check<TtlIterator>(iter);
rawtime = it_ttl->ttl_timestamp(); rawtime = it_ttl->ttl_timestamp();
@ -2176,7 +2171,6 @@ void DBDumperCommand::DoDumpCommand() {
s2 += s1; s2 += s1;
rtype2 = rtype1; rtype2 = rtype1;
} }
} }
if (count_only_) { if (count_only_) {
@ -2265,8 +2259,7 @@ void ReduceDBLevelsCommand::OverrideBaseCFOptions(
cf_opts->max_bytes_for_level_multiplier = 1; cf_opts->max_bytes_for_level_multiplier = 1;
} }
Status ReduceDBLevelsCommand::GetOldNumOfLevels(Options& opt, Status ReduceDBLevelsCommand::GetOldNumOfLevels(Options& opt, int* levels) {
int* levels) {
ImmutableDBOptions db_options(opt); ImmutableDBOptions db_options(opt);
EnvOptions soptions; EnvOptions soptions;
std::shared_ptr<Cache> tc( std::shared_ptr<Cache> tc(
@ -2716,7 +2709,6 @@ WALDumperCommand::WALDumperCommand(
wal_file_ = itr->second; wal_file_ = itr->second;
} }
print_header_ = IsFlagPresent(flags, ARG_PRINT_HEADER); print_header_ = IsFlagPresent(flags, ARG_PRINT_HEADER);
print_values_ = IsFlagPresent(flags, ARG_PRINT_VALUE); print_values_ = IsFlagPresent(flags, ARG_PRINT_VALUE);
is_write_committed_ = ParseBooleanOption(options, ARG_WRITE_COMMITTED, true); is_write_committed_ = ParseBooleanOption(options, ARG_WRITE_COMMITTED, true);
@ -3253,7 +3245,8 @@ void DBQuerierCommand::Help(std::string& ret) {
ret.append(DBQuerierCommand::Name()); ret.append(DBQuerierCommand::Name());
ret.append(" [--" + ARG_TTL + "]"); ret.append(" [--" + ARG_TTL + "]");
ret.append("\n"); ret.append("\n");
ret.append(" Starts a REPL shell. Type help for list of available " ret.append(
" Starts a REPL shell. Type help for list of available "
"commands."); "commands.");
ret.append("\n"); ret.append("\n");
} }
@ -3315,8 +3308,8 @@ void DBQuerierCommand::DoCommand() {
key = (is_key_hex_ ? HexToString(tokens[1]) : tokens[1]); key = (is_key_hex_ ? HexToString(tokens[1]) : tokens[1]);
s = db_->Get(read_options, GetCfHandle(), Slice(key), &value); s = db_->Get(read_options, GetCfHandle(), Slice(key), &value);
if (s.ok()) { if (s.ok()) {
fprintf(stdout, "%s\n", PrintKeyValue(key, value, fprintf(stdout, "%s\n",
is_key_hex_, is_value_hex_).c_str()); PrintKeyValue(key, value, is_key_hex_, is_value_hex_).c_str());
} else { } else {
if (s.IsNotFound()) { if (s.IsNotFound()) {
fprintf(stdout, "Not found %s\n", tokens[1].c_str()); fprintf(stdout, "Not found %s\n", tokens[1].c_str());

@ -5,13 +5,13 @@
#pragma once #pragma once
#include "rocksdb/utilities/ldb_cmd.h"
#include <map> #include <map>
#include <string> #include <string>
#include <utility> #include <utility>
#include <vector> #include <vector>
#include "rocksdb/utilities/ldb_cmd.h"
namespace ROCKSDB_NAMESPACE { namespace ROCKSDB_NAMESPACE {
class CompactorCommand : public LDBCommand { class CompactorCommand : public LDBCommand {
@ -581,6 +581,7 @@ class CheckPointCommand : public LDBCommand {
static void Help(std::string& ret); static void Help(std::string& ret);
std::string checkpoint_dir_; std::string checkpoint_dir_;
private: private:
static const std::string ARG_CHECKPOINT_DIR; static const std::string ARG_CHECKPOINT_DIR;
}; };

@ -26,9 +26,9 @@
#include "util/file_checksum_helper.h" #include "util/file_checksum_helper.h"
#include "util/random.h" #include "util/random.h"
using std::map;
using std::string; using std::string;
using std::vector; using std::vector;
using std::map;
namespace ROCKSDB_NAMESPACE { namespace ROCKSDB_NAMESPACE {

@ -5,6 +5,7 @@
// //
#ifndef ROCKSDB_LITE #ifndef ROCKSDB_LITE
#include "rocksdb/ldb_tool.h" #include "rocksdb/ldb_tool.h"
#include "rocksdb/utilities/ldb_cmd.h" #include "rocksdb/utilities/ldb_cmd.h"
#include "tools/ldb_cmd_impl.h" #include "tools/ldb_cmd_impl.h"

@ -6,13 +6,12 @@
#include "util/stop_watch.h" #include "util/stop_watch.h"
#ifndef ROCKSDB_LITE #ifndef ROCKSDB_LITE
#include "tools/simulated_hybrid_file_system.h"
#include <algorithm> #include <algorithm>
#include <sstream> #include <sstream>
#include <string> #include <string>
#include "rocksdb/rate_limiter.h" #include "rocksdb/rate_limiter.h"
#include "tools/simulated_hybrid_file_system.h"
namespace ROCKSDB_NAMESPACE { namespace ROCKSDB_NAMESPACE {

@ -259,9 +259,9 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) {
try { try {
in_key = ROCKSDB_NAMESPACE::LDBCommand::HexToString(in_key); in_key = ROCKSDB_NAMESPACE::LDBCommand::HexToString(in_key);
} catch (...) { } catch (...) {
std::cerr << "ERROR: Invalid key input '" std::cerr << "ERROR: Invalid key input '" << in_key
<< in_key << "' Use 0x{hex representation of internal rocksdb key}"
<< "' Use 0x{hex representation of internal rocksdb key}" << std::endl; << std::endl;
return -1; return -1;
} }
Slice sl_key = ROCKSDB_NAMESPACE::Slice(in_key); Slice sl_key = ROCKSDB_NAMESPACE::Slice(in_key);
@ -337,7 +337,8 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) {
exit(1); exit(1);
} }
} else if (has_compression_level_from || has_compression_level_to) { } else if (has_compression_level_from || has_compression_level_to) {
fprintf(stderr, "Specify both --compression_level_from and " fprintf(stderr,
"Specify both --compression_level_from and "
"--compression_level_to.\n\n"); "--compression_level_to.\n\n");
exit(1); exit(1);
} }
@ -476,8 +477,7 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) {
has_from || use_from_as_prefix, from_key, has_to, to_key, has_from || use_from_as_prefix, from_key, has_to, to_key,
use_from_as_prefix); use_from_as_prefix);
if (!st.ok()) { if (!st.ok()) {
fprintf(stderr, "%s: %s\n", filename.c_str(), fprintf(stderr, "%s: %s\n", filename.c_str(), st.ToString().c_str());
st.ToString().c_str());
} }
total_read += dumper.GetReadNumber(); total_read += dumper.GetReadNumber();
if (read_num > 0 && total_read > read_num) { if (read_num > 0 && total_read > read_num) {

@ -208,13 +208,16 @@ class WriteStress {
SystemClock::Default()->SleepForMicroseconds( SystemClock::Default()->SleepForMicroseconds(
static_cast<int>(FLAGS_prefix_mutate_period_sec * 1000 * 1000LL)); static_cast<int>(FLAGS_prefix_mutate_period_sec * 1000 * 1000LL));
if (dist(rng) < FLAGS_first_char_mutate_probability) { if (dist(rng) < FLAGS_first_char_mutate_probability) {
key_prefix_[0].store(static_cast<char>(char_dist(rng)), std::memory_order_relaxed); key_prefix_[0].store(static_cast<char>(char_dist(rng)),
std::memory_order_relaxed);
} }
if (dist(rng) < FLAGS_second_char_mutate_probability) { if (dist(rng) < FLAGS_second_char_mutate_probability) {
key_prefix_[1].store(static_cast<char>(char_dist(rng)), std::memory_order_relaxed); key_prefix_[1].store(static_cast<char>(char_dist(rng)),
std::memory_order_relaxed);
} }
if (dist(rng) < FLAGS_third_char_mutate_probability) { if (dist(rng) < FLAGS_third_char_mutate_probability) {
key_prefix_[2].store(static_cast<char>(char_dist(rng)), std::memory_order_relaxed); key_prefix_[2].store(static_cast<char>(char_dist(rng)),
std::memory_order_relaxed);
} }
} }
} }

Loading…
Cancel
Save