Run clang format against files under tools/ and db_stress_tool/ (#10868)

Summary:
Some lines of .h and .cc files are not properly formatted. Clean them up with clang-format.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/10868

Test Plan: Watch existing CI to pass

Reviewed By: ajkr

Differential Revision: D40683485

fbshipit-source-id: 491fbb78b2cdcb948164f306829909ad816d5d0b
main
sdong 2 years ago committed by Facebook GitHub Bot
parent 95a1935cb1
commit 48fe921754
  1. 4
      db_stress_tool/batched_ops_stress.cc
  2. 4
      db_stress_tool/db_stress_common.h
  3. 4
      db_stress_tool/db_stress_shared_state.h
  4. 1
      tools/blob_dump.cc
  5. 14
      tools/block_cache_analyzer/block_cache_trace_analyzer.cc
  6. 3
      tools/block_cache_analyzer/block_cache_trace_analyzer.h
  7. 488
      tools/db_bench_tool.cc
  8. 12
      tools/db_sanity_test.cc
  9. 3
      tools/dump/db_dump_tool.cc
  10. 33
      tools/ldb_cmd.cc
  11. 5
      tools/ldb_cmd_impl.h
  12. 2
      tools/ldb_cmd_test.cc
  13. 1
      tools/ldb_tool.cc
  14. 3
      tools/simulated_hybrid_file_system.cc
  15. 12
      tools/sst_dump_tool.cc
  16. 9
      tools/write_stress.cc

@ -188,8 +188,8 @@ class BatchedOpsStressTest : public StressTest {
const std::vector<int64_t>& rand_keys) override {
size_t num_keys = rand_keys.size();
std::vector<Status> ret_status(num_keys);
std::array<std::string, 10> keys = {{"0", "1", "2", "3", "4",
"5", "6", "7", "8", "9"}};
std::array<std::string, 10> keys = {
{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9"}};
size_t num_prefixes = keys.size();
for (size_t rand_key = 0; rand_key < num_keys; ++rand_key) {
std::vector<Slice> key_slices;

@ -509,8 +509,8 @@ extern inline std::string Key(int64_t val) {
if (offset < weight) {
// Use the bottom 3 bits of offset as the number of trailing 'x's in the
// key. If the next key is going to be of the next level, then skip the
// trailer as it would break ordering. If the key length is already at max,
// skip the trailer.
// trailer as it would break ordering. If the key length is already at
// max, skip the trailer.
if (offset < weight - 1 && level < levels - 1) {
size_t trailer_len = offset & 0x7;
key.append(trailer_len, 'x');

@ -333,9 +333,7 @@ class SharedState {
uint64_t GetStartTimestamp() const { return start_timestamp_; }
private:
static void IgnoreReadErrorCallback(void*) {
ignore_read_error = true;
}
static void IgnoreReadErrorCallback(void*) { ignore_read_error = true; }
// Pick random keys in each column family that will not experience overwrite.
std::unordered_set<int64_t> GenerateNoOverwriteIds() const {

@ -5,6 +5,7 @@
#ifndef ROCKSDB_LITE
#include <getopt.h>
#include <cstdio>
#include <string>
#include <unordered_map>

@ -1175,7 +1175,8 @@ void BlockCacheTraceAnalyzer::WriteReuseLifetime(
}
void BlockCacheTraceAnalyzer::WriteBlockReuseTimeline(
const uint64_t reuse_window, bool user_access_only, TraceType block_type) const {
const uint64_t reuse_window, bool user_access_only,
TraceType block_type) const {
// A map from block key to an array of bools that states whether a block is
// accessed in a time window.
std::map<uint64_t, std::vector<bool>> block_accessed;
@ -1214,7 +1215,8 @@ void BlockCacheTraceAnalyzer::WriteBlockReuseTimeline(
TraverseBlocks(block_callback);
// A cell is the number of blocks accessed in a reuse window.
std::unique_ptr<uint64_t[]> reuse_table(new uint64_t[reuse_vector_size * reuse_vector_size]);
std::unique_ptr<uint64_t[]> reuse_table(
new uint64_t[reuse_vector_size * reuse_vector_size]);
for (uint64_t start_time = 0; start_time < reuse_vector_size; start_time++) {
// Initialize the reuse_table.
for (uint64_t i = 0; i < reuse_vector_size; i++) {
@ -1255,7 +1257,8 @@ void BlockCacheTraceAnalyzer::WriteBlockReuseTimeline(
if (j < start_time) {
row += "100.0";
} else {
row += std::to_string(percent(reuse_table[start_time * reuse_vector_size + j],
row += std::to_string(
percent(reuse_table[start_time * reuse_vector_size + j],
reuse_table[start_time * reuse_vector_size + start_time]));
}
}
@ -1811,8 +1814,9 @@ void BlockCacheTraceAnalyzer::PrintDataBlockAccessStats() const {
return;
}
// Use four decimal points.
uint64_t percent_referenced_for_existing_keys = (uint64_t)(
((double)block.key_num_access_map.size() / (double)block.num_keys) *
uint64_t percent_referenced_for_existing_keys =
(uint64_t)(((double)block.key_num_access_map.size() /
(double)block.num_keys) *
10000.0);
uint64_t percent_referenced_for_non_existing_keys =
(uint64_t)(((double)block.non_exist_key_num_access_map.size() /

@ -292,7 +292,8 @@ class BlockCacheTraceAnalyzer {
// The file is named
// "block_type_user_access_only_reuse_window_reuse_timeline". The file format
// is start_time,0,1,...,N where N equals trace_duration / reuse_window.
void WriteBlockReuseTimeline(const uint64_t reuse_window, bool user_access_only,
void WriteBlockReuseTimeline(const uint64_t reuse_window,
bool user_access_only,
TraceType block_type) const;
// Write the Get spatical locality into csv files saved in 'output_dir'.

@ -289,10 +289,12 @@ DEFINE_string(column_family_distribution, "",
"and `num_hot_column_families=0`, a valid list could be "
"\"10,20,30,40\".");
DEFINE_int64(reads, -1, "Number of read operations to do. "
DEFINE_int64(reads, -1,
"Number of read operations to do. "
"If negative, do FLAGS_num reads.");
DEFINE_int64(deletes, -1, "Number of delete operations to do. "
DEFINE_int64(deletes, -1,
"Number of delete operations to do. "
"If negative, do FLAGS_num deletions.");
DEFINE_int32(bloom_locality, 0, "Control bloom filter probes locality");
@ -304,7 +306,8 @@ static int64_t seed_base;
DEFINE_int32(threads, 1, "Number of concurrent threads to run.");
DEFINE_int32(duration, 0, "Time in seconds for the random-ops tests to run."
DEFINE_int32(duration, 0,
"Time in seconds for the random-ops tests to run."
" When 0 then num & reads determine the test duration");
DEFINE_string(value_size_distribution_type, "fixed",
@ -357,8 +360,9 @@ DEFINE_int32(user_timestamp_size, 0,
DEFINE_int32(num_multi_db, 0,
"Number of DBs used in the benchmark. 0 means single DB.");
DEFINE_double(compression_ratio, 0.5, "Arrange to generate values that shrink"
" to this fraction of their original size after compression");
DEFINE_double(compression_ratio, 0.5,
"Arrange to generate values that shrink to this fraction of "
"their original size after compression");
DEFINE_double(
overwrite_probability, 0.0,
@ -514,9 +518,8 @@ DEFINE_int32(max_background_compactions,
DEFINE_uint64(subcompactions, 1,
"Maximum number of subcompactions to divide L0-L1 compactions "
"into.");
static const bool FLAGS_subcompactions_dummy
__attribute__((__unused__)) = RegisterFlagValidator(&FLAGS_subcompactions,
&ValidateUint32Range);
static const bool FLAGS_subcompactions_dummy __attribute__((__unused__)) =
RegisterFlagValidator(&FLAGS_subcompactions, &ValidateUint32Range);
DEFINE_int32(max_background_flushes,
ROCKSDB_NAMESPACE::Options().max_background_flushes,
@ -537,11 +540,13 @@ DEFINE_int32(universal_size_ratio, 0,
"Percentage flexibility while comparing file size "
"(for universal compaction only).");
DEFINE_int32(universal_min_merge_width, 0, "The minimum number of files in a"
" single compaction run (for universal compaction only).");
DEFINE_int32(universal_min_merge_width, 0,
"The minimum number of files in a single compaction run "
"(for universal compaction only).");
DEFINE_int32(universal_max_merge_width, 0, "The max number of files to compact"
" in universal style compaction");
DEFINE_int32(universal_max_merge_width, 0,
"The max number of files to compact in universal style "
"compaction");
DEFINE_int32(universal_max_size_amplification_percent, 0,
"The max size amplification for universal style compaction");
@ -747,9 +752,10 @@ DEFINE_bool(whole_key_filtering,
ROCKSDB_NAMESPACE::BlockBasedTableOptions().whole_key_filtering,
"Use whole keys (in addition to prefixes) in SST bloom filter.");
DEFINE_bool(use_existing_db, false, "If true, do not destroy the existing"
" database. If you set this flag and also specify a benchmark that"
" wants a fresh database, that benchmark will fail.");
DEFINE_bool(use_existing_db, false,
"If true, do not destroy the existing database. If you set this "
"flag and also specify a benchmark that wants a fresh database, "
"that benchmark will fail.");
DEFINE_bool(use_existing_keys, false,
"If true, uses existing keys in the DB, "
@ -787,16 +793,15 @@ DEFINE_bool(use_keep_filter, false, "Whether to use a noop compaction filter");
static bool ValidateCacheNumshardbits(const char* flagname, int32_t value) {
if (value >= 20) {
fprintf(stderr, "Invalid value for --%s: %d, must be < 20\n",
flagname, value);
fprintf(stderr, "Invalid value for --%s: %d, must be < 20\n", flagname,
value);
return false;
}
return true;
}
DEFINE_bool(verify_checksum, true,
"Verify checksum for every block read"
" from storage");
"Verify checksum for every block read from storage");
DEFINE_int32(checksum_type,
ROCKSDB_NAMESPACE::BlockBasedTableOptions().checksum,
@ -808,10 +813,11 @@ DEFINE_int32(stats_level, ROCKSDB_NAMESPACE::StatsLevel::kExceptDetailedTimers,
DEFINE_string(statistics_string, "", "Serialized statistics string");
static class std::shared_ptr<ROCKSDB_NAMESPACE::Statistics> dbstats;
DEFINE_int64(writes, -1, "Number of write operations to do. If negative, do"
" --num reads.");
DEFINE_int64(writes, -1,
"Number of write operations to do. If negative, do --num reads.");
DEFINE_bool(finish_after_writes, false, "Write thread terminates after all writes are finished");
DEFINE_bool(finish_after_writes, false,
"Write thread terminates after all writes are finished");
DEFINE_bool(sync, false, "Sync all writes to disk");
@ -877,24 +883,27 @@ DEFINE_uint64(ttl_seconds, ROCKSDB_NAMESPACE::Options().ttl, "Set options.ttl");
static bool ValidateInt32Percent(const char* flagname, int32_t value) {
if (value <= 0 || value >= 100) {
fprintf(stderr, "Invalid value for --%s: %d, 0< pct <100 \n",
flagname, value);
fprintf(stderr, "Invalid value for --%s: %d, 0< pct <100 \n", flagname,
value);
return false;
}
return true;
}
DEFINE_int32(readwritepercent, 90, "Ratio of reads to reads/writes (expressed"
" as percentage) for the ReadRandomWriteRandom workload. The "
"default value 90 means 90% operations out of all reads and writes"
" operations are reads. In other words, 9 gets for every 1 put.");
DEFINE_int32(mergereadpercent, 70, "Ratio of merges to merges&reads (expressed"
" as percentage) for the ReadRandomMergeRandom workload. The"
" default value 70 means 70% out of all read and merge operations"
" are merges. In other words, 7 merges for every 3 gets.");
DEFINE_int32(deletepercent, 2, "Percentage of deletes out of reads/writes/"
"deletes (used in RandomWithVerify only). RandomWithVerify "
DEFINE_int32(readwritepercent, 90,
"Ratio of reads to reads/writes (expressed as percentage) for "
"the ReadRandomWriteRandom workload. The default value 90 means "
"90% operations out of all reads and writes operations are "
"reads. In other words, 9 gets for every 1 put.");
DEFINE_int32(mergereadpercent, 70,
"Ratio of merges to merges&reads (expressed as percentage) for "
"the ReadRandomMergeRandom workload. The default value 70 means "
"70% out of all read and merge operations are merges. In other "
"words, 7 merges for every 3 gets.");
DEFINE_int32(deletepercent, 2,
"Percentage of deletes out of reads/writes/deletes (used in "
"RandomWithVerify only). RandomWithVerify "
"calculates writepercent as (100 - FLAGS_readwritepercent - "
"deletepercent), so deletepercent must be smaller than (100 - "
"FLAGS_readwritepercent)");
@ -1304,7 +1313,8 @@ DEFINE_int32(compression_zstd_max_train_bytes,
"Maximum size of training data passed to zstd's dictionary "
"trainer.");
DEFINE_int32(min_level_to_compress, -1, "If non-negative, compression starts"
DEFINE_int32(min_level_to_compress, -1,
"If non-negative, compression starts"
" from this level. Levels with number < min_level_to_compress are"
" not compressed. Otherwise, apply compression_type to "
"all levels.");
@ -1342,8 +1352,8 @@ DEFINE_string(fs_uri, "",
#endif // ROCKSDB_LITE
DEFINE_string(simulate_hybrid_fs_file, "",
"File for Store Metadata for Simulate hybrid FS. Empty means "
"disable the feature. Now, if it is set, "
"last_level_temperature is set to kWarm.");
"disable the feature. Now, if it is set, last_level_temperature "
"is set to kWarm.");
DEFINE_int32(simulate_hybrid_hdd_multipliers, 1,
"In simulate_hybrid_fs_file or simulate_hdd mode, how many HDDs "
"are simulated.");
@ -1360,18 +1370,21 @@ static std::shared_ptr<ROCKSDB_NAMESPACE::Env> env_guard;
static ROCKSDB_NAMESPACE::Env* FLAGS_env = ROCKSDB_NAMESPACE::Env::Default();
DEFINE_int64(stats_interval, 0, "Stats are reported every N operations when "
"this is greater than zero. When 0 the interval grows over time.");
DEFINE_int64(stats_interval, 0,
"Stats are reported every N operations when this is greater than "
"zero. When 0 the interval grows over time.");
DEFINE_int64(stats_interval_seconds, 0, "Report stats every N seconds. This "
"overrides stats_interval when both are > 0.");
DEFINE_int64(stats_interval_seconds, 0,
"Report stats every N seconds. This overrides stats_interval when"
" both are > 0.");
DEFINE_int32(stats_per_interval, 0, "Reports additional stats per interval when"
" this is greater than 0.");
DEFINE_int32(stats_per_interval, 0,
"Reports additional stats per interval when this is greater than "
"0.");
DEFINE_uint64(slow_usecs, 1000000,
"A message is printed for operations that "
"take at least this many microseconds.");
"A message is printed for operations that take at least this "
"many microseconds.");
DEFINE_int64(report_interval_seconds, 0,
"If greater than zero, it will write simple stats in CSV format "
@ -1441,24 +1454,19 @@ DEFINE_bool(rate_limiter_auto_tuned, false,
"Enable dynamic adjustment of rate limit according to demand for "
"background I/O");
DEFINE_bool(sine_write_rate, false, "Use a sine wave write_rate_limit");
DEFINE_bool(sine_write_rate, false,
"Use a sine wave write_rate_limit");
DEFINE_uint64(sine_write_rate_interval_milliseconds, 10000,
DEFINE_uint64(
sine_write_rate_interval_milliseconds, 10000,
"Interval of which the sine wave write_rate_limit is recalculated");
DEFINE_double(sine_a, 1,
"A in f(x) = A sin(bx + c) + d");
DEFINE_double(sine_a, 1, "A in f(x) = A sin(bx + c) + d");
DEFINE_double(sine_b, 1,
"B in f(x) = A sin(bx + c) + d");
DEFINE_double(sine_b, 1, "B in f(x) = A sin(bx + c) + d");
DEFINE_double(sine_c, 0,
"C in f(x) = A sin(bx + c) + d");
DEFINE_double(sine_c, 0, "C in f(x) = A sin(bx + c) + d");
DEFINE_double(sine_d, 1,
"D in f(x) = A sin(bx + c) + d");
DEFINE_double(sine_d, 1, "D in f(x) = A sin(bx + c) + d");
DEFINE_bool(rate_limit_bg_reads, false,
"Use options.rate_limiter on compaction reads");
@ -1548,8 +1556,8 @@ DEFINE_bool(print_malloc_stats, false,
DEFINE_bool(disable_auto_compactions, false, "Do not auto trigger compactions");
DEFINE_uint64(wal_ttl_seconds, 0, "Set the TTL for the WAL Files in seconds.");
DEFINE_uint64(wal_size_limit_MB, 0, "Set the size limit for the WAL Files"
" in MB.");
DEFINE_uint64(wal_size_limit_MB, 0,
"Set the size limit for the WAL Files in MB.");
DEFINE_uint64(max_total_wal_size, 0, "Set total max WAL size");
DEFINE_bool(mmap_read, ROCKSDB_NAMESPACE::Options().allow_mmap_reads,
@ -1616,8 +1624,9 @@ DEFINE_int32(num_deletion_threads, 1,
"Number of threads to do deletion (used in TimeSeries and delete "
"expire_style only).");
DEFINE_int32(max_successive_merges, 0, "Maximum number of successive merge"
" operations on a key in the memtable");
DEFINE_int32(max_successive_merges, 0,
"Maximum number of successive merge operations on a key in the "
"memtable");
static bool ValidatePrefixSize(const char* flagname, int32_t value) {
if (value < 0 || value >= 2000000000) {
@ -1628,11 +1637,12 @@ static bool ValidatePrefixSize(const char* flagname, int32_t value) {
return true;
}
DEFINE_int32(prefix_size, 0, "control the prefix size for HashSkipList and "
"plain table");
DEFINE_int64(keys_per_prefix, 0, "control average number of keys generated "
"per prefix, 0 means no special handling of the prefix, "
"i.e. use the prefix comes with the generated random number.");
DEFINE_int32(prefix_size, 0,
"control the prefix size for HashSkipList and plain table");
DEFINE_int64(keys_per_prefix, 0,
"control average number of keys generated per prefix, 0 means no "
"special handling of the prefix, i.e. use the prefix comes with "
"the generated random number.");
DEFINE_bool(total_order_seek, false,
"Enable total order seek regardless of index format.");
DEFINE_bool(prefix_same_as_start, false,
@ -1644,13 +1654,13 @@ DEFINE_bool(
DEFINE_int32(memtable_insert_with_hint_prefix_size, 0,
"If non-zero, enable "
"memtable insert with hint with the given prefix size.");
DEFINE_bool(enable_io_prio, false, "Lower the background flush/compaction "
"threads' IO priority");
DEFINE_bool(enable_cpu_prio, false, "Lower the background flush/compaction "
"threads' CPU priority");
DEFINE_bool(identity_as_first_hash, false, "the first hash function of cuckoo "
"table becomes an identity function. This is only valid when key "
"is 8 bytes");
DEFINE_bool(enable_io_prio, false,
"Lower the background flush/compaction threads' IO priority");
DEFINE_bool(enable_cpu_prio, false,
"Lower the background flush/compaction threads' CPU priority");
DEFINE_bool(identity_as_first_hash, false,
"the first hash function of cuckoo table becomes an identity "
"function. This is only valid when key is 8 bytes");
DEFINE_bool(dump_malloc_stats, true, "Dump malloc stats in LOG ");
DEFINE_uint64(stats_dump_period_sec,
ROCKSDB_NAMESPACE::Options().stats_dump_period_sec,
@ -1673,22 +1683,23 @@ DEFINE_bool(multiread_batched, false, "Use the new MultiGet API");
DEFINE_string(memtablerep, "skip_list", "");
DEFINE_int64(hash_bucket_count, 1024 * 1024, "hash bucket count");
DEFINE_bool(use_plain_table, false, "if use plain table "
"instead of block-based table format");
DEFINE_bool(use_plain_table, false,
"if use plain table instead of block-based table format");
DEFINE_bool(use_cuckoo_table, false, "if use cuckoo table format");
DEFINE_double(cuckoo_hash_ratio, 0.9, "Hash ratio for Cuckoo SST table.");
DEFINE_bool(use_hash_search, false, "if use kHashSearch "
"instead of kBinarySearch. "
DEFINE_bool(use_hash_search, false,
"if use kHashSearch instead of kBinarySearch. "
"This is valid if only we use BlockTable");
DEFINE_string(merge_operator, "", "The merge operator to use with the database."
DEFINE_string(merge_operator, "",
"The merge operator to use with the database."
"If a new merge operator is specified, be sure to use fresh"
" database The possible merge operators are defined in"
" utilities/merge_operators.h");
DEFINE_int32(skip_list_lookahead, 0, "Used with skip_list memtablerep; try "
"linear search first for this many steps from the previous "
"position");
DEFINE_bool(report_file_operations, false, "if report number of file "
"operations");
DEFINE_int32(skip_list_lookahead, 0,
"Used with skip_list memtablerep; try linear search first for "
"this many steps from the previous position");
DEFINE_bool(report_file_operations, false,
"if report number of file operations");
DEFINE_bool(report_open_timing, false, "if report open timing");
DEFINE_int32(readahead_size, 0, "Iterator readahead size");
@ -1724,9 +1735,9 @@ DEFINE_bool(allow_data_in_errors,
static const bool FLAGS_deletepercent_dummy __attribute__((__unused__)) =
RegisterFlagValidator(&FLAGS_deletepercent, &ValidateInt32Percent);
static const bool FLAGS_table_cache_numshardbits_dummy __attribute__((__unused__)) =
RegisterFlagValidator(&FLAGS_table_cache_numshardbits,
&ValidateTableCacheNumshardbits);
static const bool FLAGS_table_cache_numshardbits_dummy
__attribute__((__unused__)) = RegisterFlagValidator(
&FLAGS_table_cache_numshardbits, &ValidateTableCacheNumshardbits);
DEFINE_uint32(write_batch_protection_bytes_per_key, 0,
"Size of per-key-value checksum in each write batch. Currently "
@ -1775,11 +1786,7 @@ static Status CreateMemTableRepFactory(
} // namespace
enum DistributionType : unsigned char {
kFixed = 0,
kUniform,
kNormal
};
enum DistributionType : unsigned char { kFixed = 0, kUniform, kNormal };
static enum DistributionType FLAGS_value_size_distribution_type_e = kFixed;
@ -1811,33 +1818,27 @@ class BaseDistribution {
}
return val;
}
private:
virtual unsigned int Get() = 0;
virtual bool NeedTruncate() {
return true;
}
virtual bool NeedTruncate() { return true; }
unsigned int min_value_size_;
unsigned int max_value_size_;
};
class FixedDistribution : public BaseDistribution
{
class FixedDistribution : public BaseDistribution {
public:
FixedDistribution(unsigned int size) :
BaseDistribution(size, size),
size_(size) {}
FixedDistribution(unsigned int size)
: BaseDistribution(size, size), size_(size) {}
private:
virtual unsigned int Get() override {
return size_;
}
virtual bool NeedTruncate() override {
return false;
}
virtual unsigned int Get() override { return size_; }
virtual bool NeedTruncate() override { return false; }
unsigned int size_;
};
class NormalDistribution
: public BaseDistribution, public std::normal_distribution<double> {
class NormalDistribution : public BaseDistribution,
public std::normal_distribution<double> {
public:
NormalDistribution(unsigned int _min, unsigned int _max)
: BaseDistribution(_min, _max),
@ -1855,8 +1856,7 @@ class NormalDistribution
std::mt19937 gen_;
};
class UniformDistribution
: public BaseDistribution,
class UniformDistribution : public BaseDistribution,
public std::uniform_int_distribution<unsigned int> {
public:
UniformDistribution(unsigned int _min, unsigned int _max)
@ -1865,12 +1865,8 @@ class UniformDistribution
gen_(rd_()) {}
private:
virtual unsigned int Get() override {
return (*this)(gen_);
}
virtual bool NeedTruncate() override {
return false;
}
virtual unsigned int Get() override { return (*this)(gen_); }
virtual bool NeedTruncate() override { return false; }
std::random_device rd_;
std::mt19937 gen_;
};
@ -1883,7 +1879,6 @@ class RandomGenerator {
std::unique_ptr<BaseDistribution> dist_;
public:
RandomGenerator() {
auto max_value_size = FLAGS_value_size_max;
switch (FLAGS_value_size_distribution_type_e) {
@ -1892,8 +1887,8 @@ class RandomGenerator {
FLAGS_value_size_max));
break;
case kNormal:
dist_.reset(new NormalDistribution(FLAGS_value_size_min,
FLAGS_value_size_max));
dist_.reset(
new NormalDistribution(FLAGS_value_size_min, FLAGS_value_size_max));
break;
case kFixed:
default:
@ -1955,7 +1950,8 @@ struct DBWithColumnFamilies {
DBWithColumnFamilies()
: db(nullptr)
#ifndef ROCKSDB_LITE
, opt_txn_db(nullptr)
,
opt_txn_db(nullptr)
#endif // ROCKSDB_LITE
{
cfh.clear();
@ -2138,19 +2134,12 @@ enum OperationType : unsigned char {
};
static std::unordered_map<OperationType, std::string, std::hash<unsigned char>>
OperationTypeString = {
{kRead, "read"},
{kWrite, "write"},
{kDelete, "delete"},
{kSeek, "seek"},
{kMerge, "merge"},
{kUpdate, "update"},
{kCompress, "compress"},
{kCompress, "uncompress"},
{kCrc, "crc"},
{kHash, "hash"},
{kOthers, "op"}
};
OperationTypeString = {{kRead, "read"}, {kWrite, "write"},
{kDelete, "delete"}, {kSeek, "seek"},
{kMerge, "merge"}, {kUpdate, "update"},
{kCompress, "compress"}, {kCompress, "uncompress"},
{kCrc, "crc"}, {kHash, "hash"},
{kOthers, "op"}};
class CombinedStats;
class Stats {
@ -2168,7 +2157,8 @@ class Stats {
uint64_t last_op_finish_;
uint64_t last_report_finish_;
std::unordered_map<OperationType, std::shared_ptr<HistogramImpl>,
std::hash<unsigned char>> hist_;
std::hash<unsigned char>>
hist_;
std::string message_;
bool exclude_from_merge_;
ReporterAgent* reporter_agent_; // does not own
@ -2200,8 +2190,7 @@ class Stats {
}
void Merge(const Stats& other) {
if (other.exclude_from_merge_)
return;
if (other.exclude_from_merge_) return;
for (auto it = other.hist_.begin(); it != other.hist_.end(); ++it) {
auto this_it = hist_.find(it->first);
@ -2227,9 +2216,7 @@ class Stats {
seconds_ = (finish_ - start_) * 1e-6;
}
void AddMessage(Slice msg) {
AppendWithSpace(&message_, msg);
}
void AddMessage(Slice msg) { AppendWithSpace(&message_, msg); }
void SetId(int id) { id_ = id; }
void SetExcludeFromMerge() { exclude_from_merge_ = true; }
@ -2238,9 +2225,9 @@ class Stats {
std::vector<ThreadStatus> thread_list;
FLAGS_env->GetThreadList(&thread_list);
fprintf(stderr, "\n%18s %10s %12s %20s %13s %45s %12s %s\n",
"ThreadID", "ThreadType", "cfName", "Operation",
"ElapsedTime", "Stage", "State", "OperationProperties");
fprintf(stderr, "\n%18s %10s %12s %20s %13s %45s %12s %s\n", "ThreadID",
"ThreadType", "cfName", "Operation", "ElapsedTime", "Stage",
"State", "OperationProperties");
int64_t current_time = 0;
clock_->GetCurrentTime(&current_time).PermitUncheckedError();
@ -2257,8 +2244,8 @@ class Stats {
auto op_properties = ThreadStatus::InterpretOperationProperties(
ts.operation_type, ts.op_properties);
for (const auto& op_prop : op_properties) {
fprintf(stderr, " %s %" PRIu64" |",
op_prop.first.c_str(), op_prop.second);
fprintf(stderr, " %s %" PRIu64 " |", op_prop.first.c_str(),
op_prop.second);
}
fprintf(stderr, "\n");
}
@ -2266,13 +2253,9 @@ class Stats {
void ResetSineInterval() { sine_interval_ = clock_->NowMicros(); }
uint64_t GetSineInterval() {
return sine_interval_;
}
uint64_t GetSineInterval() { return sine_interval_; }
uint64_t GetStart() {
return start_;
}
uint64_t GetStart() { return start_; }
void ResetLastOpTime() {
// Set to now to avoid latency from calls to SleepForMicroseconds.
@ -2288,8 +2271,7 @@ class Stats {
uint64_t now = clock_->NowMicros();
uint64_t micros = now - last_op_finish_;
if (hist_.find(op_type) == hist_.end())
{
if (hist_.find(op_type) == hist_.end()) {
auto hist_temp = std::make_shared<HistogramImpl>();
hist_.insert({op_type, std::move(hist_temp)});
}
@ -2305,13 +2287,20 @@ class Stats {
done_ += num_ops;
if (done_ >= next_report_ && FLAGS_progress_reports) {
if (!FLAGS_stats_interval) {
if (next_report_ < 1000) next_report_ += 100;
else if (next_report_ < 5000) next_report_ += 500;
else if (next_report_ < 10000) next_report_ += 1000;
else if (next_report_ < 50000) next_report_ += 5000;
else if (next_report_ < 100000) next_report_ += 10000;
else if (next_report_ < 500000) next_report_ += 50000;
else next_report_ += 100000;
if (next_report_ < 1000)
next_report_ += 100;
else if (next_report_ < 5000)
next_report_ += 500;
else if (next_report_ < 10000)
next_report_ += 1000;
else if (next_report_ < 50000)
next_report_ += 5000;
else if (next_report_ < 100000)
next_report_ += 10000;
else if (next_report_ < 500000)
next_report_ += 50000;
else
next_report_ += 100000;
fprintf(stderr, "... finished %" PRIu64 " ops%30s\r", done_, "");
} else {
uint64_t now = clock_->NowMicros();
@ -2397,9 +2386,7 @@ class Stats {
}
}
void AddBytes(int64_t n) {
bytes_ += n;
}
void AddBytes(int64_t n) { bytes_ += n; }
void Report(const Slice& name) {
// Pretend at least one op was done in case we are running a benchmark
@ -2810,28 +2797,30 @@ class Benchmark {
FLAGS_key_size, FLAGS_user_timestamp_size);
auto avg_value_size = FLAGS_value_size;
if (FLAGS_value_size_distribution_type_e == kFixed) {
fprintf(stdout, "Values: %d bytes each (%d bytes after compression)\n",
fprintf(stdout,
"Values: %d bytes each (%d bytes after compression)\n",
avg_value_size,
static_cast<int>(avg_value_size * FLAGS_compression_ratio + 0.5));
} else {
avg_value_size = (FLAGS_value_size_min + FLAGS_value_size_max) / 2;
fprintf(stdout, "Values: %d avg bytes each (%d bytes after compression)\n",
fprintf(stdout,
"Values: %d avg bytes each (%d bytes after compression)\n",
avg_value_size,
static_cast<int>(avg_value_size * FLAGS_compression_ratio + 0.5));
fprintf(stdout, "Values Distribution: %s (min: %d, max: %d)\n",
FLAGS_value_size_distribution_type.c_str(),
FLAGS_value_size_min, FLAGS_value_size_max);
FLAGS_value_size_distribution_type.c_str(), FLAGS_value_size_min,
FLAGS_value_size_max);
}
fprintf(stdout, "Entries: %" PRIu64 "\n", num_);
fprintf(stdout, "Prefix: %d bytes\n", FLAGS_prefix_size);
fprintf(stdout, "Keys per prefix: %" PRIu64 "\n", keys_per_prefix_);
fprintf(stdout, "RawSize: %.1f MB (estimated)\n",
((static_cast<int64_t>(FLAGS_key_size + avg_value_size) * num_)
/ 1048576.0));
fprintf(stdout, "FileSize: %.1f MB (estimated)\n",
(((FLAGS_key_size + avg_value_size * FLAGS_compression_ratio)
* num_)
/ 1048576.0));
((static_cast<int64_t>(FLAGS_key_size + avg_value_size) * num_) /
1048576.0));
fprintf(
stdout, "FileSize: %.1f MB (estimated)\n",
(((FLAGS_key_size + avg_value_size * FLAGS_compression_ratio) * num_) /
1048576.0));
fprintf(stdout, "Write rate: %" PRIu64 " bytes/second\n",
FLAGS_benchmark_write_rate_limit);
fprintf(stdout, "Read rate: %" PRIu64 " ops/second\n",
@ -2865,9 +2854,9 @@ class Benchmark {
void PrintWarnings(const char* compression) {
#if defined(__GNUC__) && !defined(__OPTIMIZE__)
fprintf(stdout,
"WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"
);
fprintf(
stdout,
"WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
#endif
#ifndef NDEBUG
fprintf(stdout,
@ -4230,7 +4219,8 @@ class Benchmark {
} else if ((FLAGS_prefix_size == 0) &&
(options.memtable_factory->IsInstanceOf("prefix_hash") ||
options.memtable_factory->IsInstanceOf("hash_linkedlist"))) {
fprintf(stderr, "prefix_size should be non-zero if PrefixHash or "
fprintf(stderr,
"prefix_size should be non-zero if PrefixHash or "
"HashLinkedList memtablerep is used\n");
exit(1);
}
@ -4272,8 +4262,8 @@ class Benchmark {
ROCKSDB_NAMESPACE::CuckooTableOptions table_options;
table_options.hash_table_ratio = FLAGS_cuckoo_hash_ratio;
table_options.identity_as_first_hash = FLAGS_identity_as_first_hash;
options.table_factory = std::shared_ptr<TableFactory>(
NewCuckooTableFactory(table_options));
options.table_factory =
std::shared_ptr<TableFactory>(NewCuckooTableFactory(table_options));
#else
fprintf(stderr, "Cuckoo table is not supported in lite mode\n");
exit(1);
@ -4546,8 +4536,7 @@ class Benchmark {
for (int i = 0; i < FLAGS_min_level_to_compress; i++) {
options.compression_per_level[i] = kNoCompression;
}
for (int i = FLAGS_min_level_to_compress;
i < FLAGS_num_levels; i++) {
for (int i = FLAGS_min_level_to_compress; i < FLAGS_num_levels; i++) {
options.compression_per_level[i] = FLAGS_compression_type_e;
}
}
@ -4848,8 +4837,8 @@ class Benchmark {
}
#ifndef ROCKSDB_LITE
if (FLAGS_readonly) {
s = DB::OpenForReadOnly(options, db_name, column_families,
&db->cfh, &db->db);
s = DB::OpenForReadOnly(options, db_name, column_families, &db->cfh,
&db->db);
} else if (FLAGS_optimistic_transaction_db) {
s = OptimisticTransactionDB::Open(options, db_name, column_families,
&db->cfh, &db->opt_txn_db);
@ -4960,9 +4949,7 @@ class Benchmark {
}
}
enum WriteMode {
RANDOM, SEQUENTIAL, UNIQUE_RANDOM
};
enum WriteMode { RANDOM, SEQUENTIAL, UNIQUE_RANDOM };
void WriteSeqDeterministic(ThreadState* thread) {
DoDeterministicCompact(thread, open_options_.compaction_style, SEQUENTIAL);
@ -4973,13 +4960,9 @@ class Benchmark {
UNIQUE_RANDOM);
}
void WriteSeq(ThreadState* thread) {
DoWrite(thread, SEQUENTIAL);
}
void WriteSeq(ThreadState* thread) { DoWrite(thread, SEQUENTIAL); }
void WriteRandom(ThreadState* thread) {
DoWrite(thread, RANDOM);
}
void WriteRandom(ThreadState* thread) { DoWrite(thread, RANDOM); }
void WriteUniqueRandom(ThreadState* thread) {
DoWrite(thread, UNIQUE_RANDOM);
@ -5033,9 +5016,7 @@ class Benchmark {
std::vector<uint64_t> values_;
};
DB* SelectDB(ThreadState* thread) {
return SelectDBWithCfh(thread)->db;
}
DB* SelectDB(ThreadState* thread) { return SelectDBWithCfh(thread)->db; }
DBWithColumnFamilies* SelectDBWithCfh(ThreadState* thread) {
return SelectDBWithCfh(thread->rand.Next());
@ -5354,8 +5335,7 @@ class Benchmark {
// We use same rand_num as seed for key and column family so that we
// can deterministically find the cfh corresponding to a particular
// key while reading the key.
batch.Put(db_with_cfh->GetCfh(rand_num), key,
val);
batch.Put(db_with_cfh->GetCfh(rand_num), key, val);
}
batch_bytes += val.size() + key_size_ + user_timestamp_size_;
bytes += val.size() + key_size_ + user_timestamp_size_;
@ -5427,8 +5407,8 @@ class Benchmark {
}
if (thread->shared->write_rate_limiter.get() != nullptr) {
thread->shared->write_rate_limiter->Request(
batch_bytes, Env::IO_HIGH,
nullptr /* stats */, RateLimiter::OpType::kWrite);
batch_bytes, Env::IO_HIGH, nullptr /* stats */,
RateLimiter::OpType::kWrite);
// Set time at which last op finished to Now() to hide latency and
// sleep from rate limiter. Also, do the check once per batch, not
// once per write.
@ -5564,11 +5544,13 @@ class Benchmark {
continue;
}
}
writes_ /= static_cast<int64_t>(open_options_.max_bytes_for_level_multiplier);
writes_ /=
static_cast<int64_t>(open_options_.max_bytes_for_level_multiplier);
}
for (size_t i = 0; i < num_db; i++) {
if (sorted_runs[i].size() < num_levels - 1) {
fprintf(stderr, "n is too small to fill %" ROCKSDB_PRIszt " levels\n", num_levels);
fprintf(stderr, "n is too small to fill %" ROCKSDB_PRIszt " levels\n",
num_levels);
exit(1);
}
}
@ -5579,12 +5561,13 @@ class Benchmark {
auto options = db->GetOptions();
MutableCFOptions mutable_cf_options(options);
for (size_t j = 0; j < sorted_runs[i].size(); j++) {
compactionOptions.output_file_size_limit =
MaxFileSizeForLevel(mutable_cf_options,
static_cast<int>(output_level), compaction_style);
compactionOptions.output_file_size_limit = MaxFileSizeForLevel(
mutable_cf_options, static_cast<int>(output_level),
compaction_style);
std::cout << sorted_runs[i][j].size() << std::endl;
db->CompactFiles(compactionOptions, {sorted_runs[i][j].back().name,
sorted_runs[i][j].front().name},
db->CompactFiles(
compactionOptions,
{sorted_runs[i][j].back().name, sorted_runs[i][j].front().name},
static_cast<int>(output_level - j) /*level*/);
}
}
@ -5616,11 +5599,13 @@ class Benchmark {
}
num_files_at_level0[i] = meta.levels[0].files.size();
}
writes_ = static_cast<int64_t>(writes_* static_cast<double>(100) / (ratio + 200));
writes_ = static_cast<int64_t>(writes_ * static_cast<double>(100) /
(ratio + 200));
}
for (size_t i = 0; i < num_db; i++) {
if (sorted_runs[i].size() < num_levels) {
fprintf(stderr, "n is too small to fill %" ROCKSDB_PRIszt " levels\n", num_levels);
fprintf(stderr, "n is too small to fill %" ROCKSDB_PRIszt " levels\n",
num_levels);
exit(1);
}
}
@ -5631,9 +5616,9 @@ class Benchmark {
auto options = db->GetOptions();
MutableCFOptions mutable_cf_options(options);
for (size_t j = 0; j < sorted_runs[i].size(); j++) {
compactionOptions.output_file_size_limit =
MaxFileSizeForLevel(mutable_cf_options,
static_cast<int>(output_level), compaction_style);
compactionOptions.output_file_size_limit = MaxFileSizeForLevel(
mutable_cf_options, static_cast<int>(output_level),
compaction_style);
db->CompactFiles(
compactionOptions,
{sorted_runs[i][j].back().name, sorted_runs[i][j].front().name},
@ -5765,7 +5750,9 @@ class Benchmark {
for (size_t k = 0; k < num_db; k++) {
auto db = db_list[k];
fprintf(stdout,
"---------------------- DB %" ROCKSDB_PRIszt " LSM ---------------------\n", k);
"---------------------- DB %" ROCKSDB_PRIszt
" LSM ---------------------\n",
k);
db->GetColumnFamilyMetaData(&meta);
for (auto& levelMeta : meta.levels) {
if (levelMeta.files.empty()) {
@ -5983,7 +5970,9 @@ class Benchmark {
} while (!duration.Done(100));
char msg[100];
snprintf(msg, sizeof(msg), "(%" PRIu64 " of %" PRIu64 " found, "
snprintf(msg, sizeof(msg),
"(%" PRIu64 " of %" PRIu64
" found, "
"issued %" PRIu64 " non-exist keys)\n",
found, read, nonexist);
@ -6119,8 +6108,8 @@ class Benchmark {
}
char msg[100];
snprintf(msg, sizeof(msg), "(%" PRIu64 " of %" PRIu64 " found)\n",
found, read);
snprintf(msg, sizeof(msg), "(%" PRIu64 " of %" PRIu64 " found)\n", found,
read);
thread->stats.AddBytes(bytes);
thread->stats.AddMessage(msg);
@ -6219,8 +6208,8 @@ class Benchmark {
}
char msg[100];
snprintf(msg, sizeof(msg), "(%" PRIu64 " of %" PRIu64 " found)",
found, read);
snprintf(msg, sizeof(msg), "(%" PRIu64 " of %" PRIu64 " found)", found,
read);
thread->stats.AddBytes(bytes);
thread->stats.AddMessage(msg);
}
@ -6622,8 +6611,8 @@ class Benchmark {
} else if (query_type == 1) {
// the Put query
puts++;
int64_t val_size = ParetoCdfInversion(
u, FLAGS_value_theta, FLAGS_value_k, FLAGS_value_sigma);
int64_t val_size = ParetoCdfInversion(u, FLAGS_value_theta,
FLAGS_value_k, FLAGS_value_sigma);
if (val_size < 10) {
val_size = 10;
} else if (val_size > value_max) {
@ -6830,8 +6819,8 @@ class Benchmark {
}
char msg[100];
snprintf(msg, sizeof(msg), "(%" PRIu64 " of %" PRIu64 " found)\n",
found, read);
snprintf(msg, sizeof(msg), "(%" PRIu64 " of %" PRIu64 " found)\n", found,
read);
thread->stats.AddBytes(bytes);
thread->stats.AddMessage(msg);
}
@ -6894,13 +6883,9 @@ class Benchmark {
}
}
void DeleteSeq(ThreadState* thread) {
DoDelete(thread, true);
}
void DeleteSeq(ThreadState* thread) { DoDelete(thread, true); }
void DeleteRandom(ThreadState* thread) {
DoDelete(thread, false);
}
void DeleteRandom(ThreadState* thread) { DoDelete(thread, false); }
void ReadWhileWriting(ThreadState* thread) {
if (thread->tid > 0) {
@ -7006,9 +6991,9 @@ class Benchmark {
thread->stats.FinishedOps(&db_, db_.db, 1, kWrite);
if (FLAGS_benchmark_write_rate_limit > 0) {
write_rate_limiter->Request(
key.size() + val.size(), Env::IO_HIGH,
nullptr /* stats */, RateLimiter::OpType::kWrite);
write_rate_limiter->Request(key.size() + val.size(), Env::IO_HIGH,
nullptr /* stats */,
RateLimiter::OpType::kWrite);
}
if (writes_per_range_tombstone_ > 0 &&
@ -7132,7 +7117,6 @@ class Benchmark {
return s;
}
// Given a key K, this deletes (K+"0", V), (K+"1", V), (K+"2", V)
// in DB atomically i.e in a single batch. Also refer GetMany.
Status DeleteMany(DB* db, const WriteOptions& writeoptions,
@ -7282,8 +7266,8 @@ class Benchmark {
}
char msg[128];
snprintf(msg, sizeof(msg),
"( get:%" PRIu64 " put:%" PRIu64 " del:%" PRIu64 " total:%" \
PRIu64 " found:%" PRIu64 ")",
"( get:%" PRIu64 " put:%" PRIu64 " del:%" PRIu64 " total:%" PRIu64
" found:%" PRIu64 ")",
gets_done, puts_done, deletes_done, readwrites_, found);
thread->stats.AddMessage(msg);
}
@ -7357,8 +7341,9 @@ class Benchmark {
}
}
char msg[100];
snprintf(msg, sizeof(msg), "( reads:%" PRIu64 " writes:%" PRIu64 \
" total:%" PRIu64 " found:%" PRIu64 ")",
snprintf(msg, sizeof(msg),
"( reads:%" PRIu64 " writes:%" PRIu64 " total:%" PRIu64
" found:%" PRIu64 ")",
reads_done, writes_done, readwrites_, found);
thread->stats.AddMessage(msg);
}
@ -7422,8 +7407,8 @@ class Benchmark {
thread->stats.FinishedOps(nullptr, db, 1, kUpdate);
}
char msg[100];
snprintf(msg, sizeof(msg),
"( updates:%" PRIu64 " found:%" PRIu64 ")", readwrites_, found);
snprintf(msg, sizeof(msg), "( updates:%" PRIu64 " found:%" PRIu64 ")",
readwrites_, found);
thread->stats.AddBytes(bytes);
thread->stats.AddMessage(msg);
}
@ -7466,7 +7451,8 @@ class Benchmark {
exit(1);
}
Slice value = gen.Generate(static_cast<unsigned int>(existing_value.size()));
Slice value =
gen.Generate(static_cast<unsigned int>(existing_value.size()));
std::string new_value;
if (status.ok()) {
@ -7490,8 +7476,8 @@ class Benchmark {
thread->stats.FinishedOps(nullptr, db, 1);
}
char msg[100];
snprintf(msg, sizeof(msg),
"( updates:%" PRIu64 " found:%" PRIu64 ")", readwrites_, found);
snprintf(msg, sizeof(msg), "( updates:%" PRIu64 " found:%" PRIu64 ")",
readwrites_, found);
thread->stats.AddMessage(msg);
}
@ -7592,12 +7578,10 @@ class Benchmark {
Slice val = gen.Generate();
if (FLAGS_num_column_families > 1) {
s = db_with_cfh->db->Merge(write_options_,
db_with_cfh->GetCfh(key_rand), key,
val);
db_with_cfh->GetCfh(key_rand), key, val);
} else {
s = db_with_cfh->db->Merge(write_options_,
db_with_cfh->db->DefaultColumnFamily(), key,
val);
s = db_with_cfh->db->Merge(
write_options_, db_with_cfh->db->DefaultColumnFamily(), key, val);
}
if (!s.ok()) {
@ -7650,8 +7634,7 @@ class Benchmark {
thread->stats.FinishedOps(nullptr, db, 1, kMerge);
} else {
Status s = db->Get(read_options_, key, &value);
if (value.length() > max_length)
max_length = value.length();
if (value.length() > max_length) max_length = value.length();
if (!s.ok() && !s.IsNotFound()) {
fprintf(stderr, "get error: %s\n", s.ToString().c_str());
@ -7920,9 +7903,8 @@ class Benchmark {
return;
}
Status s =
RandomTransactionInserter::Verify(db_.db,
static_cast<uint16_t>(FLAGS_transaction_sets));
Status s = RandomTransactionInserter::Verify(
db_.db, static_cast<uint16_t>(FLAGS_transaction_sets));
if (s.ok()) {
fprintf(stdout, "RandomTransactionVerify Success.\n");
@ -8142,9 +8124,9 @@ class Benchmark {
thread->stats.AddBytes(bytes);
if (FLAGS_benchmark_write_rate_limit > 0) {
write_rate_limiter->Request(
key.size() + val.size(), Env::IO_HIGH,
nullptr /* stats */, RateLimiter::OpType::kWrite);
write_rate_limiter->Request(key.size() + val.size(), Env::IO_HIGH,
nullptr /* stats */,
RateLimiter::OpType::kWrite);
}
}
}

@ -5,19 +5,19 @@
#include <cstdio>
#include <cstdlib>
#include <vector>
#include <memory>
#include <vector>
#include "port/port.h"
#include "rocksdb/comparator.h"
#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "rocksdb/env.h"
#include "rocksdb/filter_policy.h"
#include "rocksdb/options.h"
#include "rocksdb/slice.h"
#include "rocksdb/slice_transform.h"
#include "rocksdb/status.h"
#include "rocksdb/comparator.h"
#include "rocksdb/table.h"
#include "rocksdb/slice_transform.h"
#include "rocksdb/filter_policy.h"
#include "port/port.h"
#include "util/string_util.h"
namespace ROCKSDB_NAMESPACE {

@ -5,11 +5,12 @@
#ifndef ROCKSDB_LITE
#include "rocksdb/db_dump_tool.h"
#include <cinttypes>
#include <iostream>
#include "rocksdb/db.h"
#include "rocksdb/db_dump_tool.h"
#include "rocksdb/env.h"
#include "util/coding.h"

@ -122,7 +122,7 @@ void DumpSstFile(Options options, std::string filename, bool output_hex,
void DumpBlobFile(const std::string& filename, bool is_key_hex,
bool is_value_hex, bool dump_uncompressed_blobs);
};
}; // namespace
LDBCommand* LDBCommand::InitFromCmdLineArgs(
int argc, char const* const* argv, const Options& options,
@ -295,8 +295,7 @@ LDBCommand* LDBCommand::SelectCommand(const ParsedParams& parsed_params) {
parsed_params.flags);
} else if (parsed_params.cmd == CheckPointCommand::Name()) {
return new CheckPointCommand(parsed_params.cmd_params,
parsed_params.option_map,
parsed_params.flags);
parsed_params.option_map, parsed_params.flags);
} else if (parsed_params.cmd == RepairCommand::Name()) {
return new RepairCommand(parsed_params.cmd_params, parsed_params.option_map,
parsed_params.flags);
@ -1373,7 +1372,6 @@ ManifestDumpCommand::ManifestDumpCommand(
}
void ManifestDumpCommand::DoCommand() {
std::string manifestfile;
if (!path_.empty()) {
@ -1860,11 +1858,10 @@ void InternalDumpCommand::DoCommand() {
s1 = 0;
row = ikey.Encode().ToString();
val = key_version.value;
for(k=0;row[k]!='\x01' && row[k]!='\0';k++)
s1++;
for(k=0;val[k]!='\x01' && val[k]!='\0';k++)
s1++;
for(int j=0;row[j]!=delim_[0] && row[j]!='\0' && row[j]!='\x01';j++)
for (k = 0; row[k] != '\x01' && row[k] != '\0'; k++) s1++;
for (k = 0; val[k] != '\x01' && val[k] != '\0'; k++) s1++;
for (int j = 0; row[j] != delim_[0] && row[j] != '\0' && row[j] != '\x01';
j++)
rtype1 += row[j];
if (rtype2.compare("") && rtype2.compare(rtype1) != 0) {
fprintf(stdout, "%s => count:%" PRIu64 "\tsize:%" PRIu64 "\n",
@ -2138,11 +2135,9 @@ void DBDumperCommand::DoDumpCommand() {
for (; iter->Valid(); iter->Next()) {
int rawtime = 0;
// If end marker was specified, we stop before it
if (!null_to_ && (iter->key().ToString() >= to_))
break;
if (!null_to_ && (iter->key().ToString() >= to_)) break;
// Terminate if maximum number of keys have been dumped
if (max_keys == 0)
break;
if (max_keys == 0) break;
if (is_db_ttl_) {
TtlIterator* it_ttl = static_cast_with_check<TtlIterator>(iter);
rawtime = it_ttl->ttl_timestamp();
@ -2176,7 +2171,6 @@ void DBDumperCommand::DoDumpCommand() {
s2 += s1;
rtype2 = rtype1;
}
}
if (count_only_) {
@ -2265,8 +2259,7 @@ void ReduceDBLevelsCommand::OverrideBaseCFOptions(
cf_opts->max_bytes_for_level_multiplier = 1;
}
Status ReduceDBLevelsCommand::GetOldNumOfLevels(Options& opt,
int* levels) {
Status ReduceDBLevelsCommand::GetOldNumOfLevels(Options& opt, int* levels) {
ImmutableDBOptions db_options(opt);
EnvOptions soptions;
std::shared_ptr<Cache> tc(
@ -2716,7 +2709,6 @@ WALDumperCommand::WALDumperCommand(
wal_file_ = itr->second;
}
print_header_ = IsFlagPresent(flags, ARG_PRINT_HEADER);
print_values_ = IsFlagPresent(flags, ARG_PRINT_VALUE);
is_write_committed_ = ParseBooleanOption(options, ARG_WRITE_COMMITTED, true);
@ -3253,7 +3245,8 @@ void DBQuerierCommand::Help(std::string& ret) {
ret.append(DBQuerierCommand::Name());
ret.append(" [--" + ARG_TTL + "]");
ret.append("\n");
ret.append(" Starts a REPL shell. Type help for list of available "
ret.append(
" Starts a REPL shell. Type help for list of available "
"commands.");
ret.append("\n");
}
@ -3315,8 +3308,8 @@ void DBQuerierCommand::DoCommand() {
key = (is_key_hex_ ? HexToString(tokens[1]) : tokens[1]);
s = db_->Get(read_options, GetCfHandle(), Slice(key), &value);
if (s.ok()) {
fprintf(stdout, "%s\n", PrintKeyValue(key, value,
is_key_hex_, is_value_hex_).c_str());
fprintf(stdout, "%s\n",
PrintKeyValue(key, value, is_key_hex_, is_value_hex_).c_str());
} else {
if (s.IsNotFound()) {
fprintf(stdout, "Not found %s\n", tokens[1].c_str());

@ -5,13 +5,13 @@
#pragma once
#include "rocksdb/utilities/ldb_cmd.h"
#include <map>
#include <string>
#include <utility>
#include <vector>
#include "rocksdb/utilities/ldb_cmd.h"
namespace ROCKSDB_NAMESPACE {
class CompactorCommand : public LDBCommand {
@ -581,6 +581,7 @@ class CheckPointCommand : public LDBCommand {
static void Help(std::string& ret);
std::string checkpoint_dir_;
private:
static const std::string ARG_CHECKPOINT_DIR;
};

@ -26,9 +26,9 @@
#include "util/file_checksum_helper.h"
#include "util/random.h"
using std::map;
using std::string;
using std::vector;
using std::map;
namespace ROCKSDB_NAMESPACE {

@ -5,6 +5,7 @@
//
#ifndef ROCKSDB_LITE
#include "rocksdb/ldb_tool.h"
#include "rocksdb/utilities/ldb_cmd.h"
#include "tools/ldb_cmd_impl.h"

@ -6,13 +6,12 @@
#include "util/stop_watch.h"
#ifndef ROCKSDB_LITE
#include "tools/simulated_hybrid_file_system.h"
#include <algorithm>
#include <sstream>
#include <string>
#include "rocksdb/rate_limiter.h"
#include "tools/simulated_hybrid_file_system.h"
namespace ROCKSDB_NAMESPACE {

@ -259,9 +259,9 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) {
try {
in_key = ROCKSDB_NAMESPACE::LDBCommand::HexToString(in_key);
} catch (...) {
std::cerr << "ERROR: Invalid key input '"
<< in_key
<< "' Use 0x{hex representation of internal rocksdb key}" << std::endl;
std::cerr << "ERROR: Invalid key input '" << in_key
<< "' Use 0x{hex representation of internal rocksdb key}"
<< std::endl;
return -1;
}
Slice sl_key = ROCKSDB_NAMESPACE::Slice(in_key);
@ -337,7 +337,8 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) {
exit(1);
}
} else if (has_compression_level_from || has_compression_level_to) {
fprintf(stderr, "Specify both --compression_level_from and "
fprintf(stderr,
"Specify both --compression_level_from and "
"--compression_level_to.\n\n");
exit(1);
}
@ -476,8 +477,7 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) {
has_from || use_from_as_prefix, from_key, has_to, to_key,
use_from_as_prefix);
if (!st.ok()) {
fprintf(stderr, "%s: %s\n", filename.c_str(),
st.ToString().c_str());
fprintf(stderr, "%s: %s\n", filename.c_str(), st.ToString().c_str());
}
total_read += dumper.GetReadNumber();
if (read_num > 0 && total_read > read_num) {

@ -208,13 +208,16 @@ class WriteStress {
SystemClock::Default()->SleepForMicroseconds(
static_cast<int>(FLAGS_prefix_mutate_period_sec * 1000 * 1000LL));
if (dist(rng) < FLAGS_first_char_mutate_probability) {
key_prefix_[0].store(static_cast<char>(char_dist(rng)), std::memory_order_relaxed);
key_prefix_[0].store(static_cast<char>(char_dist(rng)),
std::memory_order_relaxed);
}
if (dist(rng) < FLAGS_second_char_mutate_probability) {
key_prefix_[1].store(static_cast<char>(char_dist(rng)), std::memory_order_relaxed);
key_prefix_[1].store(static_cast<char>(char_dist(rng)),
std::memory_order_relaxed);
}
if (dist(rng) < FLAGS_third_char_mutate_probability) {
key_prefix_[2].store(static_cast<char>(char_dist(rng)), std::memory_order_relaxed);
key_prefix_[2].store(static_cast<char>(char_dist(rng)),
std::memory_order_relaxed);
}
}
}

Loading…
Cancel
Save