Run clang format against files under tools/ and db_stress_tool/ (#10868)

Summary:
Some lines of the .h and .cc files under these directories are not properly formatted. Clean them up with clang-format.
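
The exact invocation isn't recorded in this commit; as a minimal sketch, assuming clang-format is installed and the command is run from the repository root, something along these lines applies the same cleanup (note that it touches every .cc/.h file under the two directories, so it may reformat more than this commit did):

    find tools db_stress_tool \( -name '*.cc' -o -name '*.h' \) -print0 | \
        xargs -0 clang-format -i  # hypothetical invocation, not part of this commit

The -i flag rewrites the files in place; without it, clang-format prints the formatted output to stdout.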

Pull Request resolved: https://github.com/facebook/rocksdb/pull/10868

Test Plan: Watch existing CI to pass

Reviewed By: ajkr

Differential Revision: D40683485

fbshipit-source-id: 491fbb78b2cdcb948164f306829909ad816d5d0b
Branch: main
Commit: 48fe921754 (parent: 95a1935cb1)
Author: sdong, committed by Facebook GitHub Bot
18 changed files (changed line counts in parentheses):

  1. db_stress_tool/batched_ops_stress.cc (4)
  2. db_stress_tool/db_stress_common.h (4)
  3. db_stress_tool/db_stress_shared_state.h (4)
  4. tools/blob_dump.cc (1)
  5. tools/block_cache_analyzer/block_cache_trace_analyzer.cc (14)
  6. tools/block_cache_analyzer/block_cache_trace_analyzer.h (3)
  7. tools/db_bench_tool.cc (524)
  8. tools/db_sanity_test.cc (12)
  9. tools/dump/db_dump_tool.cc (3)
  10. tools/ldb_cmd.cc (95)
  11. tools/ldb_cmd_impl.h (5)
  12. tools/ldb_cmd_test.cc (4)
  13. tools/ldb_tool.cc (1)
  14. tools/reduce_levels_test.cc (4)
  15. tools/simulated_hybrid_file_system.cc (3)
  16. tools/sst_dump_tool.cc (18)
  17. tools/trace_analyzer_test.cc (2)
  18. tools/write_stress.cc (9)

@@ -188,8 +188,8 @@ class BatchedOpsStressTest : public StressTest {
const std::vector<int64_t>& rand_keys) override {
size_t num_keys = rand_keys.size();
std::vector<Status> ret_status(num_keys);
std::array<std::string, 10> keys = {{"0", "1", "2", "3", "4",
"5", "6", "7", "8", "9"}};
std::array<std::string, 10> keys = {
{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9"}};
size_t num_prefixes = keys.size();
for (size_t rand_key = 0; rand_key < num_keys; ++rand_key) {
std::vector<Slice> key_slices;

@@ -509,8 +509,8 @@ extern inline std::string Key(int64_t val) {
if (offset < weight) {
// Use the bottom 3 bits of offset as the number of trailing 'x's in the
// key. If the next key is going to be of the next level, then skip the
// trailer as it would break ordering. If the key length is already at max,
// skip the trailer.
// trailer as it would break ordering. If the key length is already at
// max, skip the trailer.
if (offset < weight - 1 && level < levels - 1) {
size_t trailer_len = offset & 0x7;
key.append(trailer_len, 'x');

@@ -333,9 +333,7 @@ class SharedState {
uint64_t GetStartTimestamp() const { return start_timestamp_; }
private:
static void IgnoreReadErrorCallback(void*) {
ignore_read_error = true;
}
static void IgnoreReadErrorCallback(void*) { ignore_read_error = true; }
// Pick random keys in each column family that will not experience overwrite.
std::unordered_set<int64_t> GenerateNoOverwriteIds() const {

@@ -5,6 +5,7 @@
#ifndef ROCKSDB_LITE
#include <getopt.h>
#include <cstdio>
#include <string>
#include <unordered_map>

@@ -1175,7 +1175,8 @@ void BlockCacheTraceAnalyzer::WriteReuseLifetime(
}
void BlockCacheTraceAnalyzer::WriteBlockReuseTimeline(
const uint64_t reuse_window, bool user_access_only, TraceType block_type) const {
const uint64_t reuse_window, bool user_access_only,
TraceType block_type) const {
// A map from block key to an array of bools that states whether a block is
// accessed in a time window.
std::map<uint64_t, std::vector<bool>> block_accessed;
@@ -1214,7 +1215,8 @@ void BlockCacheTraceAnalyzer::WriteBlockReuseTimeline(
TraverseBlocks(block_callback);
// A cell is the number of blocks accessed in a reuse window.
std::unique_ptr<uint64_t[]> reuse_table(new uint64_t[reuse_vector_size * reuse_vector_size]);
std::unique_ptr<uint64_t[]> reuse_table(
new uint64_t[reuse_vector_size * reuse_vector_size]);
for (uint64_t start_time = 0; start_time < reuse_vector_size; start_time++) {
// Initialize the reuse_table.
for (uint64_t i = 0; i < reuse_vector_size; i++) {
@@ -1255,7 +1257,8 @@ void BlockCacheTraceAnalyzer::WriteBlockReuseTimeline(
if (j < start_time) {
row += "100.0";
} else {
row += std::to_string(percent(reuse_table[start_time * reuse_vector_size + j],
row += std::to_string(
percent(reuse_table[start_time * reuse_vector_size + j],
reuse_table[start_time * reuse_vector_size + start_time]));
}
}
@@ -1811,8 +1814,9 @@ void BlockCacheTraceAnalyzer::PrintDataBlockAccessStats() const {
return;
}
// Use four decimal points.
uint64_t percent_referenced_for_existing_keys = (uint64_t)(
((double)block.key_num_access_map.size() / (double)block.num_keys) *
uint64_t percent_referenced_for_existing_keys =
(uint64_t)(((double)block.key_num_access_map.size() /
(double)block.num_keys) *
10000.0);
uint64_t percent_referenced_for_non_existing_keys =
(uint64_t)(((double)block.non_exist_key_num_access_map.size() /

@@ -292,7 +292,8 @@ class BlockCacheTraceAnalyzer {
// The file is named
// "block_type_user_access_only_reuse_window_reuse_timeline". The file format
// is start_time,0,1,...,N where N equals trace_duration / reuse_window.
void WriteBlockReuseTimeline(const uint64_t reuse_window, bool user_access_only,
void WriteBlockReuseTimeline(const uint64_t reuse_window,
bool user_access_only,
TraceType block_type) const;
// Write the Get spatical locality into csv files saved in 'output_dir'.

File diff suppressed because it is too large (tools/db_bench_tool.cc, 524 changed lines).

@@ -5,19 +5,19 @@
#include <cstdio>
#include <cstdlib>
#include <vector>
#include <memory>
#include <vector>
#include "port/port.h"
#include "rocksdb/comparator.h"
#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "rocksdb/env.h"
#include "rocksdb/filter_policy.h"
#include "rocksdb/options.h"
#include "rocksdb/slice.h"
#include "rocksdb/slice_transform.h"
#include "rocksdb/status.h"
#include "rocksdb/comparator.h"
#include "rocksdb/table.h"
#include "rocksdb/slice_transform.h"
#include "rocksdb/filter_policy.h"
#include "port/port.h"
#include "util/string_util.h"
namespace ROCKSDB_NAMESPACE {

@@ -5,11 +5,12 @@
#ifndef ROCKSDB_LITE
#include "rocksdb/db_dump_tool.h"
#include <cinttypes>
#include <iostream>
#include "rocksdb/db.h"
#include "rocksdb/db_dump_tool.h"
#include "rocksdb/env.h"
#include "util/coding.h"

@@ -122,7 +122,7 @@ void DumpSstFile(Options options, std::string filename, bool output_hex,
void DumpBlobFile(const std::string& filename, bool is_key_hex,
bool is_value_hex, bool dump_uncompressed_blobs);
};
}; // namespace
LDBCommand* LDBCommand::InitFromCmdLineArgs(
int argc, char const* const* argv, const Options& options,
@@ -165,7 +165,7 @@ LDBCommand* LDBCommand::InitFromCmdLineArgs(
const std::string OPTION_PREFIX = "--";
for (const auto& arg : args) {
if (arg[0] == '-' && arg[1] == '-'){
if (arg[0] == '-' && arg[1] == '-') {
std::vector<std::string> splits = StringSplit(arg, '=');
// --option_name=option_value
if (splits.size() == 2) {
@@ -295,8 +295,7 @@ LDBCommand* LDBCommand::SelectCommand(const ParsedParams& parsed_params) {
parsed_params.flags);
} else if (parsed_params.cmd == CheckPointCommand::Name()) {
return new CheckPointCommand(parsed_params.cmd_params,
parsed_params.option_map,
parsed_params.flags);
parsed_params.option_map, parsed_params.flags);
} else if (parsed_params.cmd == RepairCommand::Name()) {
return new RepairCommand(parsed_params.cmd_params, parsed_params.option_map,
parsed_params.flags);
@@ -1284,7 +1283,7 @@ void DBLoaderCommand::DoCommand() {
} else if (0 == line.find("Created bg thread 0x")) {
// ignore this line
} else {
bad_lines ++;
bad_lines++;
}
}
@@ -1373,7 +1372,6 @@ ManifestDumpCommand::ManifestDumpCommand(
}
void ManifestDumpCommand::DoCommand() {
std::string manifestfile;
if (!path_.empty()) {
@@ -1739,7 +1737,7 @@ void PrintBucketCounts(const std::vector<uint64_t>& bucket_counts,
int ttl_start, int ttl_end, int bucket_size,
int num_buckets) {
int time_point = ttl_start;
for(int i = 0; i < num_buckets - 1; i++, time_point += bucket_size) {
for (int i = 0; i < num_buckets - 1; i++, time_point += bucket_size) {
fprintf(stdout, "Keys in range %s to %s : %lu\n",
TimeToHumanString(time_point).c_str(),
TimeToHumanString(time_point + bucket_size).c_str(),
@@ -1787,7 +1785,7 @@ InternalDumpCommand::InternalDumpCommand(
// fprintf(stdout,"delim = %c\n",delim_[0]);
} else {
count_delim_ = IsFlagPresent(flags, ARG_COUNT_DELIM);
delim_=".";
delim_ = ".";
}
print_stats_ = IsFlagPresent(flags, ARG_STATS);
@@ -1841,8 +1839,8 @@ void InternalDumpCommand::DoCommand() {
}
std::string rtype1, rtype2, row, val;
rtype2 = "";
uint64_t c=0;
uint64_t s1=0,s2=0;
uint64_t c = 0;
uint64_t s1 = 0, s2 = 0;
long long count = 0;
for (auto& key_version : key_versions) {
@@ -1857,25 +1855,24 @@ void InternalDumpCommand::DoCommand() {
int k;
if (count_delim_) {
rtype1 = "";
s1=0;
s1 = 0;
row = ikey.Encode().ToString();
val = key_version.value;
for(k=0;row[k]!='\x01' && row[k]!='\0';k++)
s1++;
for(k=0;val[k]!='\x01' && val[k]!='\0';k++)
s1++;
for(int j=0;row[j]!=delim_[0] && row[j]!='\0' && row[j]!='\x01';j++)
rtype1+=row[j];
if(rtype2.compare("") && rtype2.compare(rtype1)!=0) {
for (k = 0; row[k] != '\x01' && row[k] != '\0'; k++) s1++;
for (k = 0; val[k] != '\x01' && val[k] != '\0'; k++) s1++;
for (int j = 0; row[j] != delim_[0] && row[j] != '\0' && row[j] != '\x01';
j++)
rtype1 += row[j];
if (rtype2.compare("") && rtype2.compare(rtype1) != 0) {
fprintf(stdout, "%s => count:%" PRIu64 "\tsize:%" PRIu64 "\n",
rtype2.c_str(), c, s2);
c=1;
s2=s1;
c = 1;
s2 = s1;
rtype2 = rtype1;
} else {
c++;
s2+=s1;
rtype2=rtype1;
s2 += s1;
rtype2 = rtype1;
}
}
@@ -1901,7 +1898,7 @@ void InternalDumpCommand::DoCommand() {
// Terminate if maximum number of keys have been dumped
if (max_keys_ > 0 && count >= max_keys_) break;
}
if(count_delim_) {
if (count_delim_) {
fprintf(stdout, "%s => count:%" PRIu64 "\tsize:%" PRIu64 "\n",
rtype2.c_str(), c, s2);
} else {
@@ -1966,7 +1963,7 @@ DBDumperCommand::DBDumperCommand(
count_delim_ = true;
} else {
count_delim_ = IsFlagPresent(flags, ARG_COUNT_DELIM);
delim_=".";
delim_ = ".";
}
print_stats_ = IsFlagPresent(flags, ARG_STATS);
@@ -2116,11 +2113,11 @@ void DBDumperCommand::DoDumpCommand() {
bucket_size <= 0) {
bucket_size = time_range; // Will have just 1 bucket by default
}
//cretaing variables for row count of each type
// cretaing variables for row count of each type
std::string rtype1, rtype2, row, val;
rtype2 = "";
uint64_t c=0;
uint64_t s1=0,s2=0;
uint64_t c = 0;
uint64_t s1 = 0, s2 = 0;
// At this point, bucket_size=0 => time_range=0
int num_buckets = (bucket_size >= time_range)
@@ -2138,11 +2135,9 @@ void DBDumperCommand::DoDumpCommand() {
for (; iter->Valid(); iter->Next()) {
int rawtime = 0;
// If end marker was specified, we stop before it
if (!null_to_ && (iter->key().ToString() >= to_))
break;
if (!null_to_ && (iter->key().ToString() >= to_)) break;
// Terminate if maximum number of keys have been dumped
if (max_keys == 0)
break;
if (max_keys == 0) break;
if (is_db_ttl_) {
TtlIterator* it_ttl = static_cast_with_check<TtlIterator>(iter);
rawtime = it_ttl->ttl_timestamp();
@@ -2162,21 +2157,20 @@ void DBDumperCommand::DoDumpCommand() {
rtype1 = "";
row = iter->key().ToString();
val = iter->value().ToString();
s1 = row.size()+val.size();
for(int j=0;row[j]!=delim_[0] && row[j]!='\0';j++)
rtype1+=row[j];
if(rtype2.compare("") && rtype2.compare(rtype1)!=0) {
s1 = row.size() + val.size();
for (int j = 0; row[j] != delim_[0] && row[j] != '\0'; j++)
rtype1 += row[j];
if (rtype2.compare("") && rtype2.compare(rtype1) != 0) {
fprintf(stdout, "%s => count:%" PRIu64 "\tsize:%" PRIu64 "\n",
rtype2.c_str(), c, s2);
c=1;
s2=s1;
c = 1;
s2 = s1;
rtype2 = rtype1;
} else {
c++;
s2+=s1;
rtype2=rtype1;
s2 += s1;
rtype2 = rtype1;
}
}
if (count_only_) {
@@ -2197,7 +2191,7 @@ void DBDumperCommand::DoDumpCommand() {
if (num_buckets > 1 && is_db_ttl_) {
PrintBucketCounts(bucket_counts, ttl_start, ttl_end, bucket_size,
num_buckets);
} else if(count_delim_) {
} else if (count_delim_) {
fprintf(stdout, "%s => count:%" PRIu64 "\tsize:%" PRIu64 "\n",
rtype2.c_str(), c, s2);
} else {
@@ -2228,7 +2222,7 @@ ReduceDBLevelsCommand::ReduceDBLevelsCommand(
ParseIntOption(option_map_, ARG_NEW_LEVELS, new_levels_, exec_state_);
print_old_levels_ = IsFlagPresent(flags, ARG_PRINT_OLD_LEVELS);
if(new_levels_ <= 0) {
if (new_levels_ <= 0) {
exec_state_ = LDBCommandExecuteResult::Failed(
" Use --" + ARG_NEW_LEVELS + " to specify a new level number\n");
}
@@ -2240,7 +2234,7 @@ std::vector<std::string> ReduceDBLevelsCommand::PrepareArgs(
ret.push_back("reduce_levels");
ret.push_back("--" + ARG_DB + "=" + db_path);
ret.push_back("--" + ARG_NEW_LEVELS + "=" + std::to_string(new_levels));
if(print_old_level) {
if (print_old_level) {
ret.push_back("--" + ARG_PRINT_OLD_LEVELS);
}
return ret;
@@ -2265,8 +2259,7 @@ void ReduceDBLevelsCommand::OverrideBaseCFOptions(
cf_opts->max_bytes_for_level_multiplier = 1;
}
Status ReduceDBLevelsCommand::GetOldNumOfLevels(Options& opt,
int* levels) {
Status ReduceDBLevelsCommand::GetOldNumOfLevels(Options& opt, int* levels) {
ImmutableDBOptions db_options(opt);
EnvOptions soptions;
std::shared_ptr<Cache> tc(
@@ -2716,7 +2709,6 @@ WALDumperCommand::WALDumperCommand(
wal_file_ = itr->second;
}
print_header_ = IsFlagPresent(flags, ARG_PRINT_HEADER);
print_values_ = IsFlagPresent(flags, ARG_PRINT_VALUE);
is_write_committed_ = ParseBooleanOption(options, ARG_WRITE_COMMITTED, true);
@@ -3017,7 +3009,7 @@ void ScanCommand::DoCommand() {
TimeToHumanString(ttl_start).c_str(),
TimeToHumanString(ttl_end).c_str());
}
for ( ;
for (;
it->Valid() && (!end_key_specified_ || it->key().ToString() < end_key_);
it->Next()) {
if (is_db_ttl_) {
@@ -3253,7 +3245,8 @@ void DBQuerierCommand::Help(std::string& ret) {
ret.append(DBQuerierCommand::Name());
ret.append(" [--" + ARG_TTL + "]");
ret.append("\n");
ret.append(" Starts a REPL shell. Type help for list of available "
ret.append(
" Starts a REPL shell. Type help for list of available "
"commands.");
ret.append("\n");
}
@@ -3281,7 +3274,7 @@ void DBQuerierCommand::DoCommand() {
if (pos2 == std::string::npos) {
break;
}
tokens.push_back(line.substr(pos, pos2-pos));
tokens.push_back(line.substr(pos, pos2 - pos));
pos = pos2 + 1;
}
tokens.push_back(line.substr(pos));
@@ -3315,8 +3308,8 @@ void DBQuerierCommand::DoCommand() {
key = (is_key_hex_ ? HexToString(tokens[1]) : tokens[1]);
s = db_->Get(read_options, GetCfHandle(), Slice(key), &value);
if (s.ok()) {
fprintf(stdout, "%s\n", PrintKeyValue(key, value,
is_key_hex_, is_value_hex_).c_str());
fprintf(stdout, "%s\n",
PrintKeyValue(key, value, is_key_hex_, is_value_hex_).c_str());
} else {
if (s.IsNotFound()) {
fprintf(stdout, "Not found %s\n", tokens[1].c_str());

@@ -5,13 +5,13 @@
#pragma once
#include "rocksdb/utilities/ldb_cmd.h"
#include <map>
#include <string>
#include <utility>
#include <vector>
#include "rocksdb/utilities/ldb_cmd.h"
namespace ROCKSDB_NAMESPACE {
class CompactorCommand : public LDBCommand {
@@ -581,6 +581,7 @@ class CheckPointCommand : public LDBCommand {
static void Help(std::string& ret);
std::string checkpoint_dir_;
private:
static const std::string ARG_CHECKPOINT_DIR;
};

@@ -26,9 +26,9 @@
#include "util/file_checksum_helper.h"
#include "util/random.h"
using std::map;
using std::string;
using std::vector;
using std::map;
namespace ROCKSDB_NAMESPACE {
@@ -70,7 +70,7 @@ TEST_F(LdbCmdTest, HexToString) {
auto actual = ROCKSDB_NAMESPACE::LDBCommand::HexToString(inPair.first);
auto expected = inPair.second;
for (unsigned int i = 0; i < actual.length(); i++) {
EXPECT_EQ(expected[i], static_cast<int>((signed char) actual[i]));
EXPECT_EQ(expected[i], static_cast<int>((signed char)actual[i]));
}
auto reverse = ROCKSDB_NAMESPACE::LDBCommand::StringToHex(actual);
EXPECT_STRCASEEQ(inPair.first.c_str(), reverse.c_str());

@@ -5,6 +5,7 @@
//
#ifndef ROCKSDB_LITE
#include "rocksdb/ldb_tool.h"
#include "rocksdb/utilities/ldb_cmd.h"
#include "tools/ldb_cmd_impl.h"

@@ -19,7 +19,7 @@
namespace ROCKSDB_NAMESPACE {
class ReduceLevelTest : public testing::Test {
public:
public:
ReduceLevelTest() {
dbname_ = test::PerThreadDBPath("db_reduce_levels_test");
EXPECT_OK(DestroyDB(dbname_, Options()));
@@ -75,7 +75,7 @@ public:
return atoi(property.c_str());
}
private:
private:
std::string dbname_;
DB* db_;
};

@@ -6,13 +6,12 @@
#include "util/stop_watch.h"
#ifndef ROCKSDB_LITE
#include "tools/simulated_hybrid_file_system.h"
#include <algorithm>
#include <sstream>
#include <string>
#include "rocksdb/rate_limiter.h"
#include "tools/simulated_hybrid_file_system.h"
namespace ROCKSDB_NAMESPACE {

@@ -259,9 +259,9 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) {
try {
in_key = ROCKSDB_NAMESPACE::LDBCommand::HexToString(in_key);
} catch (...) {
std::cerr << "ERROR: Invalid key input '"
<< in_key
<< "' Use 0x{hex representation of internal rocksdb key}" << std::endl;
std::cerr << "ERROR: Invalid key input '" << in_key
<< "' Use 0x{hex representation of internal rocksdb key}"
<< std::endl;
return -1;
}
Slice sl_key = ROCKSDB_NAMESPACE::Slice(in_key);
@@ -331,13 +331,14 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) {
}
}
if(has_compression_level_from && has_compression_level_to) {
if(!has_specified_compression_types || compression_types.size() != 1) {
if (has_compression_level_from && has_compression_level_to) {
if (!has_specified_compression_types || compression_types.size() != 1) {
fprintf(stderr, "Specify one compression type.\n\n");
exit(1);
}
} else if(has_compression_level_from || has_compression_level_to) {
fprintf(stderr, "Specify both --compression_level_from and "
} else if (has_compression_level_from || has_compression_level_to) {
fprintf(stderr,
"Specify both --compression_level_from and "
"--compression_level_to.\n\n");
exit(1);
}
@@ -476,8 +477,7 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) {
has_from || use_from_as_prefix, from_key, has_to, to_key,
use_from_as_prefix);
if (!st.ok()) {
fprintf(stderr, "%s: %s\n", filename.c_str(),
st.ToString().c_str());
fprintf(stderr, "%s: %s\n", filename.c_str(), st.ToString().c_str());
}
total_read += dumper.GetReadNumber();
if (read_num > 0 && total_read > read_num) {

@@ -111,7 +111,7 @@ class TraceAnalyzerTest : public testing::Test {
single_iter->SeekForPrev("b");
ASSERT_OK(single_iter->status());
delete single_iter;
std::this_thread::sleep_for (std::chrono::seconds(1));
std::this_thread::sleep_for(std::chrono::seconds(1));
db_->Get(ro, "g", &value).PermitUncheckedError();

@@ -208,13 +208,16 @@ class WriteStress {
SystemClock::Default()->SleepForMicroseconds(
static_cast<int>(FLAGS_prefix_mutate_period_sec * 1000 * 1000LL));
if (dist(rng) < FLAGS_first_char_mutate_probability) {
key_prefix_[0].store(static_cast<char>(char_dist(rng)), std::memory_order_relaxed);
key_prefix_[0].store(static_cast<char>(char_dist(rng)),
std::memory_order_relaxed);
}
if (dist(rng) < FLAGS_second_char_mutate_probability) {
key_prefix_[1].store(static_cast<char>(char_dist(rng)), std::memory_order_relaxed);
key_prefix_[1].store(static_cast<char>(char_dist(rng)),
std::memory_order_relaxed);
}
if (dist(rng) < FLAGS_third_char_mutate_probability) {
key_prefix_[2].store(static_cast<char>(char_dist(rng)), std::memory_order_relaxed);
key_prefix_[2].store(static_cast<char>(char_dist(rng)),
std::memory_order_relaxed);
}
}
}
