Summary:
1. it doesn't work
2. we're not using it

In the future, if we need a general benchmark framework, we should probably use https://github.com/google/benchmark

Test Plan: make all

Reviewers: yhchiang, rven, anthony, sdong

Reviewed By: sdong

Subscribers: dhruba, leveldb

Differential Revision: https://reviews.facebook.net/D36777
parent 894e9f7454
commit abb4052278
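
For context, the replacement suggested in the summary registers benchmarks as shown in this minimal sketch against google/benchmark's documented API; the benchmark body is borrowed from util/benchharness_test.cc below, and the name BM_InsertFrontVector is illustrative:

#include <vector>
#include <benchmark/benchmark.h>

static void BM_InsertFrontVector(benchmark::State& state) {
  // The library, not the caller, decides how many iterations to run.
  for (auto _ : state) {
    std::vector<size_t> v;
    for (size_t i = 0; i < 100; i++) {
      v.insert(v.begin(), i);
    }
  }
}
BENCHMARK(BM_InsertFrontVector);

BENCHMARK_MAIN();  // supplies main(), replacing rocksdb::benchmark::RunBenchmarks()
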
@@ -1,96 +0,0 @@
// Copyright (c) 2013, Facebook, Inc.  All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.

#include <vector>

#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif

#include <inttypes.h>
#include "util/testharness.h"
#include "util/benchharness.h"
#include "db/version_set.h"
#include "db/write_controller.h"
#include "db/writebuffer.h"
#include "util/mutexlock.h"

namespace rocksdb {

std::string MakeKey(uint64_t num) {
  char buf[30];
  snprintf(buf, sizeof(buf), "%016" PRIu64, num);
  return std::string(buf);
}

void BM_LogAndApply(int iters, int num_base_files) {
  VersionSet* vset;
  WriteController wc;
  ColumnFamilyData* default_cfd;
  uint64_t fnum = 1;
  InstrumentedMutex mu;
  InstrumentedMutexLock l(&mu);

  BENCHMARK_SUSPEND {
    std::string dbname = test::TmpDir() + "/rocksdb_test_benchmark";
    ASSERT_OK(DestroyDB(dbname, Options()));

    DB* db = nullptr;
    Options opts;
    opts.create_if_missing = true;
    Status s = DB::Open(opts, dbname, &db);
    ASSERT_OK(s);
    ASSERT_TRUE(db != nullptr);

    delete db;
    db = nullptr;

    Options options;
    EnvOptions sopt;
    // Note that we use the default options directly, not options run
    // through SanitizeOptions(); initialize individual options manually
    // if needed.
    options.db_paths.emplace_back(dbname, 0);
    WriteBuffer wb(options.db_write_buffer_size);
    // The table cache parameter is passed in as nullptr, so any file I/O
    // operation is likely to fail.
    vset = new VersionSet(dbname, &options, sopt, nullptr, &wb, &wc);
    std::vector<ColumnFamilyDescriptor> dummy;
    dummy.push_back(ColumnFamilyDescriptor());
    ASSERT_OK(vset->Recover(dummy));
    default_cfd = vset->GetColumnFamilySet()->GetDefault();
    VersionEdit vbase;
    for (int i = 0; i < num_base_files; i++) {
      InternalKey start(MakeKey(2 * fnum), 1, kTypeValue);
      InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion);
      vbase.AddFile(2, ++fnum, 0, 1 /* file size */, start, limit, 1, 1);
    }
    ASSERT_OK(vset->LogAndApply(default_cfd,
        *default_cfd->GetLatestMutableCFOptions(), &vbase, &mu));
  }

  for (int i = 0; i < iters; i++) {
    VersionEdit vedit;
    vedit.DeleteFile(2, fnum);
    InternalKey start(MakeKey(2 * fnum), 1, kTypeValue);
    InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion);
    vedit.AddFile(2, ++fnum, 0, 1 /* file size */, start, limit, 1, 1);
    vset->LogAndApply(default_cfd, *default_cfd->GetLatestMutableCFOptions(),
                      &vedit, &mu);
  }
  delete vset;
}

BENCHMARK_NAMED_PARAM(BM_LogAndApply, 1000_iters_1_file, 1000, 1)
BENCHMARK_NAMED_PARAM(BM_LogAndApply, 1000_iters_100_files, 1000, 100)
BENCHMARK_NAMED_PARAM(BM_LogAndApply, 1000_iters_10000_files, 1000, 10000)
BENCHMARK_NAMED_PARAM(BM_LogAndApply, 100_iters_100000_files, 100, 100000)

}  // namespace rocksdb

int main(int argc, char** argv) {
  rocksdb::benchmark::RunBenchmarks();
  return 0;
}
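
For readers skimming the diff: per BENCHMARK_IMPL and BENCHMARK_NAMED_PARAM as defined in util/benchharness.h (removed below), each registration line above expands to approximately the following. The anonymous boolean's real name is pasted from __LINE__, so the 271 suffix here is illustrative.

// Approximate expansion of
// BENCHMARK_NAMED_PARAM(BM_LogAndApply, 1000_iters_1_file, 1000, 1):
static void BM_LogAndApply_1000_iters_1_file();
static bool rocksdbBenchmarkUnused271 __attribute__((__unused__)) = (
    ::rocksdb::benchmark::AddBenchmark(__FILE__,
        "BM_LogAndApply(1000_iters_1_file)",
        []() { BM_LogAndApply_1000_iters_1_file(); }),
    true);
static void BM_LogAndApply_1000_iters_1_file() {
  BM_LogAndApply(1000, 1);  // iters = 1000, num_base_files = 1
}
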
@@ -1,407 +0,0 @@
// Copyright (c) 2013, Facebook, Inc.  All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// This code is derived from Benchmark.cpp implemented in Folly, the
// open-sourced Facebook C++ library available at
// https://github.com/facebook/folly. All dependencies on other folly
// and boost libraries have been removed.

#include "util/benchharness.h"

#include <algorithm>
#include <cmath>
#include <cstring>
#include <limits>
#include <string>
#include <utility>
#include <vector>
#include "util/string_util.h"

#ifndef GFLAGS
bool FLAGS_benchmark = false;
uint64_t FLAGS_bm_min_usec = 100;
int64_t FLAGS_bm_min_iters = 1;
int32_t FLAGS_bm_max_secs = 1;
#else
#include <gflags/gflags.h>
DEFINE_bool(benchmark, false, "Run benchmarks.");

DEFINE_uint64(bm_min_usec, 100,
              "Minimum # of microseconds we'll accept for each benchmark.");

DEFINE_int64(bm_min_iters, 1,
             "Minimum # of iterations we'll try for each benchmark.");

DEFINE_int32(bm_max_secs, 1,
             "Maximum # of seconds we'll spend on each benchmark.");
#endif  // GFLAGS

using std::function;
using std::get;
using std::make_pair;
using std::max;
using std::min;
using std::pair;
using std::sort;
using std::string;
using std::tuple;
using std::vector;

namespace rocksdb {
namespace benchmark {

BenchmarkSuspender::NanosecondsSpent BenchmarkSuspender::nsSpent;

typedef function<uint64_t(unsigned int)> BenchmarkFun;
static vector<tuple<const char*, const char*, BenchmarkFun>> benchmarks;

// Add the global baseline
BENCHMARK(globalBenchmarkBaseline) {
  asm volatile("");
}

void detail::AddBenchmarkImpl(const char* file, const char* name,
                              BenchmarkFun fun) {
  benchmarks.emplace_back(file, name, std::move(fun));
}

/**
 * Given a point, gives density at that point as a number 0.0 < x <=
 * 1.0. The result is 1.0 if all samples are equal to where, and
 * decreases near 0 if all points are far away from it. The density is
 * computed with the help of a radial basis function.
 */
static double Density(const double* begin, const double* const end,
                      const double where, const double bandwidth) {
  assert(begin < end);
  assert(bandwidth > 0.0);
  double sum = 0.0;
  for (auto i = begin; i < end; i++) {
    auto d = (*i - where) / bandwidth;
    sum += exp(-d * d);
  }
  return sum / (end - begin);
}

/**
 * Computes mean and variance for a bunch of data points. Note that
 * mean is currently not being used.
 */
static pair<double, double>
MeanVariance(const double* begin, const double* const end) {
  assert(begin < end);
  double sum = 0.0, sum2 = 0.0;
  for (auto i = begin; i < end; i++) {
    sum += *i;
    sum2 += *i * *i;
  }
  auto const n = end - begin;
  return make_pair(sum / n, sqrt((sum2 - sum * sum / n) / n));
}

/**
 * Computes the mode of a sample set through brute force. Assumes
 * input is sorted.
 */
static double Mode(const double* begin, const double* const end) {
  assert(begin < end);
  // Lower bound and upper bound for result and their respective
  // densities.
  auto result = 0.0, bestDensity = 0.0;

  // Get the variance so we pass it down to Density()
  auto const sigma = MeanVariance(begin, end).second;
  if (!sigma) {
    // No variance means constant signal
    return *begin;
  }

  for (auto i = begin; i < end; i++) {
    assert(i == begin || *i >= i[-1]);
    auto candidate = Density(begin, end, *i, sigma * sqrt(2.0));
    if (candidate > bestDensity) {
      // Found a new best
      bestDensity = candidate;
      result = *i;
    } else {
      // Density is decreasing... we could break here if we definitely
      // knew this is unimodal.
    }
  }

  return result;
}

/**
 * Given a bunch of benchmark samples, estimate the actual run time.
 */
static double EstimateTime(double* begin, double* end) {
  assert(begin < end);

  // Current state of the art: get the minimum. After some
  // experimentation, it seems taking the minimum is the best.
  return *std::min_element(begin, end);

  // What follows after estimates the time as the mode of the
  // distribution.

  // Select the awesomest (i.e. most frequent) result. We do this by
  // sorting and then computing the longest run length.
  sort(begin, end);

  // Eliminate outliers. A time much larger than the minimum time is
  // considered an outlier.
  while (end[-1] > 2.0 * *begin) {
    --end;
    if (begin == end) {
      // LOG(INFO) << *begin;
    }
    assert(begin < end);
  }

  double result = 0;

  /* Code used just for comparison purposes */ {
    unsigned bestFrequency = 0;
    unsigned candidateFrequency = 1;
    double candidateValue = *begin;
    for (auto current = begin + 1; ; ++current) {
      if (current == end || *current != candidateValue) {
        // Done with the current run, see if it was best
        if (candidateFrequency > bestFrequency) {
          bestFrequency = candidateFrequency;
          result = candidateValue;
        }
        if (current == end) {
          break;
        }
        // Start a new run
        candidateValue = *current;
        candidateFrequency = 1;
      } else {
        // Cool, inside a run, increase the frequency
        ++candidateFrequency;
      }
    }
  }

  result = Mode(begin, end);

  return result;
}

static double RunBenchmarkGetNSPerIteration(const BenchmarkFun& fun,
                                            const double globalBaseline) {
  // The key here is accuracy; numbers that are too low mean the
  // measurement was too coarse. We up the ante until we get to at least
  // minNanoseconds timings.
  static const auto minNanoseconds = FLAGS_bm_min_usec * 1000UL;

  // We do measurements in several epochs and take the minimum, to
  // account for jitter.
  static const unsigned int epochs = 1000;
  // We establish a total time budget as we don't want a measurement
  // to take too long. This will curtail the number of actual epochs.
  const uint64_t timeBudgetInNs = FLAGS_bm_max_secs * 1000000000;
  auto env = Env::Default();
  uint64_t global = env->NowNanos();

  double epochResults[epochs] = { 0 };
  size_t actualEpochs = 0;

  for (; actualEpochs < epochs; ++actualEpochs) {
    for (unsigned int n = static_cast<unsigned int>(FLAGS_bm_min_iters);
         n < (1UL << 30); n *= 2) {
      auto const nsecs = fun(n);
      if (nsecs < minNanoseconds) {
        continue;
      }
      // We got an accurate enough timing, done. But only save if
      // smaller than the current result.
      epochResults[actualEpochs] =
          max(0.0, static_cast<double>(nsecs) / n - globalBaseline);
      // Done with the current epoch, we got a meaningful timing.
      break;
    }
    uint64_t now = env->NowNanos();
    if ((now - global) >= timeBudgetInNs) {
      // No more time budget available.
      ++actualEpochs;
      break;
    }
  }

  // If the benchmark was basically drowned in baseline noise, it's
  // possible it became negative.
  return max(0.0, EstimateTime(epochResults, epochResults + actualEpochs));
}

struct ScaleInfo {
  double boundary;
  const char* suffix;
};

static const ScaleInfo kTimeSuffixes[] {
  { 365.25 * 24 * 3600, "years" },
  { 24 * 3600, "days" },
  { 3600, "hr" },
  { 60, "min" },
  { 1, "s" },
  { 1E-3, "ms" },
  { 1E-6, "us" },
  { 1E-9, "ns" },
  { 1E-12, "ps" },
  { 1E-15, "fs" },
  { 0, nullptr },
};

static const ScaleInfo kMetricSuffixes[] {
  { 1E24, "Y" },   // yotta
  { 1E21, "Z" },   // zetta
  { 1E18, "X" },   // "exa" written with suffix 'X' so as to not create
                   // confusion with scientific notation
  { 1E15, "P" },   // peta
  { 1E12, "T" },   // tera
  { 1E9, "G" },    // giga
  { 1E6, "M" },    // mega
  { 1E3, "K" },    // kilo
  { 1, "" },
  { 1E-3, "m" },   // milli
  { 1E-6, "u" },   // micro
  { 1E-9, "n" },   // nano
  { 1E-12, "p" },  // pico
  { 1E-15, "f" },  // femto
  { 1E-18, "a" },  // atto
  { 1E-21, "z" },  // zepto
  { 1E-24, "y" },  // yocto
  { 0, nullptr },
};

static string HumanReadable(double n, unsigned int decimals,
                            const ScaleInfo* scales) {
  if (std::isinf(n) || std::isnan(n)) {
    return ToString(n);
  }

  const double absValue = fabs(n);
  const ScaleInfo* scale = scales;
  while (absValue < scale[0].boundary && scale[1].suffix != nullptr) {
    ++scale;
  }

  const double scaledValue = n / scale->boundary;
  char a[80];
  snprintf(a, sizeof(a), "%.*f%s", decimals, scaledValue, scale->suffix);
  return a;
}

static string ReadableTime(double n, unsigned int decimals) {
  return HumanReadable(n, decimals, kTimeSuffixes);
}

static string MetricReadable(double n, unsigned int decimals) {
  return HumanReadable(n, decimals, kMetricSuffixes);
}

static void PrintBenchmarkResultsAsTable(
    const vector<tuple<const char*, const char*, double>>& data) {
  // Width available
  static const unsigned int columns = 76;

  // Compute the longest benchmark name
  size_t longestName = 0;
  for (size_t i = 1; i < benchmarks.size(); i++) {
    longestName = max(longestName, strlen(get<1>(benchmarks[i])));
  }

  // Print a horizontal rule
  auto separator = [&](char pad) {
    puts(string(columns, pad).c_str());
  };

  // Print header for a file
  auto header = [&](const char* file) {
    separator('=');
    printf("%-*srelative  time/iter  iters/s\n", columns - 28, file);
    separator('=');
  };

  double baselineNsPerIter = std::numeric_limits<double>::max();
  const char* lastFile = "";

  for (auto& datum : data) {
    auto file = get<0>(datum);
    if (strcmp(file, lastFile)) {
      // New file starting
      header(file);
      lastFile = file;
    }

    string s = get<1>(datum);
    if (s == "-") {
      separator('-');
      continue;
    }
    bool useBaseline /* = void */;
    if (s[0] == '%') {
      s.erase(0, 1);
      useBaseline = true;
    } else {
      baselineNsPerIter = get<2>(datum);
      useBaseline = false;
    }
    s.resize(columns - 29, ' ');
    auto nsPerIter = get<2>(datum);
    auto secPerIter = nsPerIter / 1E9;
    auto itersPerSec = 1 / secPerIter;
    if (!useBaseline) {
      // Print without baseline
      printf("%*s %9s %7s\n",
             static_cast<int>(s.size()), s.c_str(),
             ReadableTime(secPerIter, 2).c_str(),
             MetricReadable(itersPerSec, 2).c_str());
    } else {
      // Print with baseline
      auto rel = baselineNsPerIter / nsPerIter * 100.0;
      printf("%*s %7.2f%% %9s %7s\n",
             static_cast<int>(s.size()), s.c_str(),
             rel,
             ReadableTime(secPerIter, 2).c_str(),
             MetricReadable(itersPerSec, 2).c_str());
    }
  }
  separator('=');
}

void RunBenchmarks() {
  ASSERT_TRUE(!benchmarks.empty());

  vector<tuple<const char*, const char*, double>> results;
  results.reserve(benchmarks.size() - 1);

  // PLEASE KEEP QUIET. MEASUREMENTS IN PROGRESS.

  auto const globalBaseline = RunBenchmarkGetNSPerIteration(
      get<2>(benchmarks.front()), 0);
  for (size_t i = 1; i < benchmarks.size(); i++) {
    double elapsed = 0.0;
    if (strcmp(get<1>(benchmarks[i]), "-") != 0) {  // skip separators
      elapsed = RunBenchmarkGetNSPerIteration(get<2>(benchmarks[i]),
                                              globalBaseline);
    }
    results.emplace_back(get<0>(benchmarks[i]),
                         get<1>(benchmarks[i]), elapsed);
  }

  // PLEASE MAKE NOISE. MEASUREMENTS DONE.

  PrintBenchmarkResultsAsTable(results);
}

}  // namespace benchmark
}  // namespace rocksdb
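
For reference, the estimator implemented by Density() above is a Gaussian radial-basis-function kernel density estimate; in the code's notation (samples x_1..x_N, query point `where` = w, `bandwidth` = h):

    D(w) = (1/N) * sum over i of exp(-((x_i - w) / h)^2)

so D(w) is in (0, 1], and equals 1 only when every sample equals w. Mode() evaluates D at each sample with h = sigma * sqrt(2), where sigma is the standard deviation returned by MeanVariance(), and returns the sample with the highest density. Note that EstimateTime() returns the minimum sample before this code is ever reached, so the mode-based path is dead code kept only for comparison.
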
@@ -1,356 +0,0 @@
// Copyright (c) 2013, Facebook, Inc.  All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// This code is derived from Benchmark.h implemented in Folly, the
// open-sourced Facebook C++ library available at
// https://github.com/facebook/folly. All dependencies on other folly
// and boost libraries have been removed.

#pragma once

#include <cassert>
#include <functional>
#include <limits>

#include "util/testharness.h"
#include "rocksdb/env.h"

namespace rocksdb {
namespace benchmark {

/**
 * Runs all benchmarks defined. Usually put in main().
 */
void RunBenchmarks();

namespace detail {

/**
 * Adds a benchmark wrapped in a std::function. Only used
 * internally. Pass by value is intentional.
 */
void AddBenchmarkImpl(const char* file,
                      const char* name,
                      std::function<uint64_t(unsigned int)>);

}  // namespace detail

/**
 * Supporting type for BENCHMARK_SUSPEND defined below.
 */
struct BenchmarkSuspender {
  BenchmarkSuspender() { start_ = Env::Default()->NowNanos(); }

  BenchmarkSuspender(const BenchmarkSuspender&) = delete;
  BenchmarkSuspender(BenchmarkSuspender&& rhs) {
    start_ = rhs.start_;
    rhs.start_ = 0;
  }

  BenchmarkSuspender& operator=(const BenchmarkSuspender&) = delete;
  BenchmarkSuspender& operator=(BenchmarkSuspender&& rhs) {
    if (start_ > 0) {
      tally();
    }
    start_ = rhs.start_;
    rhs.start_ = 0;
    return *this;
  }

  ~BenchmarkSuspender() {
    if (start_ > 0) {
      tally();
    }
  }

  void Dismiss() {
    assert(start_ > 0);
    tally();
    start_ = 0;
  }

  void Rehire() { start_ = Env::Default()->NowNanos(); }

  /**
   * This helps the macro definition. To get around the dangers of
   * operator bool, returns a pointer to member (which allows no
   * arithmetic).
   */
  /* implicit */
  operator int BenchmarkSuspender::*() const { return nullptr; }

  /**
   * Accumulates nanoseconds spent outside benchmark.
   */
  typedef uint64_t NanosecondsSpent;
  static NanosecondsSpent nsSpent;

 private:
  void tally() {
    uint64_t end = Env::Default()->NowNanos();
    // Accumulate the nanoseconds spent suspended.
    nsSpent += end - start_;
    start_ = end;
  }

  uint64_t start_;
};

/**
 * Adds a benchmark. Usually not called directly but instead through
 * the macro BENCHMARK defined below. The lambda function involved
 * must take exactly one parameter of type unsigned, and the benchmark
 * uses it with counter semantics (iteration occurs inside the
 * function).
 */
template <typename Lambda>
void AddBenchmark_n(const char* file, const char* name, Lambda&& lambda) {
  auto execute = [=](unsigned int times) -> uint64_t {
    BenchmarkSuspender::nsSpent = 0;
    uint64_t start, end;
    auto env = Env::Default();

    // CORE MEASUREMENT STARTS
    start = env->NowNanos();
    lambda(times);
    end = env->NowNanos();
    // CORE MEASUREMENT ENDS
    return (end - start) - BenchmarkSuspender::nsSpent;
  };

  detail::AddBenchmarkImpl(file, name,
                           std::function<uint64_t(unsigned int)>(execute));
}

/**
 * Adds a benchmark. Usually not called directly but instead through
 * the macro BENCHMARK defined below. The lambda function involved
 * must take zero parameters, and the benchmark calls it repeatedly
 * (iteration occurs outside the function).
 */
template <typename Lambda>
void AddBenchmark(const char* file, const char* name, Lambda&& lambda) {
  AddBenchmark_n(file, name, [=](unsigned int times) {
    while (times-- > 0) {
      lambda();
    }
  });
}

}  // namespace benchmark
}  // namespace rocksdb

/**
 * FB_ONE_OR_NONE(hello, world) expands to hello and
 * FB_ONE_OR_NONE(hello) expands to nothing. This macro is used to
 * insert or eliminate text based on the presence of another argument.
 */
#define FB_ONE_OR_NONE(a, ...) FB_THIRD(a, ## __VA_ARGS__, a)
#define FB_THIRD(a, b, ...) __VA_ARGS__

#define FB_CONCATENATE_IMPL(s1, s2) s1##s2
#define FB_CONCATENATE(s1, s2) FB_CONCATENATE_IMPL(s1, s2)

#define FB_ANONYMOUS_VARIABLE(str) \
  FB_CONCATENATE(str, __LINE__ __attribute__((__unused__)))

#define FB_STRINGIZE(x) #x

/**
 * Introduces a benchmark function. Used internally, see BENCHMARK and
 * friends below.
 */
#define BENCHMARK_IMPL_N(funName, stringName, paramType, paramName)    \
  static void funName(paramType);                                      \
  static bool FB_ANONYMOUS_VARIABLE(rocksdbBenchmarkUnused) = (        \
    ::rocksdb::benchmark::AddBenchmark_n(__FILE__, stringName,         \
      [](paramType paramName) { funName(paramName); }),                \
    true);                                                             \
  static void funName(paramType paramName)

#define BENCHMARK_IMPL(funName, stringName)                            \
  static void funName();                                               \
  static bool FB_ANONYMOUS_VARIABLE(rocksdbBenchmarkUnused) = (        \
    ::rocksdb::benchmark::AddBenchmark(__FILE__, stringName,           \
      []() { funName(); }),                                            \
    true);                                                             \
  static void funName()

/**
 * Introduces a benchmark function. Use with either one or two
 * arguments. The first is the name of the benchmark. Use something
 * descriptive, such as insertVectorBegin. The second argument may be
 * missing, or could be a symbolic counter. The counter dictates how
 * many internal iterations the benchmark does. Example:
 *
 *   BENCHMARK(vectorPushBack) {
 *     vector<int> v;
 *     v.push_back(42);
 *   }
 *
 *   BENCHMARK_N(insertVectorBegin, n) {
 *     vector<int> v;
 *     FOR_EACH_RANGE (i, 0, n) {
 *       v.insert(v.begin(), 42);
 *     }
 *   }
 */
#define BENCHMARK_N(name, ...)                                  \
  BENCHMARK_IMPL_N(                                             \
    name,                                                       \
    FB_STRINGIZE(name),                                         \
    FB_ONE_OR_NONE(unsigned, ## __VA_ARGS__),                   \
    __VA_ARGS__)

#define BENCHMARK(name)                                         \
  BENCHMARK_IMPL(                                               \
    name,                                                       \
    FB_STRINGIZE(name))

/**
 * Defines a benchmark that passes a parameter to another one. This is
 * common for benchmarks that need a "problem size" in addition to
 * "number of iterations". Consider:
 *
 *   void pushBack(uint n, size_t initialSize) {
 *     vector<int> v;
 *     BENCHMARK_SUSPEND {
 *       v.resize(initialSize);
 *     }
 *     FOR_EACH_RANGE (i, 0, n) {
 *       v.push_back(i);
 *     }
 *   }
 *   BENCHMARK_PARAM(pushBack, 0)
 *   BENCHMARK_PARAM(pushBack, 1000)
 *   BENCHMARK_PARAM(pushBack, 1000000)
 *
 * The benchmark above estimates the speed of push_back at different
 * initial sizes of the vector. The framework will pass 0, 1000, and
 * 1000000 for initialSize, and the iteration count for n.
 */
#define BENCHMARK_PARAM(name, param)                            \
  BENCHMARK_NAMED_PARAM(name, param, param)

/*
 * Like BENCHMARK_PARAM(), but allows a custom name to be specified for each
 * parameter, rather than using the parameter value.
 *
 * Useful when the parameter value is not a valid token for string pasting,
 * or when you want to specify multiple parameter arguments.
 *
 * For example:
 *
 *   void addValue(uint n, int64_t bucketSize, int64_t min, int64_t max) {
 *     Histogram<int64_t> hist(bucketSize, min, max);
 *     int64_t num = min;
 *     FOR_EACH_RANGE (i, 0, n) {
 *       hist.addValue(num);
 *       ++num;
 *       if (num > max) { num = min; }
 *     }
 *   }
 *
 *   BENCHMARK_NAMED_PARAM(addValue, 0_to_100, 1, 0, 100)
 *   BENCHMARK_NAMED_PARAM(addValue, 0_to_1000, 10, 0, 1000)
 *   BENCHMARK_NAMED_PARAM(addValue, 5k_to_20k, 250, 5000, 20000)
 */
#define BENCHMARK_NAMED_PARAM(name, param_name, ...)                    \
  BENCHMARK_IMPL(                                                       \
      FB_CONCATENATE(name, FB_CONCATENATE(_, param_name)),              \
      FB_STRINGIZE(name) "(" FB_STRINGIZE(param_name) ")") {            \
    name(__VA_ARGS__);                                                  \
  }

#define BENCHMARK_NAMED_PARAM_N(name, param_name, ...)                  \
  BENCHMARK_IMPL_N(                                                     \
      FB_CONCATENATE(name, FB_CONCATENATE(_, param_name)),              \
      FB_STRINGIZE(name) "(" FB_STRINGIZE(param_name) ")",              \
      unsigned,                                                         \
      iters) {                                                          \
    name(iters, ## __VA_ARGS__);                                        \
  }

/**
 * Just like BENCHMARK, but prints the time relative to a
 * baseline. The baseline is the most recent BENCHMARK() seen in
 * lexical order. Example:
 *
 *   // This is the baseline
 *   BENCHMARK_N(insertVectorBegin, n) {
 *     vector<int> v;
 *     FOR_EACH_RANGE (i, 0, n) {
 *       v.insert(v.begin(), 42);
 *     }
 *   }
 *
 *   BENCHMARK_RELATIVE_N(insertListBegin, n) {
 *     list<int> s;
 *     FOR_EACH_RANGE (i, 0, n) {
 *       s.insert(s.begin(), 42);
 *     }
 *   }
 *
 * Any number of relative benchmarks can be associated with a
 * baseline. Another BENCHMARK() occurrence effectively establishes a
 * new baseline.
 */
#define BENCHMARK_RELATIVE_N(name, ...)                         \
  BENCHMARK_IMPL_N(                                             \
    name,                                                       \
    "%" FB_STRINGIZE(name),                                     \
    FB_ONE_OR_NONE(unsigned, ## __VA_ARGS__),                   \
    __VA_ARGS__)

#define BENCHMARK_RELATIVE(name)                                \
  BENCHMARK_IMPL(                                               \
    name,                                                       \
    "%" FB_STRINGIZE(name))

/**
 * A combination of BENCHMARK_RELATIVE and BENCHMARK_PARAM.
 */
#define BENCHMARK_RELATIVE_PARAM(name, param)                   \
  BENCHMARK_RELATIVE_NAMED_PARAM(name, param, param)

/**
 * A combination of BENCHMARK_RELATIVE and BENCHMARK_NAMED_PARAM.
 */
#define BENCHMARK_RELATIVE_NAMED_PARAM(name, param_name, ...)           \
  BENCHMARK_IMPL_N(                                                     \
      FB_CONCATENATE(name, FB_CONCATENATE(_, param_name)),              \
      "%" FB_STRINGIZE(name) "(" FB_STRINGIZE(param_name) ")",          \
      unsigned,                                                         \
      iters) {                                                          \
    name(iters, ## __VA_ARGS__);                                        \
  }

/**
 * Draws a line of dashes.
 */
#define BENCHMARK_DRAW_LINE()                                           \
  static bool FB_ANONYMOUS_VARIABLE(rocksdbBenchmarkUnused) = (         \
    ::rocksdb::benchmark::AddBenchmark(__FILE__, "-", []() { }),        \
    true);

/**
 * Allows execution of code that doesn't count toward the benchmark's
 * time budget. Example:
 *
 *   BENCHMARK_N(insertVectorBegin, n) {
 *     vector<int> v;
 *     BENCHMARK_SUSPEND {
 *       v.reserve(n);
 *     }
 *     FOR_EACH_RANGE (i, 0, n) {
 *       v.insert(v.begin(), 42);
 *     }
 *   }
 */
#define BENCHMARK_SUSPEND                               \
  if (auto FB_ANONYMOUS_VARIABLE(BENCHMARK_SUSPEND) =   \
      ::rocksdb::benchmark::BenchmarkSuspender()) {}    \
  else
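
If this BENCHMARK_SUSPEND pattern is ever ported to the google/benchmark library suggested in the summary, the closest documented analogue is benchmark::State's PauseTiming()/ResumeTiming(); a minimal sketch follows (the benchmark name and sizes are illustrative):

#include <vector>
#include <benchmark/benchmark.h>

static void BM_InsertVectorBegin(benchmark::State& state) {
  for (auto _ : state) {
    state.PauseTiming();   // like entering BENCHMARK_SUSPEND: clock stops
    std::vector<int> v;
    v.reserve(1000);       // setup excluded from the measurement
    state.ResumeTiming();  // like leaving the suspend block: clock restarts
    for (int i = 0; i < 1000; i++) {
      v.insert(v.begin(), 42);
    }
  }
}
BENCHMARK(BM_InsertVectorBegin);
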
@@ -1,67 +0,0 @@
// Copyright (c) 2013, Facebook, Inc.  All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//

#include "util/benchharness.h"
#include <vector>

namespace rocksdb {

BENCHMARK(insertFrontVector) {
  std::vector<size_t> v;
  for (size_t i = 0; i < 100; i++) {
    v.insert(v.begin(), i);
  }
}

BENCHMARK_RELATIVE(insertBackVector) {
  std::vector<size_t> v;
  for (size_t i = 0; i < 100; i++) {
    v.insert(v.end(), i);
  }
}

BENCHMARK_N(insertFrontVector_n, n) {
  std::vector<size_t> v;
  for (size_t i = 0; i < n; i++) {
    v.insert(v.begin(), i);
  }
}

BENCHMARK_RELATIVE_N(insertBackVector_n, n) {
  std::vector<size_t> v;
  for (size_t i = 0; i < n; i++) {
    v.insert(v.end(), i);
  }
}

BENCHMARK_N(insertFrontEnd_n, n) {
  std::vector<size_t> v;
  for (size_t i = 0; i < n; i++) {
    v.insert(v.begin(), i);
  }
  for (size_t i = 0; i < n; i++) {
    v.insert(v.end(), i);
  }
}

BENCHMARK_RELATIVE_N(insertFrontEndSuspend_n, n) {
  std::vector<size_t> v;
  for (size_t i = 0; i < n; i++) {
    v.insert(v.begin(), i);
  }
  BENCHMARK_SUSPEND {
    for (size_t i = 0; i < n; i++) {
      v.insert(v.end(), i);
    }
  }
}

}  // namespace rocksdb

int main(int argc, char** argv) {
  rocksdb::benchmark::RunBenchmarks();
  return 0;
}