Benchmark table reader with nanoseconds

Summary: Nanoseconds give a better view of performance, especially for operations so fast that microsecond-resolution timing reveals little useful information.
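
As a quick illustration (not part of this change) of why microsecond timing is too coarse here: with the median read around 400ns, NowMicros() deltas collapse to 0 or 1, while NowNanos() keeps the detail. This sketch only assumes rocksdb::Env::Default(), NowMicros() and NowNanos() from rocksdb/env.h; the dummy loop stands in for one fast table read.

    #include <cstdint>
    #include <cstdio>
    #include "rocksdb/env.h"

    int main() {
      rocksdb::Env* env = rocksdb::Env::Default();
      uint64_t start_us = env->NowMicros();
      uint64_t start_ns = env->NowNanos();
      volatile uint64_t sink = 0;
      for (int i = 0; i < 100; ++i) {  // stand-in for one fast read
        sink += i;
      }
      // A few-hundred-nanosecond body typically prints 0 or 1 for micros,
      // but a meaningful value for nanos.
      std::fprintf(stderr, "micros: %llu nanos: %llu\n",
                   (unsigned long long)(env->NowMicros() - start_us),
                   (unsigned long long)(env->NowNanos() - start_ns));
      return 0;
    }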

Test Plan:
sample output:

    ./table_reader_bench --plain_table --time_unit=nanosecond
    =======================================================================================================
    InMemoryTableSimpleBenchmark:           PlainTable   num_key1:   4096   num_key2:   512   non_empty
    =======================================================================================================
    Histogram (unit: nanosecond):
    Count: 6291456  Average: 475.3867  StdDev: 556.05
    Min: 135.0000  Median: 400.1817  Max: 33370.0000
    Percentiles: P50: 400.18 P75: 530.02 P99: 887.73 P99.9: 8843.26 P99.99: 9941.21
    ------------------------------------------------------
    [     120,     140 )        2   0.000%   0.000%
    [     140,     160 )      452   0.007%   0.007%
    [     160,     180 )    13683   0.217%   0.225%
    [     180,     200 )    54353   0.864%   1.089%
    [     200,     250 )   101004   1.605%   2.694%
    [     250,     300 )   729791  11.600%  14.294% ##
    [     300,     350 )   616070   9.792%  24.086% ##
    [     350,     400 )  1628021  25.877%  49.963% #####
    [     400,     450 )   647220  10.287%  60.250% ##
    [     450,     500 )   577206   9.174%  69.424% ##
    [     500,     600 )  1168585  18.574%  87.999% ####
    [     600,     700 )   506875   8.057%  96.055% ##
    [     700,     800 )   147878   2.350%  98.406%
    [     800,     900 )    42633   0.678%  99.083%
    [     900,    1000 )    16304   0.259%  99.342%
    [    1000,    1200 )     7811   0.124%  99.466%
    [    1200,    1400 )     1453   0.023%  99.490%
    [    1400,    1600 )      307   0.005%  99.494%
    [    1600,    1800 )       81   0.001%  99.496%
    [    1800,    2000 )       18   0.000%  99.496%
    [    2000,    2500 )        8   0.000%  99.496%
    [    2500,    3000 )        6   0.000%  99.496%
    [    3500,    4000 )        3   0.000%  99.496%
    [    4000,    4500 )      116   0.002%  99.498%
    [    4500,    5000 )     1144   0.018%  99.516%
    [    5000,    6000 )     1087   0.017%  99.534%
    [    6000,    7000 )     2403   0.038%  99.572%
    [    7000,    8000 )     9840   0.156%  99.728%
    [    8000,    9000 )    12820   0.204%  99.932%
    [    9000,   10000 )     3881   0.062%  99.994%
    [   10000,   12000 )      135   0.002%  99.996%
    [   12000,   14000 )      159   0.003%  99.998%
    [   14000,   16000 )       58   0.001%  99.999%
    [   16000,   18000 )       30   0.000% 100.000%
    [   18000,   20000 )       14   0.000% 100.000%
    [   20000,   25000 )        2   0.000% 100.000%
    [   25000,   30000 )        2   0.000% 100.000%
    [   30000,   35000 )        1   0.000% 100.000%

Reviewers: haobo, dhruba, sdong

CC: leveldb

Differential Revision: https://reviews.facebook.net/D16113
main
Kai Liu 11 years ago
parent b5140a0361
commit 59cffe02c4
table/table_reader_bench.cc

@@ -40,6 +40,10 @@ static bool DummySaveValue(void* arg, const ParsedInternalKey& ikey,
 return false;
 }
+uint64_t Now(Env* env, bool measured_by_nanosecond) {
+return measured_by_nanosecond ? env->NowNanos() : env->NowMicros();
+}
 // A very simple benchmark that.
 // Create a table with roughly numKey1 * numKey2 keys,
 // where there are numKey1 prefixes of the key, each has numKey2 number of
@@ -57,7 +61,7 @@ void TableReaderBenchmark(Options& opts, EnvOptions& env_options,
 ReadOptions& read_options, int num_keys1,
 int num_keys2, int num_iter, int prefix_len,
 bool if_query_empty_keys, bool for_iterator,
-bool through_db) {
+bool through_db, bool measured_by_nanosecond) {
 rocksdb::InternalKeyComparator ikc(opts.comparator);
 Slice prefix = Slice();
@@ -126,7 +130,7 @@ void TableReaderBenchmark(Options& opts, EnvOptions& env_options,
 if (!for_iterator) {
 // Query one existing key;
 std::string key = MakeKey(r1, r2, through_db);
-uint64_t start_micros = env->NowMicros();
+uint64_t start_micros = Now(env, measured_by_nanosecond);
 port::MemoryBarrier();
 if (!through_db) {
 s = table_reader->Get(read_options, key, arg, DummySaveValue,
@@ -135,7 +139,7 @@ void TableReaderBenchmark(Options& opts, EnvOptions& env_options,
 s = db->Get(read_options, key, &result);
 }
 port::MemoryBarrier();
-hist.Add(env->NowMicros() - start_micros);
+hist.Add(Now(env, measured_by_nanosecond) - start_micros);
 } else {
 int r2_len;
 if (if_query_empty_keys) {
@@ -153,7 +157,7 @@ void TableReaderBenchmark(Options& opts, EnvOptions& env_options,
 read_options.prefix = &prefix;
 }
 uint64_t total_time = 0;
-uint64_t start_micros = env->NowMicros();
+uint64_t start_micros = Now(env, measured_by_nanosecond);
 port::MemoryBarrier();
 Iterator* iter;
 if (!through_db) {
@@ -168,9 +172,9 @@ void TableReaderBenchmark(Options& opts, EnvOptions& env_options,
 }
 // verify key;
 port::MemoryBarrier();
-total_time += env->NowMicros() - start_micros;
+total_time += Now(env, measured_by_nanosecond) - start_micros;
 assert(Slice(MakeKey(r1, r2 + count, through_db)) == iter->key());
-start_micros = env->NowMicros();
+start_micros = Now(env, measured_by_nanosecond);
 if (++count >= r2_len) {
 break;
 }
@@ -183,7 +187,7 @@ void TableReaderBenchmark(Options& opts, EnvOptions& env_options,
 }
 delete iter;
 port::MemoryBarrier();
-total_time += env->NowMicros() - start_micros;
+total_time += Now(env, measured_by_nanosecond) - start_micros;
 hist.Add(total_time);
 }
 }
@@ -198,9 +202,10 @@ void TableReaderBenchmark(Options& opts, EnvOptions& env_options,
 "num_key2: %5d %10s\n"
 "==================================================="
 "===================================================="
-"\nHistogram (unit: microseconds): \n%s",
+"\nHistogram (unit: %s): \n%s",
 opts.table_factory->Name(), num_keys1, num_keys2,
-for_iterator? "iterator" : (if_query_empty_keys ? "empty" : "non_empty"),
+for_iterator ? "iterator" : (if_query_empty_keys ? "empty" : "non_empty"),
+measured_by_nanosecond ? "nanosecond" : "microsecond",
 hist.ToString().c_str());
 if (!through_db) {
 env->DeleteFile(file_name);
@@ -210,7 +215,7 @@ void TableReaderBenchmark(Options& opts, EnvOptions& env_options,
 DestroyDB(dbname, opts);
 }
 }
-} // namespace rocksdb
+} // namespace rocksdb
 DEFINE_bool(query_empty, false, "query non-existing keys instead of existing "
 "ones.");
@@ -223,7 +228,9 @@ DEFINE_bool(through_db, false, "If enable, a DB instance will be created and "
 "the query will be against DB. Otherwise, will be directly against "
 "a table reader.");
 DEFINE_bool(plain_table, false, "Use PlainTable");
+DEFINE_string(time_unit, "microsecond",
+"The time unit used for measuring performance. User can specify "
+"`microsecond` (default) or `nanosecond`");
 int main(int argc, char** argv) {
 google::SetUsageMessage(std::string("\nUSAGE:\n") + std::string(argv[0]) +
@@ -252,11 +259,15 @@ int main(int argc, char** argv) {
 } else {
 tf = new rocksdb::BlockBasedTableFactory();
 }
+// if user provides invalid options, just fall back to microsecond.
+bool measured_by_nanosecond = FLAGS_time_unit == "nanosecond";
 options.table_factory =
 std::shared_ptr<rocksdb::TableFactory>(tf);
 TableReaderBenchmark(options, env_options, ro, FLAGS_num_keys1,
 FLAGS_num_keys2, FLAGS_iter, FLAGS_prefix_len,
-FLAGS_query_empty, FLAGS_iterator, FLAGS_through_db);
+FLAGS_query_empty, FLAGS_iterator, FLAGS_through_db,
+measured_by_nanosecond);
 delete tf;
 return 0;
 }
