#ifndef STORAGE_ROCKSDB_INCLUDE_PERF_CONTEXT_H
#define STORAGE_ROCKSDB_INCLUDE_PERF_CONTEXT_H

#include <stdint.h>

namespace leveldb {
enum PerfLevel {
  kDisable = 0,      // disable perf stats
  kEnableCount = 1,  // enable only count stats
  kEnableTime = 2    // enable time stats too
};

// set the perf stats level
void SetPerfLevel(PerfLevel level);
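//
// For example, to gather both count and time stats before issuing the
// reads you want to measure (a minimal sketch):
//
//   SetPerfLevel(kEnableTime);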
// A thread-local context for gathering performance counters efficiently
// and transparently.
struct PerfContext {

  void Reset(); // reset all performance counters to zero

  uint64_t user_key_comparison_count; // total number of user key comparisons
  uint64_t block_cache_hit_count;   // total number of blocks served by the block cache
  uint64_t block_read_count;        // total number of blocks read from the file system
  uint64_t block_read_byte;         // total number of bytes obtained by block reads
  uint64_t block_read_time;         // total nanoseconds spent transferring block bytes from the OS
  uint64_t block_checksum_time;     // total nanoseconds spent verifying block checksums
  uint64_t block_decompress_time;   // total nanoseconds spent decompressing blocks
};

extern __thread PerfContext perf_context;
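//
// Example usage (a minimal sketch; `db`, the key, and the ReadOptions value
// are illustrative assumptions, not part of this header):
//
//   SetPerfLevel(kEnableTime);
//   perf_context.Reset();
//
//   std::string value;
//   db->Get(ReadOptions(), "some_key", &value);
//
//   // The counters now describe just that Get(): how many blocks were read,
//   // how many bytes they contained, and how long was spent transferring
//   // bytes from the OS, verifying checksums, and decompressing blocks.
//   uint64_t blocks_read = perf_context.block_read_count;
//   uint64_t read_nanos  = perf_context.block_read_time;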
}  // namespace leveldb

#endif  // STORAGE_ROCKSDB_INCLUDE_PERF_CONTEXT_H