// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
#pragma once
#include <atomic>
#include <map>
#include <string>
#include <vector>

#include "monitoring/histogram.h"
#include "port/likely.h"
#include "port/port.h"
#include "rocksdb/statistics.h"
#include "util/core_local.h"
#include "util/mutexlock.h"

#ifdef __clang__
#define ROCKSDB_FIELD_UNUSED __attribute__((__unused__))
#else
#define ROCKSDB_FIELD_UNUSED
#endif  // __clang__

#ifndef STRINGIFY
#define STRINGIFY(x) #x
#define TOSTRING(x) STRINGIFY(x)
#endif

namespace ROCKSDB_NAMESPACE {

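// Ticker types used only inside RocksDB extend the public Tickers range;
// INTERNAL_TICKER_ENUM_MAX sizes the per-core ticker array below.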
enum TickersInternal : uint32_t {
  INTERNAL_TICKER_ENUM_START = TICKER_ENUM_MAX,
  INTERNAL_TICKER_ENUM_MAX
};

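// Likewise, internal-only histogram types extend the public Histograms range;
// INTERNAL_HISTOGRAM_ENUM_MAX sizes the per-core histogram array below.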
enum HistogramsInternal : uint32_t {
  INTERNAL_HISTOGRAM_START = HISTOGRAM_ENUM_MAX,
  INTERNAL_HISTOGRAM_ENUM_MAX
};

class StatisticsImpl : public Statistics {
 public:
  StatisticsImpl(std::shared_ptr<Statistics> stats);
  virtual ~StatisticsImpl();
  const char* Name() const override { return kClassName(); }
  static const char* kClassName() { return "BasicStatistics"; }

  virtual uint64_t getTickerCount(uint32_t ticker_type) const override;
  virtual void histogramData(uint32_t histogram_type,
                             HistogramData* const data) const override;
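  // Returns a string describing the full distribution of the histogram:
  // count, average, std dev, min/median/max, percentiles, and per-bucket
  // counts, e.g. (illustrative)
  //   fprintf(stdout, "%s\n",
  //           dbstats->getHistogramString(SST_READ_MICROS).c_str());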
  std::string getHistogramString(uint32_t histogram_type) const override;

  virtual void setTickerCount(uint32_t ticker_type, uint64_t count) override;
  virtual uint64_t getAndResetTickerCount(uint32_t ticker_type) override;
  virtual void recordTick(uint32_t ticker_type, uint64_t count) override;
  // This function is implemented for now for backward compatibility reasons.
  // In case a user explicitly calls it, for example through a wrapped
  // Statistics object that passes its measureTime() calls into here, nothing
  // will break.
  void measureTime(uint32_t histogramType, uint64_t time) override {
    recordInHistogram(histogramType, time);
  }
  virtual void recordInHistogram(uint32_t histogram_type,
                                 uint64_t value) override;

  virtual Status Reset() override;
  virtual std::string ToString() const override;
  virtual bool getTickerMap(std::map<std::string, uint64_t>*) const override;
  virtual bool HistEnabledForType(uint32_t type) const override;

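  // Returns the wrapped Statistics object that updates are forwarded to, if
  // any (see `stats_` below).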
  const Customizable* Inner() const override { return stats_.get(); }

 private:
  // If non-nullptr, forwards updates to the object pointed to by `stats_`.
  std::shared_ptr<Statistics> stats_;
  // Synchronizes anything that operates across other cores' local data,
  // such that operations like Reset() can be performed atomically.
  mutable port::Mutex aggregate_lock_;

  // The ticker/histogram data are stored in this structure, which we will
  // store per-core. It is cache-aligned, so tickers/histograms belonging to
  // different cores can never share the same cache line.
  //
  // Alignment attributes expand to nothing on platforms that do not support
  // them.
  struct ALIGN_AS(CACHE_LINE_SIZE) StatisticsData {
    std::atomic_uint_fast64_t tickers_[INTERNAL_TICKER_ENUM_MAX] = {{0}};
    HistogramImpl histograms_[INTERNAL_HISTOGRAM_ENUM_MAX];
#ifndef HAVE_ALIGNED_NEW
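    // Without C++17 aligned new support, pad the struct manually so that
    // sizeof(StatisticsData) stays a multiple of CACHE_LINE_SIZE and array
    // elements never share a cache line.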
    char
        padding[(CACHE_LINE_SIZE -
                 (INTERNAL_TICKER_ENUM_MAX * sizeof(std::atomic_uint_fast64_t) +
                  INTERNAL_HISTOGRAM_ENUM_MAX * sizeof(HistogramImpl)) %
                     CACHE_LINE_SIZE)] ROCKSDB_FIELD_UNUSED;
#endif
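    // Allocate on cache-line boundaries so the ALIGN_AS() request above is
    // honored even when the default operator new is not alignment-aware.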
    void* operator new(size_t s) { return port::cacheline_aligned_alloc(s); }
    void* operator new[](size_t s) { return port::cacheline_aligned_alloc(s); }
    void operator delete(void* p) { port::cacheline_aligned_free(p); }
    void operator delete[](void* p) { port::cacheline_aligned_free(p); }
  };

#ifndef TEST_CACHE_LINE_SIZE
  static_assert(sizeof(StatisticsData) % CACHE_LINE_SIZE == 0,
                "Expected " TOSTRING(CACHE_LINE_SIZE) "-byte aligned");
#endif
  CoreLocalArray<StatisticsData> per_core_stats_;
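
  // Per the "Locked" suffix, these helpers expect `aggregate_lock_` to be
  // held by the caller.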
  uint64_t getTickerCountLocked(uint32_t ticker_type) const;
  std::unique_ptr<HistogramImpl> getHistogramImplLocked(
      uint32_t histogram_type) const;
  void setTickerCountLocked(uint32_t ticker_type, uint64_t count);
};

// Utility functions
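// All of these are no-ops when `statistics` is nullptr, so call sites do not
// need their own null checks; e.g. (illustrative)
//   RecordTick(stats, BYTES_READ, bytes);
//   RecordInHistogram(stats, DB_GET, elapsed_micros);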
inline void RecordInHistogram(Statistics* statistics, uint32_t histogram_type,
                              uint64_t value) {
  if (statistics) {
    statistics->recordInHistogram(histogram_type, value);
  }
}

inline void RecordTimeToHistogram(Statistics* statistics,
                                  uint32_t histogram_type, uint64_t value) {
  if (statistics) {
    statistics->reportTimeToHistogram(histogram_type, value);
  }
}

inline void RecordTick(Statistics* statistics, uint32_t ticker_type,
                       uint64_t count = 1) {
  if (statistics) {
    statistics->recordTick(ticker_type, count);
  }
}

inline void SetTickerCount(Statistics* statistics, uint32_t ticker_type,
                           uint64_t count) {
  if (statistics) {
    statistics->setTickerCount(ticker_type, count);
  }
}

}  // namespace ROCKSDB_NAMESPACE