Statistics code cleanup

Summary: I'm separating the code-cleanup part of https://reviews.facebook.net/D14517 into its own diff. This will make D14517 easier to understand and this diff easier to review.

Test Plan: make check

Reviewers: haobo, kailiu, sdong, dhruba, tnovak

Reviewed By: tnovak

CC: leveldb

Differential Revision: https://reviews.facebook.net/D15099
Branch: main
Author: Igor Canadi (11 years ago)
Parent: 0f4a75b710
Commit: 83681bf9ef
  1. db/compaction_picker.cc (7 changed lines)
  2. db/db_bench.cc (9 changed lines)
  3. db/db_impl.cc (8 changed lines)
  4. db/db_statistics.cc (14 changed lines)
  5. db/db_statistics.h (63 changed lines)
  6. db/db_test.cc (155 changed lines)
  7. db/memtable.cc (2 changed lines)
  8. db/merge_helper.cc (2 changed lines)
  9. db/simple_table_db_test.cc (2 changed lines)
  10. db/write_batch.cc (2 changed lines)
  11. include/rocksdb/statistics.h (45 changed lines)
  12. table/table_test.cc (21 changed lines)
  13. tools/db_stress.cc (2 changed lines)
  14. util/histogram.cc (66 changed lines)
  15. util/histogram.h (18 changed lines)
  16. util/statistics.cc (43 changed lines)
  17. util/statistics.h (53 changed lines)
  18. util/statistics_imp.h (32 changed lines)
  19. util/stop_watch.h (8 changed lines)
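The core of the cleanup is replacing repeated inline null-checks on `options_.statistics` with the `MeasureTime`/`RecordTick`/`SetTickerCount` helpers now declared in `util/statistics.h`. A minimal sketch of the before/after pattern, assuming simplified stand-ins for the RocksDB types (only the helper signature and the ticker/histogram names below come from the diff; the surrounding scaffolding is illustrative):

```cpp
#include <cstdint>
#include <memory>

// Simplified stand-ins for the RocksDB types involved (illustrative only;
// the real interfaces live in include/rocksdb/statistics.h).
enum Histograms : uint32_t { COMPACTION_TIME, HISTOGRAM_ENUM_MAX };

struct Statistics {
  virtual ~Statistics() {}
  virtual void measureTime(Histograms type, uint64_t value) = 0;
};

// Helper in the spirit of util/statistics.h: the null-check lives here
// instead of at every call site.
inline void MeasureTime(Statistics* statistics, Histograms type,
                        uint64_t value) {
  if (statistics) {
    statistics->measureTime(type, value);
  }
}

// Hypothetical call site illustrating the pattern changed by this diff.
void RecordCompactionTime(const std::shared_ptr<Statistics>& stats,
                          uint64_t micros) {
  // Before the cleanup, call sites repeated the guard:
  //   if (stats.get()) {
  //     stats.get()->measureTime(COMPACTION_TIME, micros);
  //   }
  // After the cleanup, they simply call the helper:
  MeasureTime(stats.get(), COMPACTION_TIME, micros);
}
```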

@ -8,6 +8,7 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "db/compaction_picker.h"
#include "util/statistics.h"
namespace rocksdb {
@ -589,10 +590,8 @@ Compaction* UniversalCompactionPicker::PickCompaction(Version* version) {
}
// update statistics
if (options_->statistics != nullptr) {
options_->statistics->measureTime(NUM_FILES_IN_SINGLE_COMPACTION,
c->inputs_[0].size());
}
MeasureTime(options_->statistics.get(), NUM_FILES_IN_SINGLE_COMPACTION,
c->inputs_[0].size());
// mark all the files that are being compacted
c->MarkFilesBeingCompacted(true);

@ -14,7 +14,7 @@
#include <gflags/gflags.h>
#include "db/db_impl.h"
#include "db/version_set.h"
#include "db/db_statistics.h"
#include "rocksdb/statistics.h"
#include "rocksdb/options.h"
#include "rocksdb/cache.h"
#include "rocksdb/db.h"
@ -30,6 +30,7 @@
#include "util/random.h"
#include "util/stack_trace.h"
#include "util/string_util.h"
#include "util/statistics.h"
#include "util/testutil.h"
#include "hdfs/env_hdfs.h"
#include "utilities/merge_operators.h"
@ -355,9 +356,9 @@ static bool ValidateCompressionLevel(const char* flagname, int32_t value) {
return true;
}
static const bool FLAGS_compression_level_dummy =
google::RegisterFlagValidator(&FLAGS_compression_level,
&ValidateCompressionLevel);
static const bool FLAGS_compression_level_dummy __attribute__((unused)) =
google::RegisterFlagValidator(&FLAGS_compression_level,
&ValidateCompressionLevel);
DEFINE_int32(min_level_to_compress, -1, "If non-negative, compression starts"
" from this level. Levels with number < min_level_to_compress are"

@ -2564,9 +2564,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
CompactionStats stats;
stats.micros = env_->NowMicros() - start_micros - imm_micros;
if (options_.statistics.get()) {
options_.statistics.get()->measureTime(COMPACTION_TIME, stats.micros);
}
MeasureTime(options_.statistics.get(), COMPACTION_TIME, stats.micros);
stats.files_in_leveln = compact->compaction->num_input_files(0);
stats.files_in_levelnp1 = compact->compaction->num_input_files(1);
@ -3062,8 +3060,8 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
// have succeeded in memtable but Status reports error for all writes.
throw std::runtime_error("In memory WriteBatch corruption!");
}
SetTickerCount(options_.statistics.get(),
SEQUENCE_NUMBER, last_sequence);
SetTickerCount(options_.statistics.get(), SEQUENCE_NUMBER,
last_sequence);
}
if (updates == &tmp_batch_) tmp_batch_.Clear();
mutex_.Lock();

@ -1,14 +0,0 @@
// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
#include "db/db_statistics.h"
namespace rocksdb {
std::shared_ptr<Statistics> CreateDBStatistics() {
return std::make_shared<DBStatistics>();
}
} // namespace rocksdb

@ -1,63 +0,0 @@
// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#pragma once
#include <cassert>
#include <stdlib.h>
#include <vector>
#include <memory>
#include "rocksdb/statistics.h"
#include "util/histogram.h"
#include "port/port.h"
#include "util/mutexlock.h"
namespace rocksdb {
class DBStatistics: public Statistics {
public:
DBStatistics() : allTickers_(TICKER_ENUM_MAX),
allHistograms_(HISTOGRAM_ENUM_MAX) { }
virtual ~DBStatistics() {}
virtual long getTickerCount(Tickers tickerType) {
assert(tickerType < TICKER_ENUM_MAX);
return allTickers_[tickerType].getCount();
}
virtual void setTickerCount(Tickers tickerType, uint64_t count) {
assert(tickerType < TICKER_ENUM_MAX);
allTickers_[tickerType].setTickerCount(count);
}
virtual void recordTick(Tickers tickerType, uint64_t count) {
assert(tickerType < TICKER_ENUM_MAX);
allTickers_[tickerType].recordTick(count);
}
virtual void measureTime(Histograms histogramType, uint64_t value) {
assert(histogramType < HISTOGRAM_ENUM_MAX);
allHistograms_[histogramType].Add(value);
}
virtual void histogramData(Histograms histogramType,
HistogramData * const data) {
assert(histogramType < HISTOGRAM_ENUM_MAX);
allHistograms_[histogramType].Data(data);
}
std::vector<Ticker> allTickers_;
std::vector<HistogramImpl> allHistograms_;
};
std::shared_ptr<Statistics> CreateDBStatistics();
} // namespace rocksdb

@ -17,7 +17,6 @@
#include "db/filename.h"
#include "db/version_set.h"
#include "db/write_batch_internal.h"
#include "db/db_statistics.h"
#include "rocksdb/cache.h"
#include "rocksdb/compaction_filter.h"
#include "rocksdb/env.h"
@ -27,6 +26,7 @@
#include "util/mutexlock.h"
#include "util/testharness.h"
#include "util/testutil.h"
#include "util/statistics.h"
#include "utilities/merge_operators.h"
namespace rocksdb {
@ -677,6 +677,10 @@ static std::string Key(int i) {
return std::string(buf);
}
static long TestGetTickerCount(const Options& options, Tickers ticker_type) {
return options.statistics->getTickerCount(ticker_type);
}
TEST(DBTest, Empty) {
do {
ASSERT_TRUE(db_ != nullptr);
@ -710,14 +714,11 @@ TEST(DBTest, IndexAndFilterBlocksOfNewTableAddedToCache) {
dbfull()->Flush(FlushOptions());
// index/filter blocks added to block cache right after table creation.
ASSERT_EQ(1,
options.statistics.get()->getTickerCount(BLOCK_CACHE_INDEX_MISS));
ASSERT_EQ(1,
options.statistics.get()->getTickerCount(BLOCK_CACHE_FILTER_MISS));
ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
ASSERT_EQ(2, /* only index/filter were added */
options.statistics.get()->getTickerCount(BLOCK_CACHE_ADD));
ASSERT_EQ(0,
options.statistics.get()->getTickerCount(BLOCK_CACHE_DATA_MISS));
TestGetTickerCount(options, BLOCK_CACHE_ADD));
ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_DATA_MISS));
// Make sure filter block is in cache.
std::string value;
@ -725,31 +726,24 @@ TEST(DBTest, IndexAndFilterBlocksOfNewTableAddedToCache) {
db_->KeyMayExist(ReadOptions(), "key", &value);
// Miss count should remain the same.
ASSERT_EQ(1,
options.statistics.get()->getTickerCount(BLOCK_CACHE_FILTER_MISS));
ASSERT_EQ(1,
options.statistics.get()->getTickerCount(BLOCK_CACHE_FILTER_HIT));
ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
db_->KeyMayExist(ReadOptions(), "key", &value);
ASSERT_EQ(1,
options.statistics.get()->getTickerCount(BLOCK_CACHE_FILTER_MISS));
ASSERT_EQ(2,
options.statistics.get()->getTickerCount(BLOCK_CACHE_FILTER_HIT));
ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
ASSERT_EQ(2, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
// Make sure index block is in cache.
auto index_block_hit =
options.statistics.get()->getTickerCount(BLOCK_CACHE_FILTER_HIT);
auto index_block_hit = TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT);
value = Get("key");
ASSERT_EQ(1,
options.statistics.get()->getTickerCount(BLOCK_CACHE_FILTER_MISS));
ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
ASSERT_EQ(index_block_hit + 1,
options.statistics.get()->getTickerCount(BLOCK_CACHE_FILTER_HIT));
TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
value = Get("key");
ASSERT_EQ(1,
options.statistics.get()->getTickerCount(BLOCK_CACHE_FILTER_MISS));
ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
ASSERT_EQ(index_block_hit + 2,
options.statistics.get()->getTickerCount(BLOCK_CACHE_FILTER_HIT));
TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
}
TEST(DBTest, LevelLimitReopen) {
@ -964,47 +958,39 @@ TEST(DBTest, KeyMayExist) {
dbfull()->Flush(FlushOptions());
value.clear();
long numopen = options.statistics.get()->getTickerCount(NO_FILE_OPENS);
long cache_added =
options.statistics.get()->getTickerCount(BLOCK_CACHE_ADD);
long numopen = TestGetTickerCount(options, NO_FILE_OPENS);
long cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD);
ASSERT_TRUE(db_->KeyMayExist(ropts, "a", &value, &value_found));
ASSERT_TRUE(!value_found);
// assert that no new files were opened and no new blocks were
// read into block cache.
ASSERT_EQ(numopen, options.statistics.get()->getTickerCount(NO_FILE_OPENS));
ASSERT_EQ(cache_added,
options.statistics.get()->getTickerCount(BLOCK_CACHE_ADD));
ASSERT_EQ(numopen, TestGetTickerCount(options, NO_FILE_OPENS));
ASSERT_EQ(cache_added, TestGetTickerCount(options, BLOCK_CACHE_ADD));
ASSERT_OK(db_->Delete(WriteOptions(), "a"));
numopen = options.statistics.get()->getTickerCount(NO_FILE_OPENS);
cache_added =
options.statistics.get()->getTickerCount(BLOCK_CACHE_ADD);
numopen = TestGetTickerCount(options, NO_FILE_OPENS);
cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD);
ASSERT_TRUE(!db_->KeyMayExist(ropts, "a", &value));
ASSERT_EQ(numopen, options.statistics.get()->getTickerCount(NO_FILE_OPENS));
ASSERT_EQ(cache_added,
options.statistics.get()->getTickerCount(BLOCK_CACHE_ADD));
ASSERT_EQ(numopen, TestGetTickerCount(options, NO_FILE_OPENS));
ASSERT_EQ(cache_added, TestGetTickerCount(options, BLOCK_CACHE_ADD));
dbfull()->Flush(FlushOptions());
dbfull()->CompactRange(nullptr, nullptr);
numopen = options.statistics.get()->getTickerCount(NO_FILE_OPENS);
cache_added =
options.statistics.get()->getTickerCount(BLOCK_CACHE_ADD);
numopen = TestGetTickerCount(options, NO_FILE_OPENS);
cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD);
ASSERT_TRUE(!db_->KeyMayExist(ropts, "a", &value));
ASSERT_EQ(numopen, options.statistics.get()->getTickerCount(NO_FILE_OPENS));
ASSERT_EQ(cache_added,
options.statistics.get()->getTickerCount(BLOCK_CACHE_ADD));
ASSERT_EQ(numopen, TestGetTickerCount(options, NO_FILE_OPENS));
ASSERT_EQ(cache_added, TestGetTickerCount(options, BLOCK_CACHE_ADD));
ASSERT_OK(db_->Delete(WriteOptions(), "c"));
numopen = options.statistics.get()->getTickerCount(NO_FILE_OPENS);
cache_added =
options.statistics.get()->getTickerCount(BLOCK_CACHE_ADD);
numopen = TestGetTickerCount(options, NO_FILE_OPENS);
cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD);
ASSERT_TRUE(!db_->KeyMayExist(ropts, "c", &value));
ASSERT_EQ(numopen, options.statistics.get()->getTickerCount(NO_FILE_OPENS));
ASSERT_EQ(cache_added,
options.statistics.get()->getTickerCount(BLOCK_CACHE_ADD));
ASSERT_EQ(numopen, TestGetTickerCount(options, NO_FILE_OPENS));
ASSERT_EQ(cache_added, TestGetTickerCount(options, BLOCK_CACHE_ADD));
delete options.filter_policy;
} while (ChangeOptions());
@ -1037,9 +1023,8 @@ TEST(DBTest, NonBlockingIteration) {
// verify that a non-blocking iterator does not find any
// kvs. Neither does it do any IOs to storage.
long numopen = options.statistics.get()->getTickerCount(NO_FILE_OPENS);
long cache_added =
options.statistics.get()->getTickerCount(BLOCK_CACHE_ADD);
long numopen = TestGetTickerCount(options, NO_FILE_OPENS);
long cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD);
iter = db_->NewIterator(non_blocking_opts);
count = 0;
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
@ -1047,18 +1032,16 @@ TEST(DBTest, NonBlockingIteration) {
}
ASSERT_EQ(count, 0);
ASSERT_TRUE(iter->status().IsIncomplete());
ASSERT_EQ(numopen, options.statistics.get()->getTickerCount(NO_FILE_OPENS));
ASSERT_EQ(cache_added,
options.statistics.get()->getTickerCount(BLOCK_CACHE_ADD));
ASSERT_EQ(numopen, TestGetTickerCount(options, NO_FILE_OPENS));
ASSERT_EQ(cache_added, TestGetTickerCount(options, BLOCK_CACHE_ADD));
delete iter;
// read in the specified block via a regular get
ASSERT_EQ(Get("a"), "b");
// verify that we can find it via a non-blocking scan
numopen = options.statistics.get()->getTickerCount(NO_FILE_OPENS);
cache_added =
options.statistics.get()->getTickerCount(BLOCK_CACHE_ADD);
numopen = TestGetTickerCount(options, NO_FILE_OPENS);
cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD);
iter = db_->NewIterator(non_blocking_opts);
count = 0;
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
@ -1066,9 +1049,8 @@ TEST(DBTest, NonBlockingIteration) {
count++;
}
ASSERT_EQ(count, 1);
ASSERT_EQ(numopen, options.statistics.get()->getTickerCount(NO_FILE_OPENS));
ASSERT_EQ(cache_added,
options.statistics.get()->getTickerCount(BLOCK_CACHE_ADD));
ASSERT_EQ(numopen, TestGetTickerCount(options, NO_FILE_OPENS));
ASSERT_EQ(cache_added, TestGetTickerCount(options, BLOCK_CACHE_ADD));
delete iter;
} while (ChangeOptions());
@ -1273,12 +1255,10 @@ TEST(DBTest, IterReseek) {
ASSERT_OK(Put("b", "bone"));
Iterator* iter = db_->NewIterator(ReadOptions());
iter->SeekToFirst();
ASSERT_EQ(options.statistics.get()->getTickerCount(
NUMBER_OF_RESEEKS_IN_ITERATION), 0);
ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION), 0);
ASSERT_EQ(IterStatus(iter), "a->two");
iter->Next();
ASSERT_EQ(options.statistics.get()->getTickerCount(
NUMBER_OF_RESEEKS_IN_ITERATION), 0);
ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION), 0);
ASSERT_EQ(IterStatus(iter), "b->bone");
delete iter;
@ -1289,8 +1269,7 @@ TEST(DBTest, IterReseek) {
iter->SeekToFirst();
ASSERT_EQ(IterStatus(iter), "a->three");
iter->Next();
ASSERT_EQ(options.statistics.get()->getTickerCount(
NUMBER_OF_RESEEKS_IN_ITERATION), 0);
ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION), 0);
ASSERT_EQ(IterStatus(iter), "b->bone");
delete iter;
@ -1300,30 +1279,28 @@ TEST(DBTest, IterReseek) {
iter = db_->NewIterator(ReadOptions());
iter->SeekToFirst();
ASSERT_EQ(IterStatus(iter), "a->four");
ASSERT_EQ(options.statistics.get()->getTickerCount(
NUMBER_OF_RESEEKS_IN_ITERATION), 0);
ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION), 0);
iter->Next();
ASSERT_EQ(options.statistics.get()->getTickerCount(
NUMBER_OF_RESEEKS_IN_ITERATION), 1);
ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION), 1);
ASSERT_EQ(IterStatus(iter), "b->bone");
delete iter;
// Testing reverse iterator
// At this point, we have three versions of "a" and one version of "b".
// The reseek statistics is already at 1.
int num_reseeks = (int)options.statistics.get()->getTickerCount(
NUMBER_OF_RESEEKS_IN_ITERATION);
int num_reseeks =
(int)TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION);
// Insert another version of b and assert that reseek is not invoked
ASSERT_OK(Put("b", "btwo"));
iter = db_->NewIterator(ReadOptions());
iter->SeekToLast();
ASSERT_EQ(IterStatus(iter), "b->btwo");
ASSERT_EQ(options.statistics.get()->getTickerCount(
NUMBER_OF_RESEEKS_IN_ITERATION), num_reseeks);
ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION),
num_reseeks);
iter->Prev();
ASSERT_EQ(options.statistics.get()->getTickerCount(
NUMBER_OF_RESEEKS_IN_ITERATION), num_reseeks+1);
ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION),
num_reseeks + 1);
ASSERT_EQ(IterStatus(iter), "a->four");
delete iter;
@ -1334,13 +1311,13 @@ TEST(DBTest, IterReseek) {
iter = db_->NewIterator(ReadOptions());
iter->SeekToLast();
ASSERT_EQ(IterStatus(iter), "b->bfour");
ASSERT_EQ(options.statistics.get()->getTickerCount(
NUMBER_OF_RESEEKS_IN_ITERATION), num_reseeks + 2);
ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION),
num_reseeks + 2);
iter->Prev();
// the previous Prev call should have invoked reseek
ASSERT_EQ(options.statistics.get()->getTickerCount(
NUMBER_OF_RESEEKS_IN_ITERATION), num_reseeks + 3);
ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION),
num_reseeks + 3);
ASSERT_EQ(IterStatus(iter), "a->four");
delete iter;
}
@ -2103,24 +2080,18 @@ TEST(DBTest, CompressedCache) {
switch (iter) {
case 0:
// only uncompressed block cache
ASSERT_GT(options.statistics.get()->getTickerCount(BLOCK_CACHE_MISS),
0);
ASSERT_EQ(options.statistics.get()->getTickerCount
(BLOCK_CACHE_COMPRESSED_MISS), 0);
ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_MISS), 0);
ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS), 0);
break;
case 1:
// no block cache, only compressed cache
ASSERT_EQ(options.statistics.get()->getTickerCount(BLOCK_CACHE_MISS),
0);
ASSERT_GT(options.statistics.get()->getTickerCount
(BLOCK_CACHE_COMPRESSED_MISS), 0);
ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_MISS), 0);
ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS), 0);
break;
case 2:
// both compressed and uncompressed block cache
ASSERT_GT(options.statistics.get()->getTickerCount(BLOCK_CACHE_MISS),
0);
ASSERT_GT(options.statistics.get()->getTickerCount
(BLOCK_CACHE_COMPRESSED_MISS), 0);
ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_MISS), 0);
ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS), 0);
break;
default:
ASSERT_TRUE(false);

@ -20,7 +20,7 @@
#include "util/coding.h"
#include "util/mutexlock.h"
#include "util/murmurhash.h"
#include "util/statistics_imp.h"
#include "util/statistics.h"
namespace std {
template <>

@ -8,7 +8,7 @@
#include "rocksdb/comparator.h"
#include "rocksdb/db.h"
#include "rocksdb/merge_operator.h"
#include "util/statistics_imp.h"
#include "util/statistics.h"
#include <string>
#include <stdio.h>

@ -17,7 +17,7 @@
#include "db/filename.h"
#include "db/version_set.h"
#include "db/write_batch_internal.h"
#include "db/db_statistics.h"
#include "rocksdb/statistics.h"
#include "rocksdb/cache.h"
#include "rocksdb/compaction_filter.h"
#include "rocksdb/env.h"

@ -28,7 +28,7 @@
#include "db/snapshot.h"
#include "db/write_batch_internal.h"
#include "util/coding.h"
#include "util/statistics_imp.h"
#include "util/statistics.h"
#include <stdexcept>
namespace rocksdb {

@ -242,53 +242,10 @@ struct HistogramData {
double standard_deviation;
};
class Histogram {
public:
// clear's the histogram
virtual void Clear() = 0;
virtual ~Histogram();
// Add a value to be recorded in the histogram.
virtual void Add(uint64_t value) = 0;
virtual std::string ToString() const = 0;
// Get statistics
virtual double Median() const = 0;
virtual double Percentile(double p) const = 0;
virtual double Average() const = 0;
virtual double StandardDeviation() const = 0;
virtual void Data(HistogramData * const data) const = 0;
};
/**
* A dumb ticker which keeps incrementing through its life time.
* Thread safe. Locking managed by implementation of this interface.
*/
class Ticker {
public:
Ticker() : count_(0) { }
inline void setTickerCount(uint64_t count) {
count_ = count;
}
inline void recordTick(int count = 1) {
count_ += count;
}
inline uint64_t getCount() {
return count_;
}
private:
std::atomic_uint_fast64_t count_;
};
// Analyze the performance of a db
class Statistics {
public:
virtual ~Statistics() {}
virtual long getTickerCount(Tickers tickerType) = 0;
virtual void recordTick(Tickers tickerType, uint64_t count = 0) = 0;

@ -12,7 +12,8 @@
#include <vector>
#include "db/dbformat.h"
#include "db/db_statistics.h"
#include "rocksdb/statistics.h"
#include "util/statistics.h"
#include "db/memtable.h"
#include "db/write_batch_internal.h"
#include "rocksdb/cache.h"
@ -935,18 +936,12 @@ TEST(TableTest, NumBlockStat) {
class BlockCacheProperties {
public:
explicit BlockCacheProperties(Statistics* statistics) {
block_cache_miss =
statistics->getTickerCount(BLOCK_CACHE_MISS);
block_cache_hit =
statistics->getTickerCount(BLOCK_CACHE_HIT);
index_block_cache_miss =
statistics->getTickerCount(BLOCK_CACHE_INDEX_MISS);
index_block_cache_hit =
statistics->getTickerCount(BLOCK_CACHE_INDEX_HIT);
data_block_cache_miss =
statistics->getTickerCount(BLOCK_CACHE_DATA_MISS);
data_block_cache_hit =
statistics->getTickerCount(BLOCK_CACHE_DATA_HIT);
block_cache_miss = statistics->getTickerCount(BLOCK_CACHE_MISS);
block_cache_hit = statistics->getTickerCount(BLOCK_CACHE_HIT);
index_block_cache_miss = statistics->getTickerCount(BLOCK_CACHE_INDEX_MISS);
index_block_cache_hit = statistics->getTickerCount(BLOCK_CACHE_INDEX_HIT);
data_block_cache_miss = statistics->getTickerCount(BLOCK_CACHE_DATA_MISS);
data_block_cache_hit = statistics->getTickerCount(BLOCK_CACHE_DATA_HIT);
}
// Check if the fetched props matches the expected ones.

@ -26,7 +26,7 @@
#include <gflags/gflags.h>
#include "db/db_impl.h"
#include "db/version_set.h"
#include "db/db_statistics.h"
#include "rocksdb/statistics.h"
#include "rocksdb/cache.h"
#include "utilities/utility_db.h"
#include "rocksdb/env.h"

@ -16,27 +16,38 @@
namespace rocksdb {
HistogramBucketMapper::HistogramBucketMapper() :
// Add newer bucket index here.
// Should be alwyas added in sorted order.
bucketValues_({
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16, 18, 20, 25, 30, 35, 40, 45,
50, 60, 70, 80, 90, 100, 120, 140, 160, 180, 200, 250, 300, 350, 400, 450,
500, 600, 700, 800, 900, 1000, 1200, 1400, 1600, 1800, 2000, 2500, 3000,
3500, 4000, 4500, 5000, 6000, 7000, 8000, 9000, 10000, 12000, 14000,
16000, 18000, 20000, 25000, 30000, 35000, 40000, 45000, 50000, 60000,
70000, 80000, 90000, 100000, 120000, 140000, 160000, 180000, 200000,
250000, 300000, 350000, 400000, 450000, 500000, 600000, 700000, 800000,
900000, 1000000, 1200000, 1400000, 1600000, 1800000, 2000000, 2500000,
3000000, 3500000, 4000000, 4500000, 5000000, 6000000, 7000000, 8000000,
9000000, 10000000, 12000000, 14000000, 16000000, 18000000, 20000000,
25000000, 30000000, 35000000, 40000000, 45000000, 50000000, 60000000,
70000000, 80000000, 90000000, 100000000, 120000000, 140000000, 160000000,
180000000, 200000000, 250000000, 300000000, 350000000, 400000000,
450000000, 500000000, 600000000, 700000000, 800000000, 900000000,
1000000000}),
maxBucketValue_(bucketValues_.back()),
minBucketValue_(bucketValues_.front()) {
HistogramBucketMapper::HistogramBucketMapper()
:
// Add newer bucket index here.
// Should be alwyas added in sorted order.
// If you change this, you also need to change
// size of array buckets_ in HistogramImpl
bucketValues_(
{1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 12, 14,
16, 18, 20, 25, 30, 35,
40, 45, 50, 60, 70, 80,
90, 100, 120, 140, 160, 180,
200, 250, 300, 350, 400, 450,
500, 600, 700, 800, 900, 1000,
1200, 1400, 1600, 1800, 2000, 2500,
3000, 3500, 4000, 4500, 5000, 6000,
7000, 8000, 9000, 10000, 12000, 14000,
16000, 18000, 20000, 25000, 30000, 35000,
40000, 45000, 50000, 60000, 70000, 80000,
90000, 100000, 120000, 140000, 160000, 180000,
200000, 250000, 300000, 350000, 400000, 450000,
500000, 600000, 700000, 800000, 900000, 1000000,
1200000, 1400000, 1600000, 1800000, 2000000, 2500000,
3000000, 3500000, 4000000, 4500000, 5000000, 6000000,
7000000, 8000000, 9000000, 10000000, 12000000, 14000000,
16000000, 18000000, 20000000, 25000000, 30000000, 35000000,
40000000, 45000000, 50000000, 60000000, 70000000, 80000000,
90000000, 100000000, 120000000, 140000000, 160000000, 180000000,
200000000, 250000000, 300000000, 350000000, 400000000, 450000000,
500000000, 600000000, 700000000, 800000000, 900000000, 1000000000}),
maxBucketValue_(bucketValues_.back()),
minBucketValue_(bucketValues_.front()) {
for (size_t i =0; i < bucketValues_.size(); ++i) {
valueIndexMap_[bucketValues_[i]] = i;
}
@ -62,24 +73,17 @@ namespace {
const HistogramBucketMapper bucketMapper;
}
HistogramImpl::HistogramImpl() :
min_(bucketMapper.LastValue()),
max_(0),
num_(0),
sum_(0),
sum_squares_(0),
buckets_(std::vector<uint64_t>(bucketMapper.BucketCount(), 0)) {}
void HistogramImpl::Clear() {
min_ = bucketMapper.LastValue();
max_ = 0;
num_ = 0;
sum_ = 0;
sum_squares_ = 0;
buckets_.resize(bucketMapper.BucketCount(), 0);
memset(buckets_, 0, sizeof buckets_);
}
bool HistogramImpl::Empty() { return sum_squares_ == 0; }
void HistogramImpl::Add(uint64_t value) {
const size_t index = bucketMapper.IndexForValue(value);
buckets_[index] += 1;

@ -52,9 +52,8 @@ class HistogramBucketMapper {
class HistogramImpl {
public:
HistogramImpl();
virtual ~HistogramImpl() {}
virtual void Clear();
virtual bool Empty();
virtual void Add(uint64_t value);
void Merge(const HistogramImpl& other);
@ -67,13 +66,14 @@ class HistogramImpl {
virtual void Data(HistogramData * const data) const;
private:
double min_;
double max_;
double num_;
double sum_;
double sum_squares_;
std::vector<uint64_t> buckets_;
// To be able to use HistogramImpl as thread local variable, its constructor
// has to be static. That's why we're using manually values from BucketMapper
double min_ = 1000000000; // this is BucketMapper:LastValue()
double max_ = 0;
double num_ = 0;
double sum_ = 0;
double sum_squares_ = 0;
uint64_t buckets_[138] = {0}; // this is BucketMapper::BucketCount()
};
} // namespace rocksdb
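The histogram change swaps the `std::vector` member for a fixed-size array with in-class initializers, so `HistogramImpl` can be constructed without running `HistogramBucketMapper` code; the diff's comment ties this to using it as a thread-local. A minimal sketch of that pattern (the class name and the `thread_local` declaration are illustrative, not from the diff; the constants are the ones hand-copied in the diff):

```cpp
#include <cstdint>

// Sketch: every member has an in-class initializer and the bucket array has
// a fixed compile-time size, so no out-of-line constructor is needed. The
// constants mirror the values the diff copies by hand from
// HistogramBucketMapper (LastValue() and BucketCount()).
class HistogramSketch {
 public:
  void Add(uint64_t value) {
    // The real code maps `value` to a slot in buckets_ via
    // HistogramBucketMapper; here we only update the summary fields to keep
    // the sketch self-contained.
    if (value < min_) min_ = value;
    if (value > max_) max_ = value;
    num_ += 1;
    sum_ += value;
    sum_squares_ += static_cast<double>(value) * value;
  }

 private:
  double min_ = 1000000000;      // BucketMapper::LastValue()
  double max_ = 0;
  double num_ = 0;
  double sum_ = 0;
  double sum_squares_ = 0;
  uint64_t buckets_[138] = {0};  // BucketMapper::BucketCount()
};

// Because construction needs no runtime logic, an instance can be declared
// thread_local without dynamic-initialization concerns.
thread_local HistogramSketch per_thread_histogram;
```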

@ -3,12 +3,48 @@
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
#include "util/statistics.h"
#include "rocksdb/statistics.h"
#include <cstdio>
namespace rocksdb {
std::shared_ptr<Statistics> CreateDBStatistics() {
return std::make_shared<StatisticsImpl>();
}
StatisticsImpl::StatisticsImpl() {}
StatisticsImpl::~StatisticsImpl() {}
long StatisticsImpl::getTickerCount(Tickers tickerType) {
assert(tickerType < TICKER_ENUM_MAX);
return tickers_[tickerType];
}
void StatisticsImpl::setTickerCount(Tickers tickerType, uint64_t count) {
assert(tickerType < TICKER_ENUM_MAX);
tickers_[tickerType] = count;
}
void StatisticsImpl::recordTick(Tickers tickerType, uint64_t count) {
assert(tickerType < TICKER_ENUM_MAX);
tickers_[tickerType] += count;
}
void StatisticsImpl::measureTime(Histograms histogramType, uint64_t value) {
assert(histogramType < HISTOGRAM_ENUM_MAX);
histograms_[histogramType].Add(value);
}
void StatisticsImpl::histogramData(Histograms histogramType,
HistogramData* const data) {
assert(histogramType < HISTOGRAM_ENUM_MAX);
histograms_[histogramType].Data(data);
}
namespace {
// a buffer size used for temp string buffers
const int kBufferSize = 200;
@ -32,11 +68,8 @@ std::string HistogramToString (
return std::string(buffer);
};
std::string TickerToString (
Statistics* dbstats,
const Tickers& ticker,
const std::string& name) {
std::string TickerToString(Statistics* dbstats, const Tickers& ticker,
const std::string& name) {
char buffer[kBufferSize];
snprintf(buffer, kBufferSize, "%s COUNT : %ld\n",
name.c_str(), dbstats->getTickerCount(ticker));

@ -0,0 +1,53 @@
// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
#pragma once
#include "rocksdb/statistics.h"
#include "util/histogram.h"
#include "util/mutexlock.h"
#define UNLIKELY(val) (__builtin_expect((val), 0))
namespace rocksdb {
class StatisticsImpl : public Statistics {
public:
StatisticsImpl();
virtual ~StatisticsImpl();
virtual long getTickerCount(Tickers tickerType);
virtual void setTickerCount(Tickers tickerType, uint64_t count);
virtual void recordTick(Tickers tickerType, uint64_t count);
virtual void measureTime(Histograms histogramType, uint64_t value);
virtual void histogramData(Histograms histogramType,
HistogramData* const data);
private:
std::atomic_uint_fast64_t tickers_[TICKER_ENUM_MAX];
HistogramImpl histograms_[HISTOGRAM_ENUM_MAX];
};
// Utility functions
inline void MeasureTime(Statistics* statistics, Histograms histogramType,
uint64_t value) {
if (statistics) {
statistics->measureTime(histogramType, value);
}
}
inline void RecordTick(Statistics* statistics, Tickers ticker,
uint64_t count = 1) {
if (statistics) {
statistics->recordTick(ticker, count);
}
}
inline void SetTickerCount(Statistics* statistics, Tickers ticker,
uint64_t count) {
if (statistics) {
statistics->setTickerCount(ticker, count);
}
}
}
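The new `StatisticsImpl` keeps one lock-free atomic counter per ticker, indexed directly by the `Tickers` enum value, so `recordTick` is a single atomic add. A simplified sketch of that indexing scheme (the two-value enum and the class name are illustrative; the real enum lives in include/rocksdb/statistics.h):

```cpp
#include <atomic>
#include <cassert>
#include <cstdint>

// Illustrative stand-in for the Tickers enum.
enum Tickers : uint32_t { BLOCK_CACHE_MISS, BLOCK_CACHE_HIT, TICKER_ENUM_MAX };

class TickerArraySketch {
 public:
  void recordTick(Tickers type, uint64_t count = 1) {
    assert(type < TICKER_ENUM_MAX);
    tickers_[type] += count;  // lock-free atomic increment
  }
  uint64_t getTickerCount(Tickers type) const {
    assert(type < TICKER_ENUM_MAX);
    return tickers_[type];  // atomic load via implicit conversion
  }

 private:
  // One atomic counter per ticker, indexed directly by the enum value.
  std::atomic_uint_fast64_t tickers_[TICKER_ENUM_MAX] = {};
};
```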

@ -1,32 +0,0 @@
// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
#pragma once
#include "rocksdb/statistics.h"
namespace rocksdb {
// Utility functions
inline void RecordTick(Statistics* statistics,
Tickers ticker,
uint64_t count = 1) {
assert(HistogramsNameMap.size() == HISTOGRAM_ENUM_MAX);
assert(TickersNameMap.size() == TICKER_ENUM_MAX);
if (statistics) {
statistics->recordTick(ticker, count);
}
}
inline void SetTickerCount(Statistics* statistics,
Tickers ticker,
uint64_t count) {
assert(HistogramsNameMap.size() == HISTOGRAM_ENUM_MAX);
assert(TickersNameMap.size() == TICKER_ENUM_MAX);
if (statistics) {
statistics->setTickerCount(ticker, count);
}
}
}

@ -5,7 +5,7 @@
//
#pragma once
#include "rocksdb/env.h"
#include "util/statistics_imp.h"
#include "util/statistics.h"
namespace rocksdb {
// Auto-scoped.
@ -28,11 +28,7 @@ class StopWatch {
return env_->NowMicros() - start_time_;
}
~StopWatch() {
if (statistics_) {
statistics_->measureTime(histogram_name_, ElapsedMicros());
}
}
~StopWatch() { MeasureTime(statistics_, histogram_name_, ElapsedMicros()); }
private:
Env* const env_;
