diff --git a/db/builder.cc b/db/builder.cc
index b3bf894ef..ad1334a15 100644
--- a/db/builder.cc
+++ b/db/builder.cc
@@ -112,6 +112,7 @@ Status BuildTable(const std::string& dbname,
       if (this_ikey.type == kTypeMerge) {
         // Handle merge-type keys using the MergeHelper
+        // TODO: pass statistics to MergeUntil
         merge.MergeUntil(iter, 0 /* don't worry about snapshot */);
         iterator_at_next = true;
         if (merge.IsSuccess()) {
@@ -188,10 +189,10 @@ Status BuildTable(const std::string& dbname,
     // Finish and check for file errors
     if (s.ok() && !options.disableDataSync) {
       if (options.use_fsync) {
-        StopWatch sw(env, options.statistics, TABLE_SYNC_MICROS);
+        StopWatch sw(env, options.statistics.get(), TABLE_SYNC_MICROS);
         s = file->Fsync();
       } else {
-        StopWatch sw(env, options.statistics, TABLE_SYNC_MICROS);
+        StopWatch sw(env, options.statistics.get(), TABLE_SYNC_MICROS);
         s = file->Sync();
       }
     }
diff --git a/db/db_impl.cc b/db/db_impl.cc
index 5a2f0de4a..22dba5f2e 100644
--- a/db/db_impl.cc
+++ b/db/db_impl.cc
@@ -404,7 +404,7 @@ const Status DBImpl::CreateArchivalDirectory() {
 }
 
 void DBImpl::PrintStatistics() {
-  auto dbstats = options_.statistics;
+  auto dbstats = options_.statistics.get();
   if (dbstats) {
     Log(options_.info_log,
         "STATISTCS:\n %s",
@@ -860,7 +860,7 @@ Status DBImpl::Recover(VersionEdit* edit, MemTable* external_table,
     if (versions_->LastSequence() < max_sequence) {
       versions_->SetLastSequence(max_sequence);
     }
-    SetTickerCount(options_.statistics, SEQUENCE_NUMBER,
+    SetTickerCount(options_.statistics.get(), SEQUENCE_NUMBER,
                    versions_->LastSequence());
   }
 }
@@ -1297,7 +1297,7 @@ SequenceNumber DBImpl::GetLatestSequenceNumber() const {
 
 Status DBImpl::GetUpdatesSince(SequenceNumber seq,
                                unique_ptr<TransactionLogIterator>* iter) {
-  RecordTick(options_.statistics, GET_UPDATES_SINCE_CALLS);
+  RecordTick(options_.statistics.get(), GET_UPDATES_SINCE_CALLS);
   if (seq > versions_->LastSequence()) {
     return Status::IOError("Requested sequence not yet written in the db");
   }
@@ -1971,10 +1971,12 @@ Status DBImpl::FinishCompactionOutputFile(CompactionState* compact,
   // Finish and check for file errors
   if (s.ok() && !options_.disableDataSync) {
     if (options_.use_fsync) {
-      StopWatch sw(env_, options_.statistics, COMPACTION_OUTFILE_SYNC_MICROS);
+      StopWatch sw(env_, options_.statistics.get(),
+                   COMPACTION_OUTFILE_SYNC_MICROS);
      s = compact->outfile->Fsync();
     } else {
-      StopWatch sw(env_, options_.statistics, COMPACTION_OUTFILE_SYNC_MICROS);
+      StopWatch sw(env_, options_.statistics.get(),
+                   COMPACTION_OUTFILE_SYNC_MICROS);
       s = compact->outfile->Sync();
     }
   }
@@ -2212,7 +2214,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
             ParseInternalKey(key, &ikey);
             // no value associated with delete
             value.clear();
-            RecordTick(options_.statistics, COMPACTION_KEY_DROP_USER);
+            RecordTick(options_.statistics.get(), COMPACTION_KEY_DROP_USER);
           } else if (value_changed) {
             value = compaction_filter_value;
           }
@@ -2238,7 +2240,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
         // TODO: why not > ?
         assert(last_sequence_for_key >= ikey.sequence);
         drop = true;    // (A)
-        RecordTick(options_.statistics, COMPACTION_KEY_DROP_NEWER_ENTRY);
+        RecordTick(options_.statistics.get(), COMPACTION_KEY_DROP_NEWER_ENTRY);
       } else if (ikey.type == kTypeDeletion &&
                  ikey.sequence <= earliest_snapshot &&
                  compact->compaction->IsBaseLevelForKey(ikey.user_key)) {
@@ -2250,7 +2252,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
         // few iterations of this loop (by rule (A) above).
         // Therefore this deletion marker is obsolete and can be dropped.
         drop = true;
-        RecordTick(options_.statistics, COMPACTION_KEY_DROP_OBSOLETE);
+        RecordTick(options_.statistics.get(), COMPACTION_KEY_DROP_OBSOLETE);
       } else if (ikey.type == kTypeMerge) {
         // We know the merge type entry is not hidden, otherwise we would
         // have hit (A)
@@ -2259,7 +2261,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
         // logic could also be nicely re-used for memtable flush purge
         // optimization in BuildTable.
         merge.MergeUntil(input.get(), prev_snapshot, bottommost_level,
-                         options_.statistics);
+                         options_.statistics.get());
         current_entry_is_merging = true;
         if (merge.IsSuccess()) {
           // Successfully found Put/Delete/(end-of-key-range) while merging
@@ -2412,8 +2414,8 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
   CompactionStats stats;
   stats.micros = env_->NowMicros() - start_micros - imm_micros;
-  if (options_.statistics) {
-    options_.statistics->measureTime(COMPACTION_TIME, stats.micros);
+  if (options_.statistics.get()) {
+    options_.statistics.get()->measureTime(COMPACTION_TIME, stats.micros);
   }
   stats.files_in_leveln = compact->compaction->num_input_files(0);
   stats.files_in_levelnp1 = compact->compaction->num_input_files(1);
@@ -2554,7 +2556,7 @@ Status DBImpl::GetImpl(const ReadOptions& options,
                        bool* value_found) {
   Status s;
-  StopWatch sw(env_, options_.statistics, DB_GET);
+  StopWatch sw(env_, options_.statistics.get(), DB_GET);
   SequenceNumber snapshot;
   mutex_.Lock();
   if (options.snapshot != nullptr) {
@@ -2605,8 +2607,8 @@ Status DBImpl::GetImpl(const ReadOptions& options,
   LogFlush(options_.info_log);
   // Note, tickers are atomic now - no lock protection needed any more.
-  RecordTick(options_.statistics, NUMBER_KEYS_READ);
-  RecordTick(options_.statistics, BYTES_READ, value->size());
+  RecordTick(options_.statistics.get(), NUMBER_KEYS_READ);
+  RecordTick(options_.statistics.get(), BYTES_READ, value->size());
   return s;
 }
@@ -2614,7 +2616,7 @@ std::vector<Status> DBImpl::MultiGet(const ReadOptions& options,
                                      const std::vector<Slice>& keys,
                                      std::vector<std::string>* values) {
-  StopWatch sw(env_, options_.statistics, DB_MULTIGET);
+  StopWatch sw(env_, options_.statistics.get(), DB_MULTIGET);
   SequenceNumber snapshot;
   mutex_.Lock();
   if (options.snapshot != nullptr) {
@@ -2683,9 +2685,9 @@ std::vector<Status> DBImpl::MultiGet(const ReadOptions& options,
   mutex_.Unlock();
   LogFlush(options_.info_log);
-  RecordTick(options_.statistics, NUMBER_MULTIGET_CALLS);
-  RecordTick(options_.statistics, NUMBER_MULTIGET_KEYS_READ, numKeys);
-  RecordTick(options_.statistics, NUMBER_MULTIGET_BYTES_READ, bytesRead);
+  RecordTick(options_.statistics.get(), NUMBER_MULTIGET_CALLS);
+  RecordTick(options_.statistics.get(), NUMBER_MULTIGET_KEYS_READ, numKeys);
+  RecordTick(options_.statistics.get(), NUMBER_MULTIGET_BYTES_READ, bytesRead);
   return statList;
 }
@@ -2760,7 +2762,7 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
   w.disableWAL = options.disableWAL;
   w.done = false;
-  StopWatch sw(env_, options_.statistics, DB_WRITE);
+  StopWatch sw(env_, options_.statistics.get(), DB_WRITE);
   MutexLock l(&mutex_);
   writers_.push_back(&w);
   while (!w.done && &w != writers_.front()) {
@@ -2793,8 +2795,9 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
       int my_batch_count = WriteBatchInternal::Count(updates);
       last_sequence += my_batch_count;
       // Record statistics
-      RecordTick(options_.statistics, NUMBER_KEYS_WRITTEN, my_batch_count);
-      RecordTick(options_.statistics,
+      RecordTick(options_.statistics.get(),
+                 NUMBER_KEYS_WRITTEN, my_batch_count);
+      RecordTick(options_.statistics.get(),
                  BYTES_WRITTEN, WriteBatchInternal::ByteSize(updates));
       if (options.disableWAL) {
@@ -2808,10 +2811,10 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
         BumpPerfTime(&perf_context.wal_write_time, &timer);
         if (status.ok() && options.sync) {
           if (options_.use_fsync) {
-            StopWatch(env_, options_.statistics, WAL_FILE_SYNC_MICROS);
+            StopWatch(env_, options_.statistics.get(), WAL_FILE_SYNC_MICROS);
             status = log_->file()->Fsync();
           } else {
-            StopWatch(env_, options_.statistics, WAL_FILE_SYNC_MICROS);
+            StopWatch(env_, options_.statistics.get(), WAL_FILE_SYNC_MICROS);
             status = log_->file()->Sync();
           }
         }
@@ -2826,7 +2829,8 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
           // have succeeded in memtable but Status reports error for all writes.
           throw std::runtime_error("In memory WriteBatch corruption!");
         }
-        SetTickerCount(options_.statistics, SEQUENCE_NUMBER, last_sequence);
+        SetTickerCount(options_.statistics.get(),
+                       SEQUENCE_NUMBER, last_sequence);
       }
       LogFlush(options_.info_log);
       mutex_.Lock();
@@ -2975,7 +2979,7 @@ Status DBImpl::MakeRoomForWrite(bool force) {
       mutex_.Unlock();
       uint64_t delayed;
       {
-        StopWatch sw(env_, options_.statistics, STALL_L0_SLOWDOWN_COUNT);
+        StopWatch sw(env_, options_.statistics.get(), STALL_L0_SLOWDOWN_COUNT);
         env_->SleepForMicroseconds(
             SlowdownAmount(versions_->NumLevelFiles(0),
                            options_.level0_slowdown_writes_trigger,
@@ -2983,7 +2987,7 @@ Status DBImpl::MakeRoomForWrite(bool force) {
         );
         delayed = sw.ElapsedMicros();
       }
-      RecordTick(options_.statistics, STALL_L0_SLOWDOWN_MICROS, delayed);
+      RecordTick(options_.statistics.get(), STALL_L0_SLOWDOWN_MICROS, delayed);
       stall_level0_slowdown_ += delayed;
       stall_level0_slowdown_count_++;
       allow_delay = false;  // Do not delay a single write more than once
@@ -3003,12 +3007,13 @@ Status DBImpl::MakeRoomForWrite(bool force) {
       Log(options_.info_log, "wait for memtable compaction...\n");
       uint64_t stall;
       {
-        StopWatch sw(env_, options_.statistics,
+        StopWatch sw(env_, options_.statistics.get(),
                      STALL_MEMTABLE_COMPACTION_COUNT);
         bg_cv_.Wait();
         stall = sw.ElapsedMicros();
       }
-      RecordTick(options_.statistics, STALL_MEMTABLE_COMPACTION_MICROS, stall);
+      RecordTick(options_.statistics.get(),
+                 STALL_MEMTABLE_COMPACTION_MICROS, stall);
       stall_memtable_compaction_ += stall;
       stall_memtable_compaction_count_++;
     } else if (versions_->NumLevelFiles(0) >=
@@ -3018,11 +3023,12 @@ Status DBImpl::MakeRoomForWrite(bool force) {
       Log(options_.info_log, "wait for fewer level0 files...\n");
       uint64_t stall;
       {
-        StopWatch sw(env_, options_.statistics, STALL_L0_NUM_FILES_COUNT);
+        StopWatch sw(env_, options_.statistics.get(),
+                     STALL_L0_NUM_FILES_COUNT);
         bg_cv_.Wait();
         stall = sw.ElapsedMicros();
       }
-      RecordTick(options_.statistics, STALL_L0_NUM_FILES_MICROS, stall);
+      RecordTick(options_.statistics.get(), STALL_L0_NUM_FILES_MICROS, stall);
       stall_level0_num_files_ += stall;
       stall_level0_num_files_count_++;
     } else if (
@@ -3034,7 +3040,8 @@ Status DBImpl::MakeRoomForWrite(bool force) {
       mutex_.Unlock();
      uint64_t delayed;
       {
-        StopWatch sw(env_, options_.statistics, HARD_RATE_LIMIT_DELAY_COUNT);
+        StopWatch sw(env_, options_.statistics.get(),
+                     HARD_RATE_LIMIT_DELAY_COUNT);
         env_->SleepForMicroseconds(1000);
         delayed = sw.ElapsedMicros();
       }
@@ -3043,7 +3050,8 @@ Status DBImpl::MakeRoomForWrite(bool force) {
       // Make sure the following value doesn't round to zero.
       uint64_t rate_limit = std::max((delayed / 1000), (uint64_t) 1);
       rate_limit_delay_millis += rate_limit;
-      RecordTick(options_.statistics, RATE_LIMIT_DELAY_MILLIS, rate_limit);
+      RecordTick(options_.statistics.get(),
+                 RATE_LIMIT_DELAY_MILLIS, rate_limit);
       if (options_.rate_limit_delay_max_milliseconds > 0 &&
           rate_limit_delay_millis >=
           (unsigned)options_.rate_limit_delay_max_milliseconds) {
@@ -3058,7 +3066,8 @@ Status DBImpl::MakeRoomForWrite(bool force) {
       // TODO: add statistics
       mutex_.Unlock();
       {
-        StopWatch sw(env_, options_.statistics, SOFT_RATE_LIMIT_DELAY_COUNT);
+        StopWatch sw(env_, options_.statistics.get(),
+                     SOFT_RATE_LIMIT_DELAY_COUNT);
         env_->SleepForMicroseconds(SlowdownAmount(
           score,
           options_.soft_rate_limit,
diff --git a/db/db_iter.cc b/db/db_iter.cc
index 4e3c52c6e..89eaf0949 100644
--- a/db/db_iter.cc
+++ b/db/db_iter.cc
@@ -69,7 +69,7 @@ class DBIter: public Iterator {
         direction_(kForward),
         valid_(false),
         current_entry_is_merged_(false),
-        statistics_(options.statistics) {
+        statistics_(options.statistics.get()) {
     RecordTick(statistics_, NO_ITERATORS, 1);
     max_skip_ = options.max_sequential_skip_in_iterations;
   }
@@ -135,7 +135,7 @@ class DBIter: public Iterator {
   Direction direction_;
   bool valid_;
   bool current_entry_is_merged_;
-  std::shared_ptr<Statistics> statistics_;
+  Statistics* statistics_;
   uint64_t max_skip_;
 
   // No copying allowed
diff --git a/db/memtable.cc b/db/memtable.cc
index 291899c21..ac589a563 100644
--- a/db/memtable.cc
+++ b/db/memtable.cc
@@ -19,6 +19,7 @@
 #include "util/coding.h"
 #include "util/mutexlock.h"
 #include "util/murmurhash.h"
+#include "util/statistics_imp.h"
 
 namespace std {
 template <>
@@ -203,7 +204,7 @@ bool MemTable::Get(const LookupKey& key, std::string* value, Status* s,
       assert(merge_operator);
       if (!merge_operator->FullMerge(key.user_key(), &v, *operands,
                                      value, logger.get())) {
-        RecordTick(options.statistics, NUMBER_MERGE_FAILURES);
+        RecordTick(options.statistics.get(), NUMBER_MERGE_FAILURES);
         *s = Status::Corruption("Error: Could not perform merge.");
       }
     } else {
@@ -220,7 +221,7 @@ bool MemTable::Get(const LookupKey& key, std::string* value, Status* s,
       *s = Status::OK();
       if (!merge_operator->FullMerge(key.user_key(), nullptr, *operands,
                                      value, logger.get())) {
-        RecordTick(options.statistics, NUMBER_MERGE_FAILURES);
+        RecordTick(options.statistics.get(), NUMBER_MERGE_FAILURES);
         *s = Status::Corruption("Error: Could not perform merge.");
       }
     } else {
diff --git a/db/merge_helper.cc b/db/merge_helper.cc
index 9d757a5e6..a7e2df0a3 100644
--- a/db/merge_helper.cc
+++ b/db/merge_helper.cc
@@ -8,6 +8,7 @@
 #include "rocksdb/comparator.h"
 #include "rocksdb/db.h"
 #include "rocksdb/merge_operator.h"
+#include "util/statistics_imp.h"
 #include <string>
 #include <stdio.h>
@@ -20,7 +21,7 @@ namespace rocksdb {
 // operands_ stores the list of merge operands encountered while merging.
 // keys_[i] corresponds to operands_[i] for each i.
 void MergeHelper::MergeUntil(Iterator* iter, SequenceNumber stop_before,
-                             bool at_bottom, shared_ptr<Statistics> stats) {
+                             bool at_bottom, Statistics* stats) {
   // Get a copy of the internal key, before it's invalidated by iter->Next()
   // Also maintain the list of merge operands seen.
   keys_.clear();
diff --git a/db/merge_helper.h b/db/merge_helper.h
index 34e2edd94..6fe9bfb23 100644
--- a/db/merge_helper.h
+++ b/db/merge_helper.h
@@ -8,7 +8,6 @@
 #include "db/dbformat.h"
 #include "rocksdb/slice.h"
-#include "rocksdb/statistics.h"
 #include <string>
 #include <deque>
@@ -18,6 +17,7 @@ class Comparator;
 class Iterator;
 class Logger;
 class MergeOperator;
+class Statistics;
 
 class MergeHelper {
  public:
@@ -46,7 +46,7 @@ class MergeHelper {
   // at_bottom:   (IN) true if the iterator covers the bottem level, which means
   //              we could reach the start of the history of this user key.
   void MergeUntil(Iterator* iter, SequenceNumber stop_before = 0,
-                  bool at_bottom = false, shared_ptr<Statistics> stats=nullptr);
+                  bool at_bottom = false, Statistics* stats = nullptr);
 
   // Query the merge result
   // These are valid until the next MergeUntil call
diff --git a/db/table_cache.cc b/db/table_cache.cc
index a1f466b5a..e18c20c99 100644
--- a/db/table_cache.cc
+++ b/db/table_cache.cc
@@ -65,12 +65,12 @@ Status TableCache::FindTable(const EnvOptions& toptions,
     unique_ptr<RandomAccessFile> file;
     unique_ptr<TableReader> table_reader;
     s = env_->NewRandomAccessFile(fname, &file, toptions);
-    RecordTick(options_->statistics, NO_FILE_OPENS);
+    RecordTick(options_->statistics.get(), NO_FILE_OPENS);
     if (s.ok()) {
       if (options_->advise_random_on_open) {
         file->Hint(RandomAccessFile::RANDOM);
       }
-      StopWatch sw(env_, options_->statistics, TABLE_OPEN_IO_MICROS);
+      StopWatch sw(env_, options_->statistics.get(), TABLE_OPEN_IO_MICROS);
       s = options_->table_factory->GetTableReader(*options_, toptions,
                                                   std::move(file), file_size,
                                                   &table_reader);
@@ -78,7 +78,7 @@
     if (!s.ok()) {
       assert(table_reader == nullptr);
-      RecordTick(options_->statistics, NO_FILE_ERRORS);
+      RecordTick(options_->statistics.get(), NO_FILE_ERRORS);
       // We do not cache error results so that if the error is transient,
       // or somebody repairs the file, we recover automatically.
     } else {
diff --git a/db/version_set.cc b/db/version_set.cc
index d554657b4..95db3e477 100644
--- a/db/version_set.cc
+++ b/db/version_set.cc
@@ -290,7 +290,7 @@ struct Saver {
   std::deque<std::string>* merge_operands;  // the merge operations encountered
   Logger* logger;
   bool didIO;    // did we do any disk io?
-  shared_ptr<Statistics> statistics;
+  Statistics* statistics;
 };
 }
@@ -439,7 +439,7 @@ void Version::Get(const ReadOptions& options,
   saver.merge_operands = operands;
   saver.logger = logger.get();
   saver.didIO = false;
-  saver.statistics = db_options.statistics;
+  saver.statistics = db_options.statistics.get();
 
   stats->seek_file = nullptr;
   stats->seek_file_level = -1;
@@ -566,7 +566,7 @@ void Version::Get(const ReadOptions& options,
                                    value, logger.get())) {
         *status = Status::OK();
       } else {
-        RecordTick(db_options.statistics, NUMBER_MERGE_FAILURES);
+        RecordTick(db_options.statistics.get(), NUMBER_MERGE_FAILURES);
         *status = Status::Corruption("could not perform end-of-key merge for ",
                                      user_key);
       }
@@ -1296,10 +1296,12 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu,
     }
     if (s.ok()) {
       if (options_->use_fsync) {
-        StopWatch sw(env_, options_->statistics, MANIFEST_FILE_SYNC_MICROS);
+        StopWatch sw(env_, options_->statistics.get(),
+                     MANIFEST_FILE_SYNC_MICROS);
         s = descriptor_log_->file()->Fsync();
       } else {
-        StopWatch sw(env_, options_->statistics, MANIFEST_FILE_SYNC_MICROS);
+        StopWatch sw(env_, options_->statistics.get(),
+                     MANIFEST_FILE_SYNC_MICROS);
         s = descriptor_log_->file()->Sync();
       }
     }
diff --git a/db/write_batch.cc b/db/write_batch.cc
index 134cfb63c..c04930bbf 100644
--- a/db/write_batch.cc
+++ b/db/write_batch.cc
@@ -20,15 +20,14 @@
 // data: uint8[len]
 
 #include "rocksdb/write_batch.h"
-
 #include "rocksdb/options.h"
-#include "rocksdb/statistics.h"
 #include "db/dbformat.h"
 #include "db/db_impl.h"
 #include "db/memtable.h"
 #include "db/snapshot.h"
 #include "db/write_batch_internal.h"
 #include "util/coding.h"
+#include "util/statistics_imp.h"
 #include <stdexcept>
 
 namespace rocksdb {
@@ -197,7 +196,7 @@ class MemTableInserter : public WriteBatch::Handler {
   virtual void Put(const Slice& key, const Slice& value) {
     if (options_->inplace_update_support &&
         mem_->Update(sequence_, kTypeValue, key, value)) {
-      RecordTick(options_->statistics, NUMBER_KEYS_UPDATED);
+      RecordTick(options_->statistics.get(), NUMBER_KEYS_UPDATED);
     } else {
       mem_->Add(sequence_, kTypeValue, key, value);
     }
@@ -215,7 +214,7 @@ class MemTableInserter : public WriteBatch::Handler {
       ropts.snapshot = &read_from_snapshot;
       std::string value;
       if (!db_->KeyMayExist(ropts, key, &value)) {
-        RecordTick(options_->statistics, NUMBER_FILTERED_DELETES);
+        RecordTick(options_->statistics.get(), NUMBER_FILTERED_DELETES);
         return;
       }
     }
diff --git a/include/rocksdb/statistics.h b/include/rocksdb/statistics.h
index 7d6a53ff8..102a4be58 100644
--- a/include/rocksdb/statistics.h
+++ b/include/rocksdb/statistics.h
@@ -276,27 +276,6 @@ class Statistics {
 // Create a concrete DBStatistics object
 std::shared_ptr<Statistics> CreateDBStatistics();
 
-// Ease of Use functions
-inline void RecordTick(std::shared_ptr<Statistics> statistics,
-                       Tickers ticker,
-                       uint64_t count = 1) {
-  assert(HistogramsNameMap.size() == HISTOGRAM_ENUM_MAX);
-  assert(TickersNameMap.size() == TICKER_ENUM_MAX);
-  if (statistics) {
-    statistics->recordTick(ticker, count);
-  }
-}
-
-inline void SetTickerCount(std::shared_ptr<Statistics> statistics,
-                           Tickers ticker,
-                           uint64_t count) {
-  assert(HistogramsNameMap.size() == HISTOGRAM_ENUM_MAX);
-  assert(TickersNameMap.size() == TICKER_ENUM_MAX);
-  if (statistics) {
-    statistics->setTickerCount(ticker, count);
-  }
-}
-
 } // namespace rocksdb
 
 #endif // STORAGE_ROCKSDB_INCLUDE_STATISTICS_H_
diff --git a/table/block_based_table_builder.cc b/table/block_based_table_builder.cc
index 88b7a5fc7..f846b1ffd 100644
--- a/table/block_based_table_builder.cc
+++ b/table/block_based_table_builder.cc
@@ -272,7 +272,8 @@ void BlockBasedTableBuilder::WriteRawBlock(const Slice& block_contents,
                                            CompressionType type,
                                            BlockHandle* handle) {
   Rep* r = rep_;
-  StopWatch sw(r->options.env, r->options.statistics, WRITE_RAW_BLOCK_MICROS);
+  StopWatch sw(r->options.env, r->options.statistics.get(),
+               WRITE_RAW_BLOCK_MICROS);
   handle->set_offset(r->offset);
   handle->set_size(block_contents.size());
   r->status = r->file->Append(block_contents);
diff --git a/table/block_based_table_reader.cc b/table/block_based_table_reader.cc
index 5a2690103..095c2999c 100644
--- a/table/block_based_table_reader.cc
+++ b/table/block_based_table_reader.cc
@@ -200,7 +200,7 @@ Cache::Handle* GetFromBlockCache(
     const Slice& key,
     Tickers block_cache_miss_ticker,
     Tickers block_cache_hit_ticker,
-    std::shared_ptr<Statistics> statistics) {
+    Statistics* statistics) {
   auto cache_handle = block_cache->Lookup(key);
   if (cache_handle != nullptr) {
     BumpPerfCount(&perf_context.block_cache_hit_count);
@@ -515,7 +515,7 @@ Status BlockBasedTable::GetBlock(
     CachableEntry<Block>* entry) {
   bool no_io = options.read_tier == kBlockCacheTier;
   Cache* block_cache = table->rep_->options.block_cache.get();
-  auto statistics = table->rep_->options.statistics;
+  Statistics* statistics = table->rep_->options.statistics.get();
   Status s;
 
   if (block_cache != nullptr) {
@@ -532,7 +532,7 @@ Status BlockBasedTable::GetBlock(
         key,
         block_cache_miss_ticker,
         block_cache_hit_ticker,
-        table->rep_->options.statistics
+        statistics
     );
 
     if (entry->cache_handle != nullptr) {
@@ -593,7 +593,7 @@ Iterator* BlockBasedTable::BlockReader(void* arg,
   Cache* block_cache = table->rep_->options.block_cache.get();
   Cache* block_cache_compressed = table->rep_->options.
       block_cache_compressed.get();
-  std::shared_ptr<Statistics> statistics = table->rep_->options.statistics;
+  Statistics* statistics = table->rep_->options.statistics.get();
   Block* block = nullptr;
   Block* cblock = nullptr;
   Cache::Handle* cache_handle = nullptr;
@@ -791,12 +791,13 @@ BlockBasedTable::GetFilter(bool no_io) const {
       cache_key
   );
 
+  Statistics* statistics = rep_->options.statistics.get();
   auto cache_handle = GetFromBlockCache(
       block_cache,
       key,
       BLOCK_CACHE_FILTER_MISS,
       BLOCK_CACHE_FILTER_HIT,
-      rep_->options.statistics
+      statistics
   );
 
   FilterBlockReader* filter = nullptr;
@@ -824,7 +825,7 @@ BlockBasedTable::GetFilter(bool no_io) const {
         cache_handle = block_cache->Insert(
             key, filter, filter_size, &DeleteCachedFilter);
-        RecordTick(rep_->options.statistics, BLOCK_CACHE_ADD);
+        RecordTick(statistics, BLOCK_CACHE_ADD);
       }
     }
   }
@@ -945,9 +946,10 @@ bool BlockBasedTable::PrefixMayMatch(const Slice& internal_prefix) {
     filter_entry.Release(rep_->options.block_cache.get());
   }
 
-  RecordTick(rep_->options.statistics, BLOOM_FILTER_PREFIX_CHECKED);
+  Statistics* statistics = rep_->options.statistics.get();
+  RecordTick(statistics, BLOOM_FILTER_PREFIX_CHECKED);
   if (!may_match) {
-    RecordTick(rep_->options.statistics, BLOOM_FILTER_PREFIX_USEFUL);
+    RecordTick(statistics, BLOOM_FILTER_PREFIX_USEFUL);
   }
 
   return may_match;
@@ -997,7 +999,7 @@ Status BlockBasedTable::Get(
         // Not found
         // TODO: think about interaction with Merge. If a user key cannot
         // cross one data block, we should be fine.
-        RecordTick(rep_->options.statistics, BLOOM_FILTER_USEFUL);
+        RecordTick(rep_->options.statistics.get(), BLOOM_FILTER_USEFUL);
         break;
       } else {
         bool didIO = false;
diff --git a/util/statistics_imp.h b/util/statistics_imp.h
new file mode 100644
index 000000000..0dc8884c1
--- /dev/null
+++ b/util/statistics_imp.h
@@ -0,0 +1,32 @@
+// Copyright (c) 2013, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+#pragma once
+#include "rocksdb/statistics.h"
+
+namespace rocksdb {
+
+// Utility functions
+inline void RecordTick(Statistics* statistics,
+                       Tickers ticker,
+                       uint64_t count = 1) {
+  assert(HistogramsNameMap.size() == HISTOGRAM_ENUM_MAX);
+  assert(TickersNameMap.size() == TICKER_ENUM_MAX);
+  if (statistics) {
+    statistics->recordTick(ticker, count);
+  }
+}
+
+inline void SetTickerCount(Statistics* statistics,
+                           Tickers ticker,
+                           uint64_t count) {
+  assert(HistogramsNameMap.size() == HISTOGRAM_ENUM_MAX);
+  assert(TickersNameMap.size() == TICKER_ENUM_MAX);
+  if (statistics) {
+    statistics->setTickerCount(ticker, count);
+  }
+}
+
+}
diff --git a/util/stop_watch.h b/util/stop_watch.h
index f251b6bc1..e36bcb7ec 100644
--- a/util/stop_watch.h
+++ b/util/stop_watch.h
@@ -5,16 +5,16 @@
 //
 #pragma once
 #include "rocksdb/env.h"
-#include "rocksdb/statistics.h"
+#include "util/statistics_imp.h"
 
 namespace rocksdb {
 // Auto-scoped.
 // Records the statistic into the corresponding histogram.
 class StopWatch {
  public:
-  StopWatch(
+  explicit StopWatch(
     Env * const env,
-    std::shared_ptr<Statistics> statistics = nullptr,
+    Statistics* statistics = nullptr,
     const Histograms histogram_name = DB_GET) :
       env_(env),
       start_time_(env->NowMicros()),
@@ -36,7 +36,7 @@ class StopWatch {
  private:
   Env* const env_;
   const uint64_t start_time_;
-  std::shared_ptr<Statistics> statistics_;
+  Statistics* statistics_;
   const Histograms histogram_name_;
 };
 
@@ -44,7 +44,7 @@ class StopWatch {
 // a nano second precision stopwatch
 class StopWatchNano {
  public:
-  StopWatchNano(Env* const env, bool auto_start = false)
+  explicit StopWatchNano(Env* const env, bool auto_start = false)
       : env_(env), start_(0) {
     if (auto_start) {
       Start();