// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.

#ifndef STORAGE_ROCKSDB_INCLUDE_PERF_CONTEXT_H
#define STORAGE_ROCKSDB_INCLUDE_PERF_CONTEXT_H

#include <stdint.h>
#include <string>

#include "rocksdb/perf_level.h"
namespace rocksdb {

// A thread-local context for gathering performance counters efficiently
// and transparently.
// Use SetPerfLevel(PerfLevel::kEnableTime) to enable time stats.
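//
// A minimal usage sketch (illustrative only, not part of this header). It
// assumes the usual "rocksdb/db.h" include and an already-opened rocksdb::DB*
// named `db`; the key "foo" is just a placeholder:
//
//   #include <cstdio>
//   #include <string>
//
//   #include "rocksdb/db.h"
//   #include "rocksdb/perf_context.h"
//   #include "rocksdb/perf_level.h"
//
//   rocksdb::SetPerfLevel(rocksdb::PerfLevel::kEnableTime);  // enable time stats
//   rocksdb::perf_context.Reset();        // clear this thread's counters
//
//   std::string value;
//   rocksdb::Status s = db->Get(rocksdb::ReadOptions(), "foo", &value);
//
//   // How many blocks did this single Get() read, and how long did they take?
//   std::printf("block_read_count: %llu\n",
//               static_cast<unsigned long long>(
//                   rocksdb::perf_context.block_read_count));
//   std::printf("block_read_time:  %llu ns\n",
//               static_cast<unsigned long long>(
//                   rocksdb::perf_context.block_read_time));
//   // Or dump all counters at once:
//   std::printf("%s\n", rocksdb::perf_context.ToString().c_str());
//
//   rocksdb::SetPerfLevel(rocksdb::PerfLevel::kDisable);     // stats off again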

struct PerfContext {

  void Reset(); // reset all performance counters to zero

  std::string ToString() const;

  uint64_t user_key_comparison_count; // total number of user key comparisons
  uint64_t block_cache_hit_count;     // total number of block cache hits
  uint64_t block_read_count;          // total number of block reads (with IO)
  uint64_t block_read_byte;           // total number of bytes from block reads
  uint64_t block_read_time;           // total nanos spent on block reads
  uint64_t block_checksum_time;       // total nanos spent on block checksum
  uint64_t block_decompress_time;     // total nanos spent on block decompression
  // total number of internal keys skipped over during iteration (overwritten or
  // deleted, to be more specific, hidden by a put or delete of the same key)
  uint64_t internal_key_skipped_count;
  // total number of deletes and single deletes skipped over during iteration
  uint64_t internal_delete_skipped_count;

  uint64_t get_snapshot_time;          // total nanos spent on getting snapshot
  uint64_t get_from_memtable_time;     // total nanos spent on querying memtables
  uint64_t get_from_memtable_count;    // number of mem tables queried
  // total nanos spent after Get() finds a key
  uint64_t get_post_process_time;
  uint64_t get_from_output_files_time; // total nanos reading from output files
  // total nanos spent on seeking memtable
  uint64_t seek_on_memtable_time;
  // number of seeks issued on memtable
  uint64_t seek_on_memtable_count;
  // total nanos spent on seeking child iters
  uint64_t seek_child_seek_time;
  // number of seeks issued in child iterators
  uint64_t seek_child_seek_count;
  uint64_t seek_min_heap_time; // total nanos spent on the merge heap
  // total nanos spent on seeking the internal entries
  uint64_t seek_internal_seek_time;
  // total nanos spent on iterating internal entries to find the next user entry
  uint64_t find_next_user_entry_time;

  // total nanos spent on writing to WAL
  uint64_t write_wal_time;
  // total nanos spent on writing to mem tables
  uint64_t write_memtable_time;
  // total nanos spent on delaying write
  uint64_t write_delay_time;
  // total nanos spent on writing a record, excluding the above three times
  uint64_t write_pre_and_post_process_time;

  uint64_t db_mutex_lock_nanos; // time spent on acquiring DB mutex.
  // Time spent on waiting with a condition variable created with DB mutex.
  uint64_t db_condition_wait_nanos;
  // Time spent on merge operator.
  uint64_t merge_operator_time_nanos;

  // Time spent on reading index block from block cache or SST file
  uint64_t read_index_block_nanos;
  // Time spent on reading filter block from block cache or SST file
  uint64_t read_filter_block_nanos;
  // Time spent on creating data block iterator
  uint64_t new_table_block_iter_nanos;
  // Time spent on creating an iterator of an SST file.
  uint64_t new_table_iterator_nanos;
  // Time spent on seeking a key in data/index blocks
  uint64_t block_seek_nanos;
  // Time spent on finding or creating a table reader
  uint64_t find_table_nanos;
  // total number of mem table bloom hits
  uint64_t bloom_memtable_hit_count;
  // total number of mem table bloom misses
  uint64_t bloom_memtable_miss_count;
  // total number of SST table bloom hits
  uint64_t bloom_sst_hit_count;
  // total number of SST table bloom misses
  uint64_t bloom_sst_miss_count;
};
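// The global perf_context lives in thread-local storage where the platform
// supports it (__declspec(thread) with MSVC, __thread elsewhere), so each
// thread accumulates its own counters. When NPERF_CONTEXT or
// IOS_CROSS_COMPILE is defined, a single shared instance is used instead.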
#if defined(NPERF_CONTEXT) || defined(IOS_CROSS_COMPILE)
extern PerfContext perf_context;
#elif _WIN32
extern __declspec(thread) PerfContext perf_context;
#else
extern __thread PerfContext perf_context;
#endif

}  // namespace rocksdb

#endif  // STORAGE_ROCKSDB_INCLUDE_PERF_CONTEXT_H