Single Delete Mismatch and Fallthrough statistics

Summary:
Added two statistics to the compaction job statistics to identify
whether a single delete never meets a matching key (fallthrough),
or meets a merge, delete, or another single delete rather than the
expected put (mismatch).
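
The counters surface through the existing compaction job statistics plumbing.
Below is a minimal sketch (not part of this change) of how an application
might read them, assuming the usual EventListener path where
CompactionJobInfo::stats carries a CompactionJobStats; the listener class
name is illustrative only:

#include <cstdio>
#include "rocksdb/db.h"
#include "rocksdb/listener.h"

// Illustrative listener: prints the new single-delete counters after each
// compaction finishes.
class SingleDeleteStatsListener : public rocksdb::EventListener {
 public:
  void OnCompactionCompleted(rocksdb::DB* /*db*/,
                             const rocksdb::CompactionJobInfo& info) override {
    // num_single_del_fallthru: single deletes that never met any matching key.
    // num_single_del_mismatch: single deletes that met a merge, delete, or
    // another single delete instead of the expected put.
    fprintf(stderr, "single-del fallthru=%llu mismatch=%llu\n",
            static_cast<unsigned long long>(info.stats.num_single_del_fallthru),
            static_cast<unsigned long long>(info.stats.num_single_del_mismatch));
  }
};

The listener would be registered through Options::listeners (e.g.
options.listeners.emplace_back(std::make_shared<SingleDeleteStatsListener>()))
before opening the DB.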

Test Plan: Tested the statistics using write_stress and compaction_job_stats_test

Reviewers: sdong

Reviewed By: sdong

Subscribers: andrewkr, dhruba

Differential Revision: https://reviews.facebook.net/D61749
Branch: main
Author: Anirban Rahut (8 years ago)
Commit: 2fc2fd92a9
Parent: 3771e37970
Files changed (5):
  db/compaction_iterator.cc                10
  db/compaction_iterator.h                  4
  db/compaction_job.cc                      11
  include/rocksdb/compaction_job_stats.h     6
  util/compaction_job_stats_impl.cc          6

db/compaction_iterator.cc

@@ -254,6 +254,7 @@ void CompactionIterator::NextFromInput() {
     // those operations for a given key is documented as being undefined.  So
     // we can choose how to handle such a combinations of operations.  We will
     // try to compact out as much as we can in these cases.
+    // We will report counts on these anomalous cases.

     // The easiest way to process a SingleDelete during iteration is to peek
     // ahead at the next key.
@@ -276,6 +277,7 @@ void CompactionIterator::NextFromInput() {
       // First SingleDelete has been skipped since we already called
       // input_->Next().
       ++iter_stats_.num_record_drop_obsolete;
+      ++iter_stats_.num_single_del_mismatch;
     } else if ((ikey_.sequence <= earliest_write_conflict_snapshot_) ||
                has_outputted_key_) {
       // Found a matching value, we can drop the single delete and the
@@ -285,7 +287,12 @@ void CompactionIterator::NextFromInput() {
       // Note: it doesn't matter whether the second key is a Put or if it
       // is an unexpected Merge or Delete.  We will compact it out
-      // either way.
+      // either way. We will maintain counts of how many mismatches
+      // happened
+      if (next_ikey.type != kTypeValue) {
+        ++iter_stats_.num_single_del_mismatch;
+      }
       ++iter_stats_.num_record_drop_hidden;
       ++iter_stats_.num_record_drop_obsolete;
       // Already called input_->Next() once.  Call it a second time to
@@ -326,6 +333,7 @@ void CompactionIterator::NextFromInput() {
       // Key doesn't exist outside of this range.
      // Can compact out this SingleDelete.
       ++iter_stats_.num_record_drop_obsolete;
+      ++iter_stats_.num_single_del_fallthru;
     } else {
       // Output SingleDelete
       valid_ = true;

db/compaction_iterator.h

@@ -34,6 +34,10 @@ struct CompactionIteratorStats {
   uint64_t num_input_corrupt_records = 0;
   uint64_t total_input_raw_key_bytes = 0;
   uint64_t total_input_raw_value_bytes = 0;
+
+  // Single-Delete diagnostics for exceptional situations
+  uint64_t num_single_del_fallthru = 0;
+  uint64_t num_single_del_mismatch = 0;
 };

 class CompactionIterator {

db/compaction_job.cc

@@ -624,6 +624,13 @@ Status CompactionJob::Install(const MutableCFOptions& mutable_cf_options) {
          << "num_output_records" << compact_->num_output_records
          << "num_subcompactions" << compact_->sub_compact_states.size();
+
+  if (compaction_job_stats_ != nullptr) {
+    stream << "num_single_delete_mismatches"
+           << compaction_job_stats_->num_single_del_mismatch;
+    stream << "num_single_delete_fallthrough"
+           << compaction_job_stats_->num_single_del_fallthru;
+  }
   if (measure_io_stats_ && compaction_job_stats_ != nullptr) {
     stream << "file_write_nanos" << compaction_job_stats_->file_write_nanos;
     stream << "file_range_sync_nanos"
@@ -850,6 +857,10 @@ void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) {
       c_iter_stats.num_input_deletion_records;
   sub_compact->compaction_job_stats.num_corrupt_keys =
       c_iter_stats.num_input_corrupt_records;
+  sub_compact->compaction_job_stats.num_single_del_fallthru =
+      c_iter_stats.num_single_del_fallthru;
+  sub_compact->compaction_job_stats.num_single_del_mismatch =
+      c_iter_stats.num_single_del_mismatch;
   sub_compact->compaction_job_stats.total_input_raw_key_bytes +=
       c_iter_stats.total_input_raw_key_bytes;
   sub_compact->compaction_job_stats.total_input_raw_value_bytes +=

include/rocksdb/compaction_job_stats.h

@@ -81,5 +81,11 @@ struct CompactionJobStats {
   std::string smallest_output_key_prefix;
   std::string largest_output_key_prefix;
+
+  // number of single-deletes which do not meet a put
+  uint64_t num_single_del_fallthru;
+
+  // number of single-deletes which meet something other than a put
+  uint64_t num_single_del_mismatch;
 };

 }  // namespace rocksdb

util/compaction_job_stats_impl.cc

@@ -38,6 +38,9 @@ void CompactionJobStats::Reset() {
   file_range_sync_nanos = 0;
   file_fsync_nanos = 0;
   file_prepare_write_nanos = 0;
+
+  num_single_del_fallthru = 0;
+  num_single_del_mismatch = 0;
 }

 void CompactionJobStats::Add(const CompactionJobStats& stats) {
@@ -67,6 +70,9 @@ void CompactionJobStats::Add(const CompactionJobStats& stats) {
   file_range_sync_nanos += stats.file_range_sync_nanos;
   file_fsync_nanos += stats.file_fsync_nanos;
   file_prepare_write_nanos += stats.file_prepare_write_nanos;
+
+  num_single_del_fallthru += stats.num_single_del_fallthru;
+  num_single_del_mismatch += stats.num_single_del_mismatch;
 }

 #else
