rocksdb/util/compaction_job_stats_impl.cc


// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
#include "rocksdb/compaction_job_stats.h"
namespace ROCKSDB_NAMESPACE {
void CompactionJobStats::Reset() {
  elapsed_micros = 0;
  cpu_micros = 0;
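  // Assume the input record count will be accurate until a subcompaction
  // reports otherwise. Per the change introducing this flag (#11571), the
  // count becomes inaccurate when a compaction filter returns
  // kRemoveAndSkipUntil, which makes CompactionIterator seek ahead and lose
  // track of the number of keys processed.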
  has_num_input_records = true;
  num_input_records = 0;
  num_blobs_read = 0;
  num_input_files = 0;
  num_input_files_at_output_level = 0;
  num_output_records = 0;
  num_output_files = 0;
  num_output_files_blob = 0;
  is_full_compaction = false;
  is_manual_compaction = false;
  total_input_bytes = 0;
  total_blob_bytes_read = 0;
  total_output_bytes = 0;
  total_output_bytes_blob = 0;
  num_records_replaced = 0;
  total_input_raw_key_bytes = 0;
  total_input_raw_value_bytes = 0;
  num_input_deletion_records = 0;
  num_expired_deletion_records = 0;
  num_corrupt_keys = 0;
  file_write_nanos = 0;
  file_range_sync_nanos = 0;
  file_fsync_nanos = 0;
  file_prepare_write_nanos = 0;
  smallest_output_key_prefix.clear();
  largest_output_key_prefix.clear();
  num_single_del_fallthru = 0;
  num_single_del_mismatch = 0;
}

void CompactionJobStats::Add(const CompactionJobStats& stats) {
  elapsed_micros += stats.elapsed_micros;
  cpu_micros += stats.cpu_micros;
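  // Aggregate with logical AND: if any subcompaction's input record count is
  // inaccurate, the combined count is inaccurate as well.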
has_num_input_records &= stats.has_num_input_records;
num_input_records += stats.num_input_records;
num_blobs_read += stats.num_blobs_read;
num_input_files += stats.num_input_files;
num_input_files_at_output_level += stats.num_input_files_at_output_level;
num_output_records += stats.num_output_records;
num_output_files += stats.num_output_files;
num_output_files_blob += stats.num_output_files_blob;
total_input_bytes += stats.total_input_bytes;
total_blob_bytes_read += stats.total_blob_bytes_read;
total_output_bytes += stats.total_output_bytes;
total_output_bytes_blob += stats.total_output_bytes_blob;
num_records_replaced += stats.num_records_replaced;
total_input_raw_key_bytes += stats.total_input_raw_key_bytes;
total_input_raw_value_bytes += stats.total_input_raw_value_bytes;
num_input_deletion_records += stats.num_input_deletion_records;
num_expired_deletion_records += stats.num_expired_deletion_records;
num_corrupt_keys += stats.num_corrupt_keys;
file_write_nanos += stats.file_write_nanos;
file_range_sync_nanos += stats.file_range_sync_nanos;
file_fsync_nanos += stats.file_fsync_nanos;
file_prepare_write_nanos += stats.file_prepare_write_nanos;
num_single_del_fallthru += stats.num_single_del_fallthru;
num_single_del_mismatch += stats.num_single_del_mismatch;
}

}  // namespace ROCKSDB_NAMESPACE
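
Usage note (not part of this file): applications can observe the aggregated
CompactionJobStats through RocksDB's EventListener API, which delivers them in
CompactionJobInfo after each compaction. Below is a minimal sketch; the
listener class name is illustrative, and everything else uses the public
RocksDB API.

// Sketch: logging a few CompactionJobStats fields after each compaction.
#include <iostream>
#include <memory>

#include "rocksdb/db.h"
#include "rocksdb/listener.h"

class CompactionStatsLogger : public ROCKSDB_NAMESPACE::EventListener {
 public:
  void OnCompactionCompleted(
      ROCKSDB_NAMESPACE::DB* /*db*/,
      const ROCKSDB_NAMESPACE::CompactionJobInfo& info) override {
    const ROCKSDB_NAMESPACE::CompactionJobStats& stats = info.stats;
    std::cout << "compaction took " << stats.elapsed_micros << " us, "
              << stats.num_input_records << " input records ("
              << (stats.has_num_input_records ? "accurate" : "inaccurate")
              << "), " << stats.num_output_records << " output records\n";
  }
};

// Registration before opening the DB:
//   options.listeners.emplace_back(std::make_shared<CompactionStatsLogger>());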