Compaction Read/Write Stats by Compaction Type (#7165)

Summary:
Adds compaction statistics (total bytes read and written) for compactions that occur for delete-triggered, periodic, and TTL compaction reasons.
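
Not part of this change, but as a usage sketch: once a DB is opened with options.statistics set, the new per-reason tickers can be read through the public Statistics API roughly like the following (the DB path and surrounding scaffolding are illustrative):

#include <iostream>

#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "rocksdb/statistics.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  options.statistics = rocksdb::CreateDBStatistics();

  rocksdb::DB* db = nullptr;
  rocksdb::Status s =
      rocksdb::DB::Open(options, "/tmp/compaction_stats_demo", &db);
  if (!s.ok()) {
    std::cerr << s.ToString() << std::endl;
    return 1;
  }

  // ... run a workload; background compactions accumulate the counters ...

  // The per-reason counters sit alongside the existing totals
  // COMPACT_READ_BYTES / COMPACT_WRITE_BYTES.
  auto stats = options.statistics;
  std::cout << "marked   read/write: "
            << stats->getTickerCount(rocksdb::COMPACT_READ_BYTES_MARKED) << " / "
            << stats->getTickerCount(rocksdb::COMPACT_WRITE_BYTES_MARKED) << "\n"
            << "periodic read/write: "
            << stats->getTickerCount(rocksdb::COMPACT_READ_BYTES_PERIODIC) << " / "
            << stats->getTickerCount(rocksdb::COMPACT_WRITE_BYTES_PERIODIC) << "\n"
            << "ttl      read/write: "
            << stats->getTickerCount(rocksdb::COMPACT_READ_BYTES_TTL) << " / "
            << stats->getTickerCount(rocksdb::COMPACT_WRITE_BYTES_TTL) << "\n";

  delete db;
  return 0;
}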

Pull Request resolved: https://github.com/facebook/rocksdb/pull/7165

Test Plan:
TTL and periodic compactions can be checked by running db_bench with the corresponding options enabled:

./db_bench --benchmarks="fillrandom,stats" --statistics --num=10000000 -base_background_compactions=16 -periodic_compaction_seconds=1
./db_bench --benchmarks="fillrandom,stats" --statistics --num=10000000 -base_background_compactions=16 -fifo_compaction_ttl=1

Setting the time to one second produces non-zero bytes read/written for those compaction reasons. Disabling them, or setting them to times longer than the test run length, leaves those stats at zero as expected.
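
Outside db_bench, the same triggers map onto column-family options. A rough sketch of the equivalent configuration (the helper name and values are illustrative, and treating options.ttl as the source of the TTL-reason counters is my reading for level-style compaction, not something stated in this change):

#include "rocksdb/options.h"
#include "rocksdb/statistics.h"

rocksdb::Options MakeOptionsForPerReasonStats() {
  rocksdb::Options options;
  options.statistics = rocksdb::CreateDBStatistics();
  // Files older than this are picked up for compaction and rewritten to the
  // same level (mirrors db_bench's -periodic_compaction_seconds); bytes from
  // those compactions land in the *_PERIODIC tickers.
  options.periodic_compaction_seconds = 24 * 60 * 60;  // illustrative value
  // With level-style compaction, files older than options.ttl are scheduled
  // for TTL compaction; bytes from those land in the *_TTL tickers.
  options.ttl = 7 * 24 * 60 * 60;  // illustrative value
  return options;
}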

Delete-triggered compaction counting is tested in DBTablePropertiesTest.DeletionTriggeredCompactionMarking.
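
For reference, a rough sketch of the kind of setup that test exercises through the public API; the window size, deletion trigger, path, and key counts below are illustrative values, not the test's:

#include <iostream>
#include <string>

#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "rocksdb/statistics.h"
#include "rocksdb/utilities/table_properties_collectors.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  options.statistics = rocksdb::CreateDBStatistics();
  // Mark an SST file for compaction once it sees enough deletions within a
  // sliding window of consecutive entries.
  options.table_properties_collector_factories.emplace_back(
      rocksdb::NewCompactOnDeletionCollectorFactory(
          /*sliding_window_size=*/128, /*deletion_trigger=*/50));

  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/marked_demo", &db);
  if (!s.ok()) return 1;

  // Write a batch of keys, then delete enough of them to trip the trigger.
  for (int i = 0; i < 1000; ++i) {
    db->Put(rocksdb::WriteOptions(), "k" + std::to_string(i), "v");
  }
  for (int i = 0; i < 500; ++i) {
    db->Delete(rocksdb::WriteOptions(), "k" + std::to_string(i));
  }
  db->Flush(rocksdb::FlushOptions());

  // Once a background compaction picks up the marked file, these counters
  // become non-zero (the unit test uses TEST_WaitForCompact to make this
  // deterministic).
  std::cout << "marked read bytes:  "
            << options.statistics->getTickerCount(rocksdb::COMPACT_READ_BYTES_MARKED)
            << "\nmarked write bytes: "
            << options.statistics->getTickerCount(rocksdb::COMPACT_WRITE_BYTES_MARKED)
            << std::endl;

  delete db;
  return 0;
}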

Reviewed By: ajkr

Differential Revision: D22693050

Pulled By: akabcenell

fbshipit-source-id: d15cef4d94576f703015c8942d5f0d492f69401d

Branch: main. Commit 56ed601df3 (parent 50f206ad84), authored by Aaron Kabcenell, committed by Facebook GitHub Bot.

Changed files:
db/compaction/compaction_job.cc (14 lines changed)
db/db_table_properties_test.cc (3 lines changed)
include/rocksdb/statistics.h (8 lines changed)
java/rocksjni/portal.h (24 lines changed)
java/src/main/java/org/rocksdb/TickerType.java (10 lines changed)
monitoring/statistics.cc (6 lines changed)
tools/db_bench_tool.cc (6 lines changed)

db/compaction/compaction_job.cc
@@ -1509,10 +1509,22 @@ Status CompactionJob::InstallCompactionResults(
 void CompactionJob::RecordCompactionIOStats() {
   RecordTick(stats_, COMPACT_READ_BYTES, IOSTATS(bytes_read));
+  RecordTick(stats_, COMPACT_WRITE_BYTES, IOSTATS(bytes_written));
+  CompactionReason compaction_reason =
+      compact_->compaction->compaction_reason();
+  if (compaction_reason == CompactionReason::kFilesMarkedForCompaction) {
+    RecordTick(stats_, COMPACT_READ_BYTES_MARKED, IOSTATS(bytes_read));
+    RecordTick(stats_, COMPACT_WRITE_BYTES_MARKED, IOSTATS(bytes_written));
+  } else if (compaction_reason == CompactionReason::kPeriodicCompaction) {
+    RecordTick(stats_, COMPACT_READ_BYTES_PERIODIC, IOSTATS(bytes_read));
+    RecordTick(stats_, COMPACT_WRITE_BYTES_PERIODIC, IOSTATS(bytes_written));
+  } else if (compaction_reason == CompactionReason::kTtl) {
+    RecordTick(stats_, COMPACT_READ_BYTES_TTL, IOSTATS(bytes_read));
+    RecordTick(stats_, COMPACT_WRITE_BYTES_TTL, IOSTATS(bytes_written));
+  }
   ThreadStatusUtil::IncreaseThreadOperationProperty(
       ThreadStatus::COMPACTION_BYTES_READ, IOSTATS(bytes_read));
   IOSTATS_RESET(bytes_read);
-  RecordTick(stats_, COMPACT_WRITE_BYTES, IOSTATS(bytes_written));
   ThreadStatusUtil::IncreaseThreadOperationProperty(
       ThreadStatus::COMPACTION_BYTES_WRITTEN, IOSTATS(bytes_written));
   IOSTATS_RESET(bytes_written);

db/db_table_properties_test.cc
@@ -294,6 +294,7 @@ TEST_P(DBTablePropertiesTest, DeletionTriggeredCompactionMarking) {
       NewCompactOnDeletionCollectorFactory(kWindowSize, kNumDelsTrigger);
   Options opts = CurrentOptions();
+  opts.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
   opts.table_properties_collector_factories.emplace_back(compact_on_del);
   if(GetParam() == "kCompactionStyleUniversal") {
@@ -364,6 +365,8 @@ TEST_P(DBTablePropertiesTest, DeletionTriggeredCompactionMarking) {
   dbfull()->TEST_WaitForCompact();
   ASSERT_EQ(1, NumTableFilesAtLevel(0));
+  ASSERT_LT(0, opts.statistics->getTickerCount(COMPACT_WRITE_BYTES_MARKED));
+  ASSERT_LT(0, opts.statistics->getTickerCount(COMPACT_READ_BYTES_MARKED));
 }
 INSTANTIATE_TEST_CASE_P(

include/rocksdb/statistics.h
@@ -205,6 +205,14 @@ enum Tickers : uint32_t {
   COMPACT_WRITE_BYTES,  // Bytes written during compaction
   FLUSH_WRITE_BYTES,    // Bytes written during flush
+  // Compaction read and write statistics broken down by CompactionReason
+  COMPACT_READ_BYTES_MARKED,
+  COMPACT_READ_BYTES_PERIODIC,
+  COMPACT_READ_BYTES_TTL,
+  COMPACT_WRITE_BYTES_MARKED,
+  COMPACT_WRITE_BYTES_PERIODIC,
+  COMPACT_WRITE_BYTES_TTL,
   // Number of table's properties loaded directly from file, without creating
   // table reader object.
   NUMBER_DIRECT_LOAD_TABLE_PROPERTIES,

java/rocksjni/portal.h
@@ -4949,6 +4949,18 @@ class TickerTypeJni {
         return -0x0E;
       case ROCKSDB_NAMESPACE::Tickers::FILES_DELETED_IMMEDIATELY:
         return -0X0F;
+      case ROCKSDB_NAMESPACE::Tickers::COMPACT_READ_BYTES_MARKED:
+        return -0x10;
+      case ROCKSDB_NAMESPACE::Tickers::COMPACT_READ_BYTES_PERIODIC:
+        return -0x11;
+      case ROCKSDB_NAMESPACE::Tickers::COMPACT_READ_BYTES_TTL:
+        return -0x12;
+      case ROCKSDB_NAMESPACE::Tickers::COMPACT_WRITE_BYTES_MARKED:
+        return -0x13;
+      case ROCKSDB_NAMESPACE::Tickers::COMPACT_WRITE_BYTES_PERIODIC:
+        return -0x14;
+      case ROCKSDB_NAMESPACE::Tickers::COMPACT_WRITE_BYTES_TTL:
+        return -0x15;
       case ROCKSDB_NAMESPACE::Tickers::TICKER_ENUM_MAX:
         // 0x5F for backwards compatibility on current minor version.
@@ -5249,6 +5261,18 @@ class TickerTypeJni {
         return ROCKSDB_NAMESPACE::Tickers::FILES_MARKED_TRASH;
       case -0x0F:
         return ROCKSDB_NAMESPACE::Tickers::FILES_DELETED_IMMEDIATELY;
+      case -0x10:
+        return ROCKSDB_NAMESPACE::Tickers::COMPACT_READ_BYTES_MARKED;
+      case -0x11:
+        return ROCKSDB_NAMESPACE::Tickers::COMPACT_READ_BYTES_PERIODIC;
+      case -0x12:
+        return ROCKSDB_NAMESPACE::Tickers::COMPACT_READ_BYTES_TTL;
+      case -0x13:
+        return ROCKSDB_NAMESPACE::Tickers::COMPACT_WRITE_BYTES_MARKED;
+      case -0x14:
+        return ROCKSDB_NAMESPACE::Tickers::COMPACT_WRITE_BYTES_PERIODIC;
+      case -0x15:
+        return ROCKSDB_NAMESPACE::Tickers::COMPACT_WRITE_BYTES_TTL;
       case 0x5F:
         // 0x5F for backwards compatibility on current minor version.
         return ROCKSDB_NAMESPACE::Tickers::TICKER_ENUM_MAX;

java/src/main/java/org/rocksdb/TickerType.java
@@ -732,6 +732,16 @@ public enum TickerType {
      */
     FILES_DELETED_IMMEDIATELY((byte) -0x0f),
+    /**
+     * Compaction read and write statistics broken down by CompactionReason
+     */
+    COMPACT_READ_BYTES_MARKED((byte) -0x10),
+    COMPACT_READ_BYTES_PERIODIC((byte) -0x11),
+    COMPACT_READ_BYTES_TTL((byte) -0x12),
+    COMPACT_WRITE_BYTES_MARKED((byte) -0x13),
+    COMPACT_WRITE_BYTES_PERIODIC((byte) -0x14),
+    COMPACT_WRITE_BYTES_TTL((byte) -0x15),
     TICKER_ENUM_MAX((byte) 0x5F);
     private final byte value;

monitoring/statistics.cc
@@ -105,6 +105,12 @@ const std::vector<std::pair<Tickers, std::string>> TickersNameMap = {
     {COMPACT_READ_BYTES, "rocksdb.compact.read.bytes"},
     {COMPACT_WRITE_BYTES, "rocksdb.compact.write.bytes"},
     {FLUSH_WRITE_BYTES, "rocksdb.flush.write.bytes"},
+    {COMPACT_READ_BYTES_MARKED, "rocksdb.compact.read.marked.bytes"},
+    {COMPACT_READ_BYTES_PERIODIC, "rocksdb.compact.read.periodic.bytes"},
+    {COMPACT_READ_BYTES_TTL, "rocksdb.compact.read.ttl.bytes"},
+    {COMPACT_WRITE_BYTES_MARKED, "rocksdb.compact.write.marked.bytes"},
+    {COMPACT_WRITE_BYTES_PERIODIC, "rocksdb.compact.write.periodic.bytes"},
+    {COMPACT_WRITE_BYTES_TTL, "rocksdb.compact.write.ttl.bytes"},
     {NUMBER_DIRECT_LOAD_TABLE_PROPERTIES,
      "rocksdb.number.direct.load.table.properties"},
     {NUMBER_SUPERVERSION_ACQUIRES, "rocksdb.number.superversion_acquires"},

tools/db_bench_tool.cc
@@ -685,6 +685,11 @@ DEFINE_int32(level0_file_num_compaction_trigger,
              "Number of files in level-0"
              " when compactions start");
+DEFINE_uint64(periodic_compaction_seconds,
+              ROCKSDB_NAMESPACE::Options().periodic_compaction_seconds,
+              "Files older than this will be picked up for compaction and"
+              " rewritten to the same level");
 static bool ValidateInt32Percent(const char* flagname, int32_t value) {
   if (value <= 0 || value>=100) {
     fprintf(stderr, "Invalid value for --%s: %d, 0< pct <100 \n",
@@ -3962,6 +3967,7 @@ class Benchmark {
     options.max_compaction_bytes = FLAGS_max_compaction_bytes;
     options.disable_auto_compactions = FLAGS_disable_auto_compactions;
     options.optimize_filters_for_hits = FLAGS_optimize_filters_for_hits;
+    options.periodic_compaction_seconds = FLAGS_periodic_compaction_seconds;
     // fill storage options
     options.advise_random_on_open = FLAGS_advise_random_on_open;