Report compaction reason in CompactionListener

Summary:
Add CompactionReason to CompactionJobInfo.
This will allow users to understand why a compaction was started, which will help with tuning compaction options.
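
For illustration, a minimal sketch (not part of this change) of how an application could consume the new field from a custom EventListener. The OnCompactionCompleted() signature and the compaction_reason member come from this diff; the CompactionReasonLogger class itself is hypothetical:

#include <mutex>
#include <vector>

#include "rocksdb/db.h"
#include "rocksdb/listener.h"

// Illustrative listener: records why each compaction ran so the recorded
// reasons can later guide compaction option tuning.
class CompactionReasonLogger : public rocksdb::EventListener {
 public:
  void OnCompactionCompleted(rocksdb::DB* /*db*/,
                             const rocksdb::CompactionJobInfo& info) override {
    std::lock_guard<std::mutex> lock(mutex_);
    reasons_.push_back(info.compaction_reason);
  }

  std::mutex mutex_;
  std::vector<rocksdb::CompactionReason> reasons_;
};

Such a listener is registered through options.listeners, exactly as the new tests in db/listener_test.cc below do.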

Test Plan:
added new tests
make check -j64

Reviewers: yhchiang, anthony, kradhakrishnan, sdong, rven

Reviewed By: rven

Subscribers: dhruba

Differential Revision: https://reviews.facebook.net/D51975
Branch: main
Author: Islam AbdelRahman
Commit: d005c66faf (parent 8ac7fb8377)
HISTORY.md                 |   1
db/compaction.cc           |   9
db/compaction.h            |   8
db/compaction_picker.cc    |  29
db/db_impl.cc              |   1
db/listener_test.cc        | 161
include/rocksdb/listener.h |  23

@@ -5,6 +5,7 @@
* Change names in CompactionPri and add a new one.
* Deprecate options.soft_rate_limit and add options.soft_pending_compaction_bytes_limit.
* If options.max_write_buffer_number > 3, writes will be slowed down when writing to the last write buffer to delay a full stop.
* Introduce CompactionJobInfo::compaction_reason. This field includes the reason that triggered the compaction.
## 4.3.0 (12/8/2015)
### New Features

@@ -147,7 +147,8 @@ Compaction::Compaction(VersionStorageInfo* vstorage,
uint32_t _output_path_id, CompressionType _compression,
std::vector<FileMetaData*> _grandparents,
bool _manual_compaction, double _score,
bool _deletion_compaction)
bool _deletion_compaction,
CompactionReason _compaction_reason)
: start_level_(_inputs[0].level),
output_level_(_output_level),
max_output_file_size_(_target_file_size),
@@ -167,8 +168,12 @@ Compaction::Compaction(VersionStorageInfo* vstorage,
score_(_score),
bottommost_level_(IsBottommostLevel(output_level_, vstorage, inputs_)),
is_full_compaction_(IsFullCompaction(vstorage, inputs_)),
is_manual_compaction_(_manual_compaction) {
is_manual_compaction_(_manual_compaction),
compaction_reason_(_compaction_reason) {
MarkFilesBeingCompacted(true);
if (is_manual_compaction_) {
compaction_reason_ = CompactionReason::kManualCompaction;
}
#ifndef NDEBUG
for (size_t i = 1; i < inputs_.size(); ++i) {

@@ -41,7 +41,8 @@ class Compaction {
uint32_t output_path_id, CompressionType compression,
std::vector<FileMetaData*> grandparents,
bool manual_compaction = false, double score = -1,
bool deletion_compaction = false);
bool deletion_compaction = false,
CompactionReason compaction_reason = CompactionReason::kUnknown);
// No copying allowed
Compaction(const Compaction&) = delete;
@@ -220,6 +221,8 @@ class Compaction {
Slice GetLargestUserKey() const { return largest_user_key_; }
CompactionReason compaction_reason() { return compaction_reason_; }
private:
// mark (or clear) all files that are being compacted
void MarkFilesBeingCompacted(bool mark_as_compacted);
@@ -289,6 +292,9 @@ class Compaction {
// largest user keys in compaction
Slice largest_user_key_;
// Reason for compaction
CompactionReason compaction_reason_;
};
// Utility function

@@ -916,6 +916,7 @@ Compaction* LevelCompactionPicker::PickCompaction(
int base_index = -1;
CompactionInputFiles inputs;
double score = 0;
CompactionReason compaction_reason = CompactionReason::kUnknown;
// Find the compactions by size on all levels.
for (int i = 0; i < NumberLevels() - 1; i++) {
@@ -928,6 +929,13 @@
&parent_index, &base_index) &&
ExpandWhileOverlapping(cf_name, vstorage, &inputs)) {
// found the compaction!
if (level == 0) {
// L0 score = `num L0 files` / `level0_file_num_compaction_trigger`
compaction_reason = CompactionReason::kLevelL0FilesNum;
} else {
// L1+ score = `Level files size` / `MaxBytesForLevel`
compaction_reason = CompactionReason::kLevelMaxLevelSize;
}
break;
} else {
// didn't find the compaction, clear the inputs
@@ -944,6 +952,9 @@
parent_index = base_index = -1;
PickFilesMarkedForCompactionExperimental(cf_name, vstorage, &inputs, &level,
&output_level);
if (!inputs.empty()) {
compaction_reason = CompactionReason::kFilesMarkedForCompaction;
}
}
if (inputs.empty()) {
return nullptr;
@@ -994,7 +1005,8 @@ Compaction* LevelCompactionPicker::PickCompaction(
mutable_cf_options.MaxGrandParentOverlapBytes(level),
GetPathId(ioptions_, mutable_cf_options, output_level),
GetCompressionType(ioptions_, output_level, vstorage->base_level()),
std::move(grandparents), is_manual, score);
std::move(grandparents), is_manual, score,
false /* deletion_compaction */, compaction_reason);
// If it's level 0 compaction, make sure we don't execute any other level 0
// compactions in parallel
@@ -1595,11 +1607,18 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp(
file_num_buf);
}
CompactionReason compaction_reason;
if (max_number_of_files_to_compact == UINT_MAX) {
compaction_reason = CompactionReason::kUniversalSortedRunNum;
} else {
compaction_reason = CompactionReason::kUniversalSizeRatio;
}
return new Compaction(
vstorage, mutable_cf_options, std::move(inputs), output_level,
mutable_cf_options.MaxFileSizeForLevel(output_level), LLONG_MAX, path_id,
GetCompressionType(ioptions_, start_level, 1, enable_compression),
/* grandparents */ {}, /* is manual */ false, score);
/* grandparents */ {}, /* is manual */ false, score,
false /* deletion_compaction */, compaction_reason);
}
// Look at overall size amplification. If size amplification
@@ -1723,7 +1742,9 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalSizeAmp(
mutable_cf_options.MaxFileSizeForLevel(vstorage->num_levels() - 1),
/* max_grandparent_overlap_bytes */ LLONG_MAX, path_id,
GetCompressionType(ioptions_, vstorage->num_levels() - 1, 1),
/* grandparents */ {}, /* is manual */ false, score);
/* grandparents */ {}, /* is manual */ false, score,
false /* deletion_compaction */,
CompactionReason::kUniversalSizeAmplification);
}
bool FIFOCompactionPicker::NeedsCompaction(const VersionStorageInfo* vstorage)
@@ -1782,7 +1803,7 @@ Compaction* FIFOCompactionPicker::PickCompaction(
Compaction* c = new Compaction(
vstorage, mutable_cf_options, std::move(inputs), 0, 0, 0, 0,
kNoCompression, {}, /* is manual */ false, vstorage->CompactionScore(0),
/* is deletion compaction */ true);
/* is deletion compaction */ true, CompactionReason::kFIFOMaxSize);
level0_compactions_in_progress_.insert(c);
return c;
}

@@ -1897,6 +1897,7 @@ void DBImpl::NotifyOnCompactionCompleted(
info.output_level = c->output_level();
info.stats = compaction_job_stats;
info.table_properties = c->GetOutputTableProperties();
info.compaction_reason = c->compaction_reason();
for (size_t i = 0; i < c->num_input_levels(); ++i) {
for (const auto fmd : *c->inputs(i)) {
auto fn = TableFileName(db_options_.db_paths, fmd->fd.GetNumber(),

@@ -418,6 +418,167 @@ TEST_F(EventListenerTest, DisableBGCompaction) {
ASSERT_GE(listener->slowdown_count, kSlowdownTrigger * 9);
}
class TestCompactionReasonListener : public EventListener {
public:
void OnCompactionCompleted(DB* db, const CompactionJobInfo& ci) override {
std::lock_guard<std::mutex> lock(mutex_);
compaction_reasons_.push_back(ci.compaction_reason);
}
std::vector<CompactionReason> compaction_reasons_;
std::mutex mutex_;
};
TEST_F(EventListenerTest, CompactionReasonLevel) {
Options options;
options.create_if_missing = true;
options.memtable_factory.reset(
new SpecialSkipListFactory(DBTestBase::kNumKeysByGenerateNewRandomFile));
TestCompactionReasonListener* listener = new TestCompactionReasonListener();
options.listeners.emplace_back(listener);
options.level0_file_num_compaction_trigger = 4;
options.compaction_style = kCompactionStyleLevel;
DestroyAndReopen(options);
Random rnd(301);
// Write 4 files in L0
for (int i = 0; i < 4; i++) {
GenerateNewRandomFile(&rnd);
}
dbfull()->TEST_WaitForCompact();
ASSERT_EQ(listener->compaction_reasons_.size(), 1);
ASSERT_EQ(listener->compaction_reasons_[0],
CompactionReason::kLevelL0FilesNum);
DestroyAndReopen(options);
// Write 3 non-overlapping files in L0
for (int k = 1; k <= 30; k++) {
ASSERT_OK(Put(Key(k), Key(k)));
if (k % 10 == 0) {
Flush();
}
}
// Do a trivial move from L0 -> L1
db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
options.max_bytes_for_level_base = 1;
Close();
listener->compaction_reasons_.clear();
Reopen(options);
dbfull()->TEST_WaitForCompact();
ASSERT_GT(listener->compaction_reasons_.size(), 1);
for (auto compaction_reason : listener->compaction_reasons_) {
ASSERT_EQ(compaction_reason, CompactionReason::kLevelMaxLevelSize);
}
options.disable_auto_compactions = true;
Close();
listener->compaction_reasons_.clear();
Reopen(options);
db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
ASSERT_GT(listener->compaction_reasons_.size(), 0);
for (auto compaction_reason : listener->compaction_reasons_) {
ASSERT_EQ(compaction_reason, CompactionReason::kManualCompaction);
}
}
TEST_F(EventListenerTest, CompactionReasonUniversal) {
Options options;
options.create_if_missing = true;
options.memtable_factory.reset(
new SpecialSkipListFactory(DBTestBase::kNumKeysByGenerateNewRandomFile));
TestCompactionReasonListener* listener = new TestCompactionReasonListener();
options.listeners.emplace_back(listener);
options.compaction_style = kCompactionStyleUniversal;
Random rnd(301);
options.level0_file_num_compaction_trigger = 8;
options.compaction_options_universal.max_size_amplification_percent = 100000;
options.compaction_options_universal.size_ratio = 100000;
DestroyAndReopen(options);
listener->compaction_reasons_.clear();
// Write 8 files in L0
for (int i = 0; i < 8; i++) {
GenerateNewRandomFile(&rnd);
}
dbfull()->TEST_WaitForCompact();
ASSERT_GT(listener->compaction_reasons_.size(), 0);
for (auto compaction_reason : listener->compaction_reasons_) {
ASSERT_EQ(compaction_reason, CompactionReason::kUniversalSortedRunNum);
}
options.level0_file_num_compaction_trigger = 8;
options.compaction_options_universal.max_size_amplification_percent = 1;
options.compaction_options_universal.size_ratio = 100000;
DestroyAndReopen(options);
listener->compaction_reasons_.clear();
// Write 8 files in L0
for (int i = 0; i < 8; i++) {
GenerateNewRandomFile(&rnd);
}
dbfull()->TEST_WaitForCompact();
ASSERT_GT(listener->compaction_reasons_.size(), 0);
for (auto compaction_reason : listener->compaction_reasons_) {
ASSERT_EQ(compaction_reason, CompactionReason::kUniversalSizeAmplification);
}
options.disable_auto_compactions = true;
Close();
listener->compaction_reasons_.clear();
Reopen(options);
db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
ASSERT_GT(listener->compaction_reasons_.size(), 0);
for (auto compaction_reason : listener->compaction_reasons_) {
ASSERT_EQ(compaction_reason, CompactionReason::kManualCompaction);
}
}
TEST_F(EventListenerTest, CompactionReasonFIFO) {
Options options;
options.create_if_missing = true;
options.memtable_factory.reset(
new SpecialSkipListFactory(DBTestBase::kNumKeysByGenerateNewRandomFile));
TestCompactionReasonListener* listener = new TestCompactionReasonListener();
options.listeners.emplace_back(listener);
options.level0_file_num_compaction_trigger = 4;
options.compaction_style = kCompactionStyleFIFO;
options.compaction_options_fifo.max_table_files_size = 1;
DestroyAndReopen(options);
Random rnd(301);
// Write 4 files in L0
for (int i = 0; i < 4; i++) {
GenerateNewRandomFile(&rnd);
}
dbfull()->TEST_WaitForCompact();
ASSERT_GT(listener->compaction_reasons_.size(), 0);
for (auto compaction_reason : listener->compaction_reasons_) {
ASSERT_EQ(compaction_reason, CompactionReason::kFIFOMaxSize);
}
}
} // namespace rocksdb
#endif // ROCKSDB_LITE

@@ -81,6 +81,26 @@ struct FlushJobInfo {
TableProperties table_properties;
};
enum class CompactionReason {
kUnknown,
// [Level] number of L0 files > level0_file_num_compaction_trigger
kLevelL0FilesNum,
// [Level] total size of level > MaxBytesForLevel()
kLevelMaxLevelSize,
// [Universal] Compacting for size amplification
kUniversalSizeAmplification,
// [Universal] Compacting for size ratio
kUniversalSizeRatio,
// [Universal] number of sorted runs > level0_file_num_compaction_trigger
kUniversalSortedRunNum,
// [FIFO] total size > max_table_files_size
kFIFOMaxSize,
// Manual compaction
kManualCompaction,
// DB::SuggestCompactRange() marked files for compaction
kFilesMarkedForCompaction,
};
struct CompactionJobInfo {
CompactionJobInfo() = default;
explicit CompactionJobInfo(const CompactionJobStats& _stats) :
@@ -107,6 +127,9 @@ struct CompactionJobInfo {
// The map is keyed by values from input_files and output_files.
TablePropertiesCollection table_properties;
// Reason to run the compaction
CompactionReason compaction_reason;
// If non-null, this variable stores detailed information
// about this compaction.
CompactionJobStats stats;
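
For completeness, a hedged sketch (not part of this commit, and not an API RocksDB provides here) of a small application-side helper that maps the new enum to a log-friendly string. The enum values are exactly the ones introduced above; CompactionReasonToString() is a hypothetical name:

#include "rocksdb/listener.h"

// Hypothetical helper (not part of RocksDB): turn CompactionReason into a
// human-readable string for logs or metrics.
const char* CompactionReasonToString(rocksdb::CompactionReason reason) {
  using rocksdb::CompactionReason;
  switch (reason) {
    case CompactionReason::kLevelL0FilesNum:
      return "level: L0 file count reached level0_file_num_compaction_trigger";
    case CompactionReason::kLevelMaxLevelSize:
      return "level: level size exceeded MaxBytesForLevel";
    case CompactionReason::kUniversalSizeAmplification:
      return "universal: size amplification";
    case CompactionReason::kUniversalSizeRatio:
      return "universal: size ratio";
    case CompactionReason::kUniversalSortedRunNum:
      return "universal: sorted run count reached trigger";
    case CompactionReason::kFIFOMaxSize:
      return "fifo: total size exceeded max_table_files_size";
    case CompactionReason::kManualCompaction:
      return "manual compaction";
    case CompactionReason::kFilesMarkedForCompaction:
      return "files marked for compaction";
    case CompactionReason::kUnknown:
    default:
      return "unknown";
  }
}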
