Add struct CompactionInputFiles to manage compaction input files.

Summary: Add struct CompactionInputFiles to manage compaction input files.

Test Plan:
export ROCKSDB_TESTS=Compact
make db_test
./db_test

Reviewers: ljin, igor, sdong

Reviewed By: sdong

Subscribers: leveldb

Differential Revision: https://reviews.facebook.net/D20061
Branch: main
Author: Yueh-Hsuan Chiang, 11 years ago
Parent: bc6b2ab401
Commit: 296e340753
Changed files:
  db/compaction.cc (12 lines changed)
  db/compaction.h (23 lines changed)
  db/compaction_picker.cc (49 lines changed)
  db/version_set.cc (2 lines changed)

db/compaction.cc

@@ -61,6 +61,9 @@ Compaction::Compaction(Version* input_version, int level, int out_level,
for (int i = 0; i < number_levels_; i++) {
level_ptrs_[i] = 0;
}
+for (int i = 0; i < 2; ++i) {
+inputs_[i].level = level_ + i;
+}
}
Compaction::~Compaction() {
@@ -78,7 +81,7 @@ Compaction::~Compaction() {
void Compaction::GenerateFileLevels() {
input_levels_.resize(2);
for (int which = 0; which < 2; which++) {
-DoGenerateFileLevel(&input_levels_[which], inputs_[which], &arena_);
+DoGenerateFileLevel(&input_levels_[which], inputs_[which].files, &arena_);
}
}
@@ -158,7 +161,6 @@ bool Compaction::ShouldStopBefore(const Slice& internal_key) {
// Mark (or clear) each file that is being compacted
void Compaction::MarkFilesBeingCompacted(bool value) {
for (int i = 0; i < 2; i++) {
-std::vector<FileMetaData*> v = inputs_[i];
for (unsigned int j = 0; j < inputs_[i].size(); j++) {
assert(value ? !inputs_[i][j]->being_compacted :
inputs_[i][j]->being_compacted);
@@ -241,7 +243,7 @@ void Compaction::Summary(char* output, int len) {
return;
}
-write += InputSummary(inputs_[0], output + write, len - write);
+write += InputSummary(inputs_[0].files, output + write, len - write);
if (write < 0 || write >= len) {
return;
}
@@ -251,7 +253,7 @@ void Compaction::Summary(char* output, int len) {
return;
}
-write += InputSummary(inputs_[1], output + write, len - write);
+write += InputSummary(inputs_[1].files, output + write, len - write);
if (write < 0 || write >= len) {
return;
}
@@ -266,7 +268,7 @@ uint64_t Compaction::OutputFilePreallocationSize() {
preallocation_size =
cfd_->compaction_picker()->MaxFileSizeForLevel(output_level());
} else {
-for (const auto& f : inputs_[0]) {
+for (const auto& f : inputs_[0].files) {
preallocation_size += f->fd.GetFileSize();
}
}

db/compaction.h

@@ -14,6 +14,15 @@
namespace rocksdb {
+struct CompactionInputFiles {
+int level;
+std::vector<FileMetaData*> files;
+inline bool empty() const { return files.empty(); }
+inline size_t size() const { return files.size(); }
+inline void clear() { files.clear(); }
+inline FileMetaData* operator[](int i) const { return files[i]; }
+};
class Version;
class ColumnFamilyData;
@@ -26,9 +35,9 @@ class Compaction {
~Compaction();
-// Return the level that is being compacted. Inputs from "level"
-// will be merged.
-int level() const { return level_; }
+// Returns the level associated to the specified compaction input level.
+// If input_level is not specified, then input_level is set to 0.
+int level(int input_level = 0) const { return inputs_[input_level].level; }
// Outputs will go to this level
int output_level() const { return out_level_; }
@@ -48,7 +57,11 @@ class Compaction {
// Return the ith input file at "level()+which" ("which" must be 0 or 1).
FileMetaData* input(int which, int i) const { return inputs_[which][i]; }
-std::vector<FileMetaData*>* inputs(int which) { return &inputs_[which]; }
+// Returns the list of FileMetaData associated with the specified
+// compaction input level.
+std::vector<FileMetaData*>* inputs(int which) {
+return &inputs_[which].files;
+}
// Return the input_level file
FileLevel* input_levels(int which) { return &input_levels_[which]; }
@@ -140,7 +153,7 @@ class Compaction {
bool deletion_compaction_;
// Each compaction reads inputs from "level_" and "level_+1"
-std::vector<FileMetaData*> inputs_[2]; // The two sets of inputs
+CompactionInputFiles inputs_[2]; // The two sets of inputs
// A copy of inputs_, organized more closely in memory
autovector<FileLevel, 2> input_levels_;
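
For readers skimming the header diff above: the sketch below is a minimal, self-contained illustration of the new wrapper and of the access pattern the rest of the patch switches to. The FileMetaData stub and the sample sizes are invented for illustration; only the CompactionInputFiles shape and the .level/.files accesses mirror the actual change.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Stub standing in for rocksdb::FileMetaData; the real struct also carries
// key ranges, file descriptors, and compaction state.
struct FileMetaData {
  uint64_t file_size;
};

// Mirrors the new wrapper from db/compaction.h: an input level is the level
// number plus the files picked from that level.
struct CompactionInputFiles {
  int level;
  std::vector<FileMetaData*> files;
  inline bool empty() const { return files.empty(); }
  inline std::size_t size() const { return files.size(); }
  inline void clear() { files.clear(); }
  inline FileMetaData* operator[](int i) const { return files[i]; }
};

int main() {
  FileMetaData f1{4096}, f2{8192};

  // Two input slots, as in Compaction::inputs_[2]; the updated constructor
  // records inputs_[i].level = level_ + i.
  CompactionInputFiles inputs[2];
  inputs[0].level = 1;
  inputs[1].level = 2;
  inputs[0].files = {&f1, &f2};

  // Callers reach the raw vector through .files, e.g. the preallocation
  // loop in Compaction::OutputFilePreallocationSize().
  uint64_t total = 0;
  for (const auto* f : inputs[0].files) {
    total += f->file_size;
  }
  std::printf("level %d: %zu files, %llu bytes\n", inputs[0].level,
              inputs[0].size(), static_cast<unsigned long long>(total));
  return 0;
}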

db/compaction_picker.cc

@@ -194,14 +194,15 @@ bool CompactionPicker::ExpandWhileOverlapping(Compaction* c) {
size_t old_size;
do {
old_size = c->inputs_[0].size();
-GetRange(c->inputs_[0], &smallest, &largest);
+GetRange(c->inputs_[0].files, &smallest, &largest);
c->inputs_[0].clear();
c->input_version_->GetOverlappingInputs(
-level, &smallest, &largest, &c->inputs_[0], hint_index, &hint_index);
+level, &smallest, &largest, &c->inputs_[0].files,
+hint_index, &hint_index);
} while(c->inputs_[0].size() > old_size);
// Get the new range
-GetRange(c->inputs_[0], &smallest, &largest);
+GetRange(c->inputs_[0].files, &smallest, &largest);
// If, after the expansion, there are files that are already under
// compaction, then we must drop/cancel this compaction.
@@ -211,7 +212,7 @@ bool CompactionPicker::ExpandWhileOverlapping(Compaction* c) {
"[%s] ExpandWhileOverlapping() failure because zero input files",
c->column_family_data()->GetName().c_str());
}
-if (c->inputs_[0].empty() || FilesInCompaction(c->inputs_[0]) ||
+if (c->inputs_[0].empty() || FilesInCompaction(c->inputs_[0].files) ||
(c->level() != c->output_level() &&
ParentRangeInCompaction(c->input_version_, &smallest, &largest, level,
&parent_index))) {
@@ -267,16 +268,17 @@ void CompactionPicker::SetupOtherInputs(Compaction* c) {
InternalKey smallest, largest;
// Get the range one last time.
-GetRange(c->inputs_[0], &smallest, &largest);
+GetRange(c->inputs_[0].files, &smallest, &largest);
// Populate the set of next-level files (inputs_[1]) to include in compaction
-c->input_version_->GetOverlappingInputs(level + 1, &smallest, &largest,
-&c->inputs_[1], c->parent_index_,
-&c->parent_index_);
+c->input_version_->GetOverlappingInputs(
+level + 1, &smallest, &largest,
+&c->inputs_[1].files, c->parent_index_,
+&c->parent_index_);
// Get entire range covered by compaction
InternalKey all_start, all_limit;
-GetRange(c->inputs_[0], c->inputs_[1], &all_start, &all_limit);
+GetRange(c->inputs_[0].files, c->inputs_[1].files, &all_start, &all_limit);
// See if we can further grow the number of inputs in "level" without
// changing the number of "level+1" files we pick up. We also choose NOT
@@ -287,8 +289,8 @@ void CompactionPicker::SetupOtherInputs(Compaction* c) {
std::vector<FileMetaData*> expanded0;
c->input_version_->GetOverlappingInputs(
level, &all_start, &all_limit, &expanded0, c->base_index_, nullptr);
-const uint64_t inputs0_size = TotalCompensatedFileSize(c->inputs_[0]);
-const uint64_t inputs1_size = TotalCompensatedFileSize(c->inputs_[1]);
+const uint64_t inputs0_size = TotalCompensatedFileSize(c->inputs_[0].files);
+const uint64_t inputs1_size = TotalCompensatedFileSize(c->inputs_[1].files);
const uint64_t expanded0_size = TotalCompensatedFileSize(expanded0);
uint64_t limit = ExpandedCompactionByteSizeLimit(level);
if (expanded0.size() > c->inputs_[0].size() &&
@@ -312,9 +314,10 @@ void CompactionPicker::SetupOtherInputs(Compaction* c) {
inputs1_size);
smallest = new_start;
largest = new_limit;
-c->inputs_[0] = expanded0;
-c->inputs_[1] = expanded1;
-GetRange(c->inputs_[0], c->inputs_[1], &all_start, &all_limit);
+c->inputs_[0].files = expanded0;
+c->inputs_[1].files = expanded1;
+GetRange(c->inputs_[0].files, c->inputs_[1].files,
+&all_start, &all_limit);
}
}
}
@@ -374,7 +377,7 @@ Compaction* CompactionPicker::CompactRange(Version* version, int input_level,
MaxGrandParentOverlapBytes(input_level), 0,
GetCompressionType(*options_, output_level));
-c->inputs_[0] = inputs;
+c->inputs_[0].files = inputs;
if (ExpandWhileOverlapping(c) == false) {
delete c;
Log(options_->info_log,
@@ -441,18 +444,18 @@ Compaction* LevelCompactionPicker::PickCompaction(Version* version,
if (level == 0) {
assert(compactions_in_progress_[0].empty());
InternalKey smallest, largest;
-GetRange(c->inputs_[0], &smallest, &largest);
+GetRange(c->inputs_[0].files, &smallest, &largest);
// Note that the next call will discard the file we placed in
// c->inputs_[0] earlier and replace it with an overlapping set
// which will include the picked file.
c->inputs_[0].clear();
c->input_version_->GetOverlappingInputs(0, &smallest, &largest,
-&c->inputs_[0]);
+&c->inputs_[0].files);
// If we include more L0 files in the same compaction run it can
// cause the 'smallest' and 'largest' key to get extended to a
// larger range. So, re-invoke GetRange to get the new key range
-GetRange(c->inputs_[0], &smallest, &largest);
+GetRange(c->inputs_[0].files, &smallest, &largest);
if (ParentRangeInCompaction(c->input_version_, &smallest, &largest, level,
&c->parent_index_)) {
delete c;
@@ -533,7 +536,7 @@ Compaction* LevelCompactionPicker::PickCompactionBySize(Version* version,
level, &parent_index)) {
continue;
}
-c->inputs_[0].push_back(f);
+c->inputs_[0].files.push_back(f);
c->base_index_ = index;
c->parent_index_ = parent_index;
break;
@@ -615,7 +618,7 @@ Compaction* UniversalCompactionPicker::PickCompaction(Version* version,
// Is the earliest file part of this compaction?
FileMetaData* last_file = c->input_version_->files_[level].back();
-c->bottommost_level_ = c->inputs_[0].back() == last_file;
+c->bottommost_level_ = c->inputs_[0].files.back() == last_file;
// update statistics
MeasureTime(options_->statistics.get(), NUM_FILES_IN_SINGLE_COMPACTION,
@@ -810,7 +813,7 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp(
for (unsigned int i = start_index; i < first_index_after; i++) {
FileMetaData* f = c->input_version_->files_[level][i];
-c->inputs_[0].push_back(f);
+c->inputs_[0].files.push_back(f);
LogToBuffer(log_buffer,
"[%s] Universal: Picking file %s[%d] "
"with size %" PRIu64 " (compensated size %" PRIu64 ")\n",
@@ -918,7 +921,7 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalSizeAmp(
c->score_ = score;
for (unsigned int loop = start_index; loop < files.size(); loop++) {
f = c->input_version_->files_[level][loop];
-c->inputs_[0].push_back(f);
+c->inputs_[0].files.push_back(f);
LogToBuffer(log_buffer,
"[%s] Universal: size amp picking file %" PRIu64 "[%d] "
"with size %" PRIu64 " (compensated size %" PRIu64 ")",
@@ -963,7 +966,7 @@ Compaction* FIFOCompactionPicker::PickCompaction(Version* version,
ritr != version->files_[0].rend(); ++ritr) {
auto f = *ritr;
total_size -= f->compensated_file_size;
-c->inputs_[0].push_back(f);
+c->inputs_[0].files.push_back(f);
char tmp_fsize[16];
AppendHumanBytes(f->fd.GetFileSize(), tmp_fsize, sizeof(tmp_fsize));
LogToBuffer(log_buffer, "[%s] FIFO compaction: picking file %" PRIu64
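
The compaction_picker.cc changes above are mechanical: every spot that treated c->inputs_[0] as a raw vector now reaches the vector through .files. Below is a hedged, self-contained sketch of that pattern; GetOverlappingFiles is a made-up stand-in for Version::GetOverlappingInputs, and the file set is fabricated for illustration.

#include <memory>
#include <vector>

struct FileMetaData {};  // stub for the real rocksdb::FileMetaData

// Reduced form of the new wrapper, keeping only what this sketch uses.
struct CompactionInputFiles {
  int level = 0;
  std::vector<FileMetaData*> files;
  bool empty() const { return files.empty(); }
  void clear() { files.clear(); }
};

// Hypothetical stand-in for Version::GetOverlappingInputs(): repopulate
// `out` with every file that overlaps the compaction key range.
static void GetOverlappingFiles(const std::vector<FileMetaData*>& candidates,
                                std::vector<FileMetaData*>* out) {
  *out = candidates;  // pretend all candidates overlap
}

int main() {
  std::vector<std::unique_ptr<FileMetaData>> owned;
  owned.emplace_back(new FileMetaData);
  owned.emplace_back(new FileMetaData);
  std::vector<FileMetaData*> level_files = {owned[0].get(), owned[1].get()};

  CompactionInputFiles inputs0;
  inputs0.level = 0;

  // Initial pick, as in PickCompactionBySize and the universal pickers:
  inputs0.files.push_back(level_files[0]);

  // As in ExpandWhileOverlapping / PickCompaction for level 0: drop the
  // initial pick and replace it with the full overlapping set, always
  // going through the .files member.
  inputs0.clear();
  GetOverlappingFiles(level_files, &inputs0.files);

  // SetupOtherInputs may later swap in an expanded set wholesale:
  std::vector<FileMetaData*> expanded0 = level_files;
  inputs0.files = expanded0;

  return inputs0.empty() ? 1 : 0;
}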

db/version_set.cc

@@ -2830,7 +2830,7 @@ Iterator* VersionSet::MakeInputIterator(Compaction* c) {
int num = 0;
for (int which = 0; which < 2; which++) {
if (c->input_levels(which)->num_files != 0) {
-if (c->level() + which == 0) {
+if (c->level(which) == 0) {
const FileLevel* flevel = c->input_levels(which);
for (size_t i = 0; i < flevel->num_files; i++) {
list[num++] = cfd->table_cache()->NewIterator(
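
The version_set.cc change works because each input slot now records its own level, so MakeInputIterator can ask for it directly instead of computing level() + which. A small self-contained sketch of that accessor, assuming a stripped-down Compaction-like class:

#include <cassert>
#include <vector>

struct FileMetaData {};  // stub

struct CompactionInputFiles {
  int level = 0;
  std::vector<FileMetaData*> files;
};

// Simplified stand-in for Compaction, keeping only what the accessor needs.
class Compaction {
 public:
  explicit Compaction(int base_level) {
    for (int i = 0; i < 2; ++i) {
      inputs_[i].level = base_level + i;  // as done in the updated constructor
    }
  }
  // New accessor: reads the level stored on each input slot.
  int level(int input_level = 0) const { return inputs_[input_level].level; }

 private:
  CompactionInputFiles inputs_[2];
};

int main() {
  Compaction c(0);
  // MakeInputIterator's check becomes c.level(which) == 0 instead of
  // c.level() + which == 0.
  assert(c.level(0) == 0);
  assert(c.level(1) == 1);
  return 0;
}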
