Centralize compression decision to compaction picker

Summary:
Before this diff, we decided enable_compression in CompactionPicker and then decided the final compression type in DBImpl. That split was confusing.

After this diff, the final compression type is decided in CompactionPicker.

The motivation is that I want CompactFiles() to be able to specify an output compression type, so that people can mix and match compression styles in their compaction algorithms. This diff makes that much easier.
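For context, a minimal sketch of the resulting flow, using the names that appear in the diff below (illustration only, not the exact code):

  // The picker resolves the final CompressionType itself and stores it on
  // the Compaction object it builds:
  Compaction* c = new Compaction(version, level, level + 1,
                                 MaxFileSizeForLevel(level + 1),
                                 MaxGrandParentOverlapBytes(level),
                                 GetCompressionType(*options_, level + 1));
  // ...so DBImpl only has to read it back when opening the output file:
  //   compact->compaction->OutputCompressionType()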

Test Plan: make check

Reviewers: dhruba, haobo, sdong, yhchiang, ljin

Reviewed By: ljin

Subscribers: leveldb

Differential Revision: https://reviews.facebook.net/D19137
main · Igor Canadi · 11 years ago · parent d3f63f03ad · commit f146cab261
db/compaction.cc        |  4
db/compaction.h         |  8
db/compaction_picker.cc | 41
db/db_impl.cc           | 38
db/db_impl.h            | 11

@@ -29,7 +29,7 @@ static uint64_t TotalFileSize(const std::vector<FileMetaData*>& files) {
Compaction::Compaction(Version* input_version, int level, int out_level,
uint64_t target_file_size,
uint64_t max_grandparent_overlap_bytes,
bool seek_compaction, bool enable_compression,
CompressionType output_compression, bool seek_compaction,
bool deletion_compaction)
: level_(level),
out_level_(out_level),
@@ -38,8 +38,8 @@ Compaction::Compaction(Version* input_version, int level, int out_level,
input_version_(input_version),
number_levels_(input_version_->NumberLevels()),
cfd_(input_version_->cfd_),
output_compression_(output_compression),
seek_compaction_(seek_compaction),
enable_compression_(enable_compression),
deletion_compaction_(deletion_compaction),
grandparent_index_(0),
seen_key_(false),

@@ -47,8 +47,8 @@ class Compaction {
// Maximum size of files to build during this compaction.
uint64_t MaxOutputFileSize() const { return max_output_file_size_; }
// Whether compression will be enabled for compaction outputs
bool enable_compression() const { return enable_compression_; }
// What compression for output
CompressionType OutputCompressionType() const { return output_compression_; }
// Is this a trivial compaction that can be implemented by just
// moving a single input file to the next level (no merging or splitting)
@@ -104,7 +104,7 @@ class Compaction {
Compaction(Version* input_version, int level, int out_level,
uint64_t target_file_size, uint64_t max_grandparent_overlap_bytes,
bool seek_compaction = false, bool enable_compression = true,
CompressionType output_compression, bool seek_compaction = false,
bool deletion_compaction = false);
int level_;
@@ -116,8 +116,8 @@ class Compaction {
int number_levels_;
ColumnFamilyData* cfd_;
CompressionType output_compression_;
bool seek_compaction_;
bool enable_compression_;
// if true, just delete files in inputs_[0]
bool deletion_compaction_;
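
The enable_compression() query is gone; callers now ask the Compaction for the type the picker already resolved. A hedged usage sketch (the surrounding code is illustrative, not from this diff):

  CompressionType type = compaction->OutputCompressionType();
  if (type == kNoCompression) {
    // output files of this compaction are written uncompressed
  }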

@@ -18,6 +18,31 @@
namespace rocksdb {
namespace {
// Determine compression type, based on user options, level of the output
// file and whether compression is disabled.
// If enable_compression is false, then compression is always disabled no
// matter what the values of the other two parameters are.
// Otherwise, the compression type is determined based on options and level.
CompressionType GetCompressionType(const Options& options, int level,
const bool enable_compression = true) {
if (!enable_compression) {
// disable compression
return kNoCompression;
}
// If the user has specified a different compression type for each level,
// then pick the compression for that level.
if (!options.compression_per_level.empty()) {
const int n = options.compression_per_level.size() - 1;
// It is possible for level_ to be -1; in that case, we use level
// 0's compression. This occurs mostly in backwards compatibility
// situations when the builder doesn't know what level the file
// belongs to. Likewise, if level_ is beyond the end of the
// specified compression levels, use the last value.
return options.compression_per_level[std::max(0, std::min(level, n))];
} else {
return options.compression;
}
}
uint64_t TotalCompensatedFileSize(const std::vector<FileMetaData*>& files) {
uint64_t sum = 0;
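
A hedged illustration of the clamping behaviour of GetCompressionType() above (the option values are made up for the example):

  Options opts;
  opts.compression_per_level = {kNoCompression, kSnappyCompression,
                                kZlibCompression};
  // level -1 clamps to index 0            -> kNoCompression
  // level  1 maps to index 1              -> kSnappyCompression
  // level  6 clamps to the last index (2) -> kZlibCompression
  // If compression_per_level is empty, opts.compression is returned instead.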
@@ -345,7 +370,8 @@ Compaction* CompactionPicker::CompactRange(Version* version, int input_level,
}
Compaction* c = new Compaction(version, input_level, output_level,
MaxFileSizeForLevel(output_level),
MaxGrandParentOverlapBytes(input_level));
MaxGrandParentOverlapBytes(input_level),
GetCompressionType(*options_, output_level));
c->inputs_[0] = inputs;
if (ExpandWhileOverlapping(c) == false) {
@@ -465,7 +491,8 @@ Compaction* LevelCompactionPicker::PickCompactionBySize(Version* version,
assert(level >= 0);
assert(level + 1 < NumberLevels());
c = new Compaction(version, level, level + 1, MaxFileSizeForLevel(level + 1),
MaxGrandParentOverlapBytes(level));
MaxGrandParentOverlapBytes(level),
GetCompressionType(*options_, level + 1));
c->score_ = score;
// Pick the largest file in this level that is not already
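
Note that the compression type is looked up for the output level (level + 1), not the input level. A hedged example with made-up option values:

  // With compression_per_level = {kNoCompression, kSnappyCompression,
  //                               kZlibCompression},
  // a compaction from L1 into L2 picks index 2, i.e. kZlibCompression,
  // regardless of how the L1 input files were compressed.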
@@ -736,9 +763,9 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp(
}
}
}
Compaction* c =
new Compaction(version, level, level, MaxFileSizeForLevel(level),
LLONG_MAX, false, enable_compression);
Compaction* c = new Compaction(
version, level, level, MaxFileSizeForLevel(level), LLONG_MAX,
GetCompressionType(*options_, level, enable_compression));
c->score_ = score;
for (unsigned int i = start_index; i < first_index_after; i++) {
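
The third argument keeps the universal picker's ability to skip compression for a particular run; a hedged sketch of its effect:

  // When the picker decides not to compress this output
  // (enable_compression == false), the per-level settings are ignored:
  GetCompressionType(*options_, level, false);  // always kNoCompression
  GetCompressionType(*options_, level, true);   // per-level entry or options.compression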
@@ -840,7 +867,7 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalSizeAmp(
// We always compact all the files, so always compress.
Compaction* c =
new Compaction(version, level, level, MaxFileSizeForLevel(level),
LLONG_MAX, false, true);
LLONG_MAX, GetCompressionType(*options_, level));
c->score_ = score;
for (unsigned int loop = start_index; loop < files.size(); loop++) {
f = c->input_version_->files_[level][loop];
@@ -882,7 +909,7 @@ Compaction* FIFOCompactionPicker::PickCompaction(Version* version,
return nullptr;
}
Compaction* c = new Compaction(version, 0, 0, 0, 0, false, false,
Compaction* c = new Compaction(version, 0, 0, 0, 0, kNoCompression, false,
true /* is deletion compaction */);
// delete old files (FIFO)
for (auto ritr = version->files_[0].rbegin();

@@ -297,27 +297,7 @@ DBOptions SanitizeOptions(const std::string& dbname, const DBOptions& src) {
return result;
}
CompressionType GetCompressionType(const Options& options, int level,
const bool enable_compression) {
if (!enable_compression) {
// disable compression
return kNoCompression;
}
// If the user has specified a different compression type for each level,
// then pick the compression for that level.
if (!options.compression_per_level.empty()) {
const int n = options.compression_per_level.size() - 1;
// It is possible for level_ to be -1; in that case, we use level
// 0's compression. This occurs mostly in backwards compatibility
// situations when the builder doesn't know what level the file
// belongs to. Likewise, if level_ is beyond the end of the
// specified compression levels, use the last value.
return options.compression_per_level[std::max(0, std::min(level, n))];
} else {
return options.compression;
}
}
namespace {
CompressionType GetCompressionFlush(const Options& options) {
// Compressing memtable flushes might not help unless the sequential load
// optimization is used for leveled compaction. Otherwise the CPU and
@@ -325,12 +305,13 @@ CompressionType GetCompressionFlush(const Options& options) {
bool can_compress;
if (options.compaction_style == kCompactionStyleUniversal) {
can_compress =
(options.compaction_options_universal.compression_size_percent < 0);
} else {
// For leveled compress when min_level_to_compress == 0.
can_compress = (GetCompressionType(options, 0, true) != kNoCompression);
can_compress = options.compression_per_level.empty() ||
options.compression_per_level[0] != kNoCompression;
}
if (can_compress) {
@@ -339,6 +320,7 @@ CompressionType GetCompressionFlush(const Options& options) {
return kNoCompression;
}
}
} // namespace
DBImpl::DBImpl(const DBOptions& options, const std::string& dbname)
: env_(options.env),
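
For the leveled case, GetCompressionFlush() now consults compression_per_level directly instead of calling the removed helper. A hedged illustration of the resulting decision (values made up):

  Options opts;
  opts.compaction_style = kCompactionStyleLevel;
  opts.compression_per_level = {kNoCompression, kSnappyCompression};
  // L0 entry is kNoCompression -> flushes are written uncompressed.

  opts.compression_per_level.clear();
  opts.compression = kSnappyCompression;
  // No per-level override -> flushes use opts.compression.

  opts.compaction_style = kCompactionStyleUniversal;
  opts.compaction_options_universal.compression_size_percent = -1;
  // Universal style compresses flushes only while this value is negative.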
@@ -2343,13 +2325,9 @@ Status DBImpl::OpenCompactionOutputFile(CompactionState* compact) {
compact->compaction->OutputFilePreallocationSize());
ColumnFamilyData* cfd = compact->compaction->column_family_data();
CompressionType compression_type =
GetCompressionType(*cfd->options(), compact->compaction->output_level(),
compact->compaction->enable_compression());
compact->builder.reset(
NewTableBuilder(*cfd->options(), cfd->internal_comparator(),
compact->outfile.get(), compression_type));
compact->builder.reset(NewTableBuilder(
*cfd->options(), cfd->internal_comparator(), compact->outfile.get(),
compact->compaction->OutputCompressionType()));
}
LogFlush(options_.info_log);
return s;

@@ -625,15 +625,4 @@ extern Options SanitizeOptions(const std::string& db,
const Options& src);
extern DBOptions SanitizeOptions(const std::string& db, const DBOptions& src);
// Determine compression type, based on user options, level of the output
// file and whether compression is disabled.
// If enable_compression is false, then compression is always disabled no
// matter what the values of the other two parameters are.
// Otherwise, the compression type is determined based on options and level.
CompressionType GetCompressionType(const Options& options, int level,
const bool enable_compression);
// Determine compression type for L0 file written by memtable flush.
CompressionType GetCompressionFlush(const Options& options);
} // namespace rocksdb
