Don't always compress L0 files written by memtable flush

Summary:
Code was always compressing L0 files written by a memtable flush
when compression was enabled. Now this is done when
min_level_to_compress=0 for leveled compaction and when
compaction_options_universal.compression_size_percent=-1 for universal
compaction.

Task ID: #3416472

Blame Rev:

Test Plan:
ran db_bench with compression options

Revert Plan:

Database Impact:

Memcache Impact:

Other Notes:

Important:

- begin *PUBLIC* platform impact section -
Bugzilla: #
- end platform impact -

Reviewers: dhruba, igor, sdong

Reviewed By: dhruba

CC: leveldb

Differential Revision: https://reviews.facebook.net/D14757
main
Mark Callaghan 11 years ago
parent a45b7d83ba
commit 50994bf699
  1. 4
      db/builder.cc
  2. 2
      db/builder.h
  3. 33
      db/db_impl.cc
  4. 3
      db/db_impl.h
  5. 3
      db/repair.cc

@ -42,7 +42,7 @@ Status BuildTable(const std::string& dbname,
const Comparator* user_comparator, const Comparator* user_comparator,
const SequenceNumber newest_snapshot, const SequenceNumber newest_snapshot,
const SequenceNumber earliest_seqno_in_memtable, const SequenceNumber earliest_seqno_in_memtable,
const bool enable_compression) { const CompressionType compression) {
Status s; Status s;
meta->file_size = 0; meta->file_size = 0;
meta->smallest_seqno = meta->largest_seqno = 0; meta->smallest_seqno = meta->largest_seqno = 0;
@ -65,7 +65,7 @@ Status BuildTable(const std::string& dbname,
} }
TableBuilder* builder = GetTableBuilder(options, file.get(), TableBuilder* builder = GetTableBuilder(options, file.get(),
options.compression); compression);
// the first key is the smallest key // the first key is the smallest key
Slice key = iter->key(); Slice key = iter->key();

@ -43,6 +43,6 @@ extern Status BuildTable(const std::string& dbname,
const Comparator* user_comparator, const Comparator* user_comparator,
const SequenceNumber newest_snapshot, const SequenceNumber newest_snapshot,
const SequenceNumber earliest_seqno_in_memtable, const SequenceNumber earliest_seqno_in_memtable,
const bool enable_compression); const CompressionType compression);
} // namespace rocksdb } // namespace rocksdb

@ -225,6 +225,28 @@ CompressionType GetCompressionType(const Options& options, int level,
} }
} }
// Determine the compression type to use for an L0 file produced by a
// memtable flush, based on the database's compaction configuration.
CompressionType GetCompressionFlush(const Options& options) {
  // Compressing memtable flushes might not help unless the sequential load
  // optimization is used for leveled compaction. Otherwise the CPU and
  // latency overhead is not offset by saving much space.
  if (options.compaction_style == kCompactionStyleUniversal) {
    // Universal compaction: compress the flush output only when the
    // size-percent threshold is disabled (negative value).
    return (options.compaction_options_universal.compression_size_percent < 0)
               ? options.compression
               : kNoCompression;
  }
  // Leveled compaction: compress the flush output when level 0 itself is
  // configured to be compressed (i.e. min_level_to_compress == 0).
  return (GetCompressionType(options, 0, true) != kNoCompression)
             ? options.compression
             : kNoCompression;
}
DBImpl::DBImpl(const Options& options, const std::string& dbname) DBImpl::DBImpl(const Options& options, const std::string& dbname)
: env_(options.env), : env_(options.env),
dbname_(dbname), dbname_(dbname),
@ -1068,7 +1090,8 @@ Status DBImpl::WriteLevel0TableForRecovery(MemTable* mem, VersionEdit* edit) {
s = BuildTable(dbname_, env_, options_, storage_options_, s = BuildTable(dbname_, env_, options_, storage_options_,
table_cache_.get(), iter, &meta, table_cache_.get(), iter, &meta,
user_comparator(), newest_snapshot, user_comparator(), newest_snapshot,
earliest_seqno_in_memtable, true); earliest_seqno_in_memtable,
GetCompressionFlush(options_));
LogFlush(options_.info_log); LogFlush(options_.info_log);
mutex_.Lock(); mutex_.Lock();
} }
@ -1129,15 +1152,11 @@ Status DBImpl::WriteLevel0Table(std::vector<MemTable*> &mems, VersionEdit* edit,
Log(options_.info_log, Log(options_.info_log,
"Level-0 flush table #%lu: started", "Level-0 flush table #%lu: started",
(unsigned long)meta.number); (unsigned long)meta.number);
// We skip compression if universal compression is used and the size
// threshold is set for compression.
bool enable_compression = (options_.compaction_style
!= kCompactionStyleUniversal ||
options_.compaction_options_universal.compression_size_percent < 0);
s = BuildTable(dbname_, env_, options_, storage_options_, s = BuildTable(dbname_, env_, options_, storage_options_,
table_cache_.get(), iter, &meta, table_cache_.get(), iter, &meta,
user_comparator(), newest_snapshot, user_comparator(), newest_snapshot,
earliest_seqno_in_memtable, enable_compression); earliest_seqno_in_memtable, GetCompressionFlush(options_));
LogFlush(options_.info_log); LogFlush(options_.info_log);
delete iter; delete iter;
Log(options_.info_log, "Level-0 flush table #%lu: %lu bytes %s", Log(options_.info_log, "Level-0 flush table #%lu: %lu bytes %s",

@ -589,4 +589,7 @@ extern Options SanitizeOptions(const std::string& db,
CompressionType GetCompressionType(const Options& options, int level, CompressionType GetCompressionType(const Options& options, int level,
const bool enable_compression); const bool enable_compression);
// Determine compression type for L0 file written by memtable flush.
CompressionType GetCompressionFlush(const Options& options);
} // namespace rocksdb } // namespace rocksdb

@ -225,7 +225,8 @@ class Repairer {
Iterator* iter = mem->NewIterator(); Iterator* iter = mem->NewIterator();
status = BuildTable(dbname_, env_, options_, storage_options_, status = BuildTable(dbname_, env_, options_, storage_options_,
table_cache_, iter, &meta, table_cache_, iter, &meta,
icmp_.user_comparator(), 0, 0, true); icmp_.user_comparator(), 0, 0,
kNoCompression);
delete iter; delete iter;
delete mem->Unref(); delete mem->Unref();
mem = nullptr; mem = nullptr;

Loading…
Cancel
Save