improve comments for CompactionJob (#5341)

Summary:
Add class- and function-level comments to the header file.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5341

Differential Revision: D15485442

Pulled By: miasantreble

fbshipit-source-id: 9f11e2a1cd3ce0f4990f01353d0a6f4b050615cf
Author: Zhongyi Xie (committed by Facebook GitHub Bot)
parent 38a06aa225
commit 09b534cc2f
db/compaction_job.cc (6 changes)
db/compaction_job.h (17 changes)

db/compaction_job.cc
@@ -415,7 +415,6 @@ void CompactionJob::Prepare() {
   write_hint_ =
       c->column_family_data()->CalculateSSTWriteHint(c->output_level());
-  // Is this compaction producing files at the bottommost level?
   bottommost_level_ = c->bottommost_level();
   if (c->ShouldFormSubcompactions()) {
@@ -445,11 +444,6 @@ struct RangeWithSize {
       : range(a, b), size(s) {}
 };

-// Generates a histogram representing potential divisions of key ranges from
-// the input. It adds the starting and/or ending keys of certain input files
-// to the working set and then finds the approximate size of data in between
-// each consecutive pair of slices. Then it divides these ranges into
-// consecutive groups such that each group has a similar size.
 void CompactionJob::GenSubcompactionBoundaries() {
   auto* c = compact_->compaction;
   auto* cfd = c->column_family_data();

db/compaction_job.h
@@ -55,6 +55,11 @@ class Version;
 class VersionEdit;
 class VersionSet;

+// CompactionJob is responsible for executing the compaction. Each (manual or
+// automated) compaction corresponds to a CompactionJob object, and usually
+// goes through the stages of `Prepare()`->`Run()`->`Install()`. CompactionJob
+// will divide the compaction into subcompactions and execute them in parallel
+// if needed.
 class CompactionJob {
  public:
   CompactionJob(
@@ -80,17 +85,28 @@ class CompactionJob {
   CompactionJob& operator=(const CompactionJob& job) = delete;

   // REQUIRED: mutex held
+  // Prepare for the compaction by setting up boundaries for each subcompaction
   void Prepare();
   // REQUIRED mutex not held
+  // Launch threads for each subcompaction and wait for them to finish. After
+  // that, verify table is usable and finally do bookkeeping to unify
+  // subcompaction results
   Status Run();
   // REQUIRED: mutex held
+  // Add compaction input/output to the current version
   Status Install(const MutableCFOptions& mutable_cf_options);

  private:
   struct SubcompactionState;

   void AggregateStatistics();
+
+  // Generates a histogram representing potential divisions of key ranges from
+  // the input. It adds the starting and/or ending keys of certain input files
+  // to the working set and then finds the approximate size of data in between
+  // each consecutive pair of slices. Then it divides these ranges into
+  // consecutive groups such that each group has a similar size.
   void GenSubcompactionBoundaries();

   // update the thread status for starting a compaction.
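
Read together, the new comments document the lifecycle of a compaction: Prepare() under the DB mutex, Run() with the mutex released, then Install() under the mutex again. A minimal sketch of a driver that respects these locking requirements follows; the wrapper name RunCompactionJob and the exact mutex plumbing are illustrative assumptions, loosely modeled on how the DB drives a CompactionJob, not code from this patch:

// Hypothetical driver, for illustration only.
#include "db/compaction_job.h"
#include "monitoring/instrumented_mutex.h"

namespace rocksdb {

Status RunCompactionJob(CompactionJob& job, InstrumentedMutex& mutex,
                        const MutableCFOptions& mutable_cf_options) {
  mutex.Lock();
  job.Prepare();  // REQUIRED: mutex held; sets up subcompaction boundaries
  mutex.Unlock();

  // REQUIRED: mutex not held; runs the subcompactions in parallel and waits
  // for them to finish.
  Status s = job.Run();

  mutex.Lock();
  if (s.ok()) {
    // REQUIRED: mutex held; adds the compaction's inputs/outputs to the
    // current version.
    s = job.Install(mutable_cf_options);
  }
  mutex.Unlock();
  return s;
}

}  // namespace rocksdb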
@@ -163,6 +179,7 @@ class CompactionJob {
   EventLogger* event_logger_;

+  // Is this compaction creating a file in the bottom most level?
   bool bottommost_level_;
   bool paranoid_file_checks_;
   bool measure_io_stats_;
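
The comment on GenSubcompactionBoundaries() describes a size-based partitioning: estimate the amount of data between consecutive boundary keys, then cut the sequence of ranges into consecutive groups of roughly equal size. Below is a self-contained sketch of that grouping step, assuming the per-range size estimates have already been computed (in RocksDB they come from the version's approximate-size queries); all names here are invented for illustration:

#include <algorithm>
#include <cstdint>
#include <vector>

// Given approximate data sizes of consecutive key ranges, choose split points
// so that each subcompaction covers a similar amount of data.
std::vector<size_t> PickBoundaries(const std::vector<uint64_t>& range_sizes,
                                   uint64_t max_subcompactions) {
  uint64_t total = 0;
  for (uint64_t s : range_sizes) total += s;
  // Aim for roughly equal bytes per subcompaction.
  const uint64_t target =
      std::max<uint64_t>(1, total / std::max<uint64_t>(1, max_subcompactions));
  std::vector<size_t> splits;  // index i means "split after range i"
  uint64_t acc = 0;
  for (size_t i = 0; i + 1 < range_sizes.size(); ++i) {
    acc += range_sizes[i];
    if (acc >= target) {
      splits.push_back(i);
      acc = 0;
    }
  }
  return splits;
}

For example, with range sizes {10, 10, 10, 10} and two subcompactions, PickBoundaries returns {1}: the work is split after the second range, giving two groups of 20 bytes each.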
