// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#pragma once

#include "util/arena.h"
#include "util/autovector.h"
#include "util/mutable_cf_options.h"
#include "db/version_set.h"

namespace rocksdb {

// The structure that manages compaction input files associated
// with the same physical level.
struct CompactionInputFiles {
  int level;
  std::vector<FileMetaData*> files;
  inline bool empty() const { return files.empty(); }
  inline size_t size() const { return files.size(); }
  inline void clear() { files.clear(); }
  inline FileMetaData* operator[](size_t i) const { return files[i]; }
};
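
// Illustrative sketch (not part of this header): a CompactionInputFiles entry
// is just a level number plus the metadata of the files chosen from that
// level. Assuming a VersionStorageInfo* "vstorage" exposing a LevelFiles()
// accessor, it could be populated like this:
//
//   CompactionInputFiles input;
//   input.level = 1;
//   for (FileMetaData* f : vstorage->LevelFiles(1)) {
//     input.files.push_back(f);
//   }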

class Version;
class ColumnFamilyData;
class VersionStorageInfo;

// A Compaction encapsulates information about a compaction.
class Compaction {
 public:
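  // Construct a compaction that will merge the files listed in "inputs",
  // reading from "start_level" and writing its output to "output_level".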
  Compaction(VersionStorageInfo* input_version,
             const autovector<CompactionInputFiles>& inputs,
             int start_level, int output_level,
             uint64_t max_grandparent_overlap_bytes,
             const CompactionOptions& options,
             bool deletion_compaction);

  // No copying allowed
  Compaction(const Compaction&) = delete;
  void operator=(const Compaction&) = delete;

  ~Compaction();

  // Returns the level associated with the specified compaction input level.
  // If compaction_input_level is not specified, it defaults to 0 (the first
  // input level).
  int level(size_t compaction_input_level = 0) const {
    return inputs_[compaction_input_level].level;
  }

  // Outputs will go to this level
  int output_level() const { return output_level_; }

  // Returns the number of input levels in this compaction.
  size_t num_input_levels() const { return inputs_.size(); }

  // Return the object that holds the edits to the descriptor done
  // by this compaction.
  VersionEdit* edit() const { return edit_; }

  // Returns the number of input files associated with the specified
  // compaction input level.
  // The function returns 0 if "compaction_input_level" >= "num_input_levels()".
  size_t num_input_files(size_t compaction_input_level) const {
    if (compaction_input_level < inputs_.size()) {
      return inputs_[compaction_input_level].size();
    }
    return 0;
  }

  // Returns input version of the compaction
  Version* input_version() const { return input_version_; }

  // Returns the ColumnFamilyData associated with the compaction.
  ColumnFamilyData* column_family_data() const { return cfd_; }

  // Returns the file meta data of the 'i'th input file at the
  // specified compaction input level.
  // REQUIREMENT: "compaction_input_level" must be < "num_input_levels()"
  FileMetaData* input(size_t compaction_input_level, size_t i) const {
    assert(compaction_input_level < inputs_.size());
    return inputs_[compaction_input_level][i];
  }
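
  // Illustrative sketch (not part of the API): given a Compaction* "c", the
  // accessors above are enough to walk every input file of a compaction:
  //
  //   for (size_t lvl = 0; lvl < c->num_input_levels(); ++lvl) {
  //     for (size_t i = 0; i < c->num_input_files(lvl); ++i) {
  //       FileMetaData* f = c->input(lvl, i);
  //       // ... inspect f ...
  //     }
  //   }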

  // Returns the list of file meta data of the specified compaction
  // input level.
  // REQUIREMENT: "compaction_input_level" must be < "num_input_levels()"
  std::vector<FileMetaData*>* const inputs(size_t compaction_input_level) {
    assert(compaction_input_level < inputs_.size());
    return &inputs_[compaction_input_level].files;
  }

  // Returns the LevelFilesBrief of the specified compaction input level.
  LevelFilesBrief* input_levels(size_t compaction_input_level) {
    return &input_levels_[compaction_input_level];
  }

  // Maximum size of files to build during this compaction.
  uint64_t MaxOutputFileSize() const { return max_output_file_size_; }

  // What compression type to use for the output files.
  CompressionType OutputCompressionType() const { return output_compression_; }

  // Returns the id of the DB path the output files should be written to.
  uint32_t GetOutputPathId() const { return output_path_id_; }

  // Generate input_levels_ from inputs_
  // Should be called when inputs_ is stable
  void GenerateFileLevels();

  // Is this a trivial compaction that can be implemented by just
  // moving a single input file to the next level (no merging or splitting)
  bool IsTrivialMove() const;

  // If true, then the compaction can be done by simply deleting input files.
  bool IsDeletionCompaction() const {
    return deletion_compaction_;
  }

  // Add all inputs to this compaction as delete operations to *edit.
  void AddInputDeletions(VersionEdit* edit);

  // Returns true if the available information we have guarantees that
  // the input "user_key" does not exist in any level beyond "output_level()".
  bool KeyNotExistsBeyondOutputLevel(const Slice& user_key);

  // Returns true iff we should stop building the current output
  // before processing "internal_key".
  bool ShouldStopBefore(const Slice& internal_key);

  // Clear all files to indicate that they are not being compacted.
  // Delete this compaction from the list of running compactions.
  void ReleaseCompactionFiles(Status status);

  // Writes a summary of the compaction into "output", using at most "len"
  // bytes. The caller is responsible for the memory management of "output".
  void Summary(char* output, int len);

  // Return the score that was used to pick this compaction run.
  double score() const { return score_; }

  // Is this compaction creating a file in the bottommost level?
  bool BottomMostLevel() { return bottommost_level_; }

  // Does this compaction include all sst files?
  bool IsFullCompaction() { return is_full_compaction_; }

  // Was this compaction triggered manually by the client?
  bool IsManualCompaction() { return is_manual_compaction_; }
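
  // Set the id of the DB path the output files should be written to.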
  void SetOutputPathId(uint32_t path_id) { output_path_id_ = path_id; }

  // Return the MutableCFOptions that should be used throughout the compaction
  // procedure
  const MutableCFOptions* mutable_cf_options() { return &mutable_cf_options_; }

  // Returns the size in bytes that the output file should be preallocated to.
  // In level compaction, that is max_output_file_size_. In universal
  // compaction, that is the sum of all input file sizes.
  uint64_t OutputFilePreallocationSize(const MutableCFOptions& mutable_options);
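
  // Set the Version that provides the input files for this compaction.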
  void SetInputVersion(Version* input_version);

  // mark (or clear) all files that are being compacted
  void MarkFilesBeingCompacted(bool mark_as_compacted);

  // Initialize whether the compaction is producing files at the
  // bottommost level.
  //
  // @see BottomMostLevel()
  void SetupBottomMostLevel(VersionStorageInfo* vstorage, bool is_manual,
                            bool level0_only);

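  // Create a Compaction object directly, bypassing the compaction pickers.
  // Intended for tests only.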
  static Compaction* TEST_NewCompaction(
      int num_levels, int start_level, int out_level, uint64_t target_file_size,
      uint64_t max_grandparent_overlap_bytes, uint32_t output_path_id,
      CompressionType output_compression, bool seek_compaction = false,
      bool deletion_compaction = false);

  CompactionInputFiles* TEST_GetInputFiles(int l) {
    return &inputs_[l];
  }

 private:
  friend class CompactionPicker;
  friend class UniversalCompactionPicker;
  friend class FIFOCompactionPicker;
  friend class LevelCompactionPicker;

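  // Lower-level constructor, primarily for use by the compaction pickers
  // declared as friends above and by TEST_NewCompaction().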
  Compaction(int num_levels, int start_level, int out_level,
             uint64_t target_file_size, uint64_t max_grandparent_overlap_bytes,
             uint32_t output_path_id, CompressionType output_compression,
             bool seek_compaction = false, bool deletion_compaction = false);

  const int start_level_;   // the lowest level to be compacted
  const int output_level_;  // level to which output files are stored
  uint64_t max_output_file_size_;           // max size of each output file
  uint64_t max_grandparent_overlap_bytes_;  // max overlap with grandparent
                                            // files allowed per output file
  MutableCFOptions mutable_cf_options_;  // options in effect for this compaction
  Version* input_version_;               // input version of the compaction
  VersionEdit* edit_;      // holds the edits to the descriptor done by this
                           // compaction
  int number_levels_;      // number of levels in the column family
  ColumnFamilyData* cfd_;  // column family being compacted
  Arena arena_;            // Arena used to allocate space for input_levels_

  uint32_t output_path_id_;             // DB path id for the output files
  CompressionType output_compression_;  // compression type for the output files
  bool seek_compaction_;
  // If true, then the compaction can be done by simply deleting input files.
  bool deletion_compaction_;

  // Compaction input files organized by level.
  autovector<CompactionInputFiles> inputs_;

  // A copy of inputs_, organized more closely in memory
  autovector<LevelFilesBrief, 2> input_levels_;

  // State used to check for the number of overlapping grandparent files
  // (grandparent == "output_level_ + 1")
  // This vector is updated by Version::GetOverlappingInputs().
  std::vector<FileMetaData*> grandparents_;
  size_t grandparent_index_;   // Index in grandparents_
  bool seen_key_;              // Some output key has been seen
  uint64_t overlapped_bytes_;  // Bytes of overlap between current output
                               // and grandparent files
  int base_index_;    // index of the file in files_[start_level_]
  int parent_index_;  // index of some file with same range in
                      // files_[start_level_+1]
  double score_;      // score that was used to pick this compaction.

  // Is this compaction creating a file in the bottommost level?
  bool bottommost_level_;
  // Does this compaction include all sst files?
  bool is_full_compaction_;

  // Is this compaction requested by the client?
  bool is_manual_compaction_;

  // "level_ptrs_" holds indices into "input_version_->levels_". Each index
  // remembers which file of the associated level we are currently using to
  // check KeyNotExistsBeyondOutputLevel() for deletion operations.
  // Since it is only used by KeyNotExistsBeyondOutputLevel(), it only
  // records indices for the levels beyond "output_level_".
  std::vector<size_t> level_ptrs_;

  // In case of compaction error, reset the nextIndex that is used
  // to pick up the next file to be compacted from files_by_size_
  void ResetNextCompactionIndex();
};

// Utility function
extern uint64_t TotalFileSize(const std::vector<FileMetaData*>& files);

}  // namespace rocksdb