Allow users to migrate to options.level_compaction_dynamic_level_bytes=true using CompactRange()

Summary: In DB::CompactRange(), change parameter "reduce_level" to "change_level". Users can compact all data to the last level if needed. By doing so, users can migrate the DB to options.level_compaction_dynamic_level_bytes=true.

Test Plan: Add a unit test for it.

Reviewers: yhchiang, anthony, kradhakrishnan, igor, rven

Reviewed By: rven

Subscribers: leveldb, dhruba

Differential Revision: https://reviews.facebook.net/D39099
main
sdong 10 years ago
parent d333820bad
commit 4266d4fd90
  1. 1
      HISTORY.md
  2. 27
      db/db_impl.cc
  3. 2
      db/db_impl.h
  4. 77
      db/db_test.cc
  5. 4
      db/version_set.cc
  6. 10
      include/rocksdb/db.h
  7. 4
      include/rocksdb/utilities/stackable_db.h
  8. 2
      utilities/compacted_db/compacted_db_impl.h

@ -7,6 +7,7 @@
* DB::GetDbIdentity() is now a const function. If this function is overridden in your application, be sure to also make GetDbIdentity() const to avoid compile error. * DB::GetDbIdentity() is now a const function. If this function is overridden in your application, be sure to also make GetDbIdentity() const to avoid compile error.
* Move listeners from ColumnFamilyOptions to DBOptions. * Move listeners from ColumnFamilyOptions to DBOptions.
* Add max_write_buffer_number_to_maintain option * Add max_write_buffer_number_to_maintain option
* DB::CompactRange()'s parameter reduce_level is changed to change_level, to allow users to move levels to lower levels if allowed. It can be used to migrate a DB from options.level_compaction_dynamic_level_bytes=false to options.level_compaction_dynamic_level_bytes=true.
## 3.11.0 (5/19/2015) ## 3.11.0 (5/19/2015)
### New Features ### New Features

@ -1299,7 +1299,7 @@ void DBImpl::NotifyOnFlushCompleted(
Status DBImpl::CompactRange(ColumnFamilyHandle* column_family, Status DBImpl::CompactRange(ColumnFamilyHandle* column_family,
const Slice* begin, const Slice* end, const Slice* begin, const Slice* end,
bool reduce_level, int target_level, bool change_level, int target_level,
uint32_t target_path_id) { uint32_t target_path_id) {
if (target_path_id >= db_options_.db_paths.size()) { if (target_path_id >= db_options_.db_paths.size()) {
return Status::InvalidArgument("Invalid target path ID"); return Status::InvalidArgument("Invalid target path ID");
@ -1364,7 +1364,7 @@ Status DBImpl::CompactRange(ColumnFamilyHandle* column_family,
return s; return s;
} }
if (reduce_level) { if (change_level) {
s = ReFitLevel(cfd, max_level_with_files, target_level); s = ReFitLevel(cfd, max_level_with_files, target_level);
} }
LogFlush(db_options_.info_log); LogFlush(db_options_.info_log);
@ -1658,6 +1658,9 @@ int DBImpl::FindMinimumEmptyLevelFitting(ColumnFamilyData* cfd,
Status DBImpl::ReFitLevel(ColumnFamilyData* cfd, int level, int target_level) { Status DBImpl::ReFitLevel(ColumnFamilyData* cfd, int level, int target_level) {
assert(level < cfd->NumberLevels()); assert(level < cfd->NumberLevels());
if (target_level >= cfd->NumberLevels()) {
return Status::InvalidArgument("Target level exceeds number of levels");
}
SuperVersion* superversion_to_free = nullptr; SuperVersion* superversion_to_free = nullptr;
SuperVersion* new_superversion = new SuperVersion(); SuperVersion* new_superversion = new SuperVersion();
@ -1691,17 +1694,29 @@ Status DBImpl::ReFitLevel(ColumnFamilyData* cfd, int level, int target_level) {
to_level = FindMinimumEmptyLevelFitting(cfd, mutable_cf_options, level); to_level = FindMinimumEmptyLevelFitting(cfd, mutable_cf_options, level);
} }
assert(to_level <= level);
Status status; Status status;
if (to_level < level) { auto* vstorage = cfd->current()->storage_info();
if (to_level > level) {
if (level == 0) {
return Status::NotSupported(
"Cannot change from level 0 to other levels.");
}
// Check levels are empty for a trivial move
for (int l = level + 1; l <= to_level; l++) {
if (vstorage->NumLevelFiles(l) > 0) {
return Status::NotSupported(
"Levels between source and target are not empty for a move.");
}
}
}
if (to_level != level) {
Log(InfoLogLevel::DEBUG_LEVEL, db_options_.info_log, Log(InfoLogLevel::DEBUG_LEVEL, db_options_.info_log,
"[%s] Before refitting:\n%s", "[%s] Before refitting:\n%s",
cfd->GetName().c_str(), cfd->current()->DebugString().data()); cfd->GetName().c_str(), cfd->current()->DebugString().data());
VersionEdit edit; VersionEdit edit;
edit.SetColumnFamily(cfd->GetID()); edit.SetColumnFamily(cfd->GetID());
for (const auto& f : cfd->current()->storage_info()->LevelFiles(level)) { for (const auto& f : vstorage->LevelFiles(level)) {
edit.DeleteFile(level, f->fd.GetNumber()); edit.DeleteFile(level, f->fd.GetNumber());
edit.AddFile(to_level, f->fd.GetNumber(), f->fd.GetPathId(), edit.AddFile(to_level, f->fd.GetNumber(), f->fd.GetPathId(),
f->fd.GetFileSize(), f->smallest, f->largest, f->fd.GetFileSize(), f->smallest, f->largest,

@ -125,7 +125,7 @@ class DBImpl : public DB {
using DB::CompactRange; using DB::CompactRange;
virtual Status CompactRange(ColumnFamilyHandle* column_family, virtual Status CompactRange(ColumnFamilyHandle* column_family,
const Slice* begin, const Slice* end, const Slice* begin, const Slice* end,
bool reduce_level = false, int target_level = -1, bool change_level = false, int target_level = -1,
uint32_t target_path_id = 0) override; uint32_t target_path_id = 0) override;
using DB::CompactFiles; using DB::CompactFiles;

@ -11313,6 +11313,83 @@ TEST_F(DBTest, DynamicLevelMaxBytesBaseInc) {
env_->SetBackgroundThreads(1, Env::HIGH); env_->SetBackgroundThreads(1, Env::HIGH);
} }
// Verifies that an existing DB written with
// level_compaction_dynamic_level_bytes=false can be migrated to
// level_compaction_dynamic_level_bytes=true by reopening with the new
// option and issuing a full-range CompactRange() with change_level=true,
// moving all data to the bottommost level. The DB must stay readable
// while the migration compaction runs, and must keep compacting
// correctly afterwards.
TEST_F(DBTest, MigrateToDynamicLevelMaxBytesBase) {
Random rnd(301);
// Keys [0, kMaxKey) are the churned range; keys [kMaxKey, kMaxKey + i)
// are a second, never-deleted range used to verify reads.
const int kMaxKey = 2000;
Options options;
options.create_if_missing = true;
// Tiny buffers/files so many L0 flushes and multi-level compactions
// happen with only ~2000 small values.
options.db_write_buffer_size = 2048;
options.write_buffer_size = 2048;
options.max_write_buffer_number = 8;
options.level0_file_num_compaction_trigger = 4;
options.level0_slowdown_writes_trigger = 4;
options.level0_stop_writes_trigger = 8;
options.target_file_size_base = 2048;
// Start with the legacy (static level sizing) layout.
options.level_compaction_dynamic_level_bytes = false;
options.max_bytes_for_level_base = 10240;
options.max_bytes_for_level_multiplier = 4;
options.hard_rate_limit = 1.1;
options.num_levels = 8;
DestroyAndReopen(options);
// Checks DB contents: every key in the [kMaxKey, ...) range must exist;
// in the low range, the first num_keys/10 keys were deleted by the
// Delete(Key(i / 10)) calls below and must be absent, the rest present.
auto verify_func = [&](int num_keys) {
for (int i = 0; i < num_keys; i++) {
ASSERT_NE("NOT_FOUND", Get(Key(kMaxKey + i)));
if (i < num_keys / 10) {
ASSERT_EQ("NOT_FOUND", Get(Key(i)));
} else {
ASSERT_NE("NOT_FOUND", Get(Key(i)));
}
}
};
// Phase 1: populate under the legacy layout (puts plus some deletes).
int total_keys = 1000;
for (int i = 0; i < total_keys; i++) {
ASSERT_OK(Put(Key(i), RandomString(&rnd, 102)));
ASSERT_OK(Put(Key(kMaxKey + i), RandomString(&rnd, 102)));
ASSERT_OK(Delete(Key(i / 10)));
}
verify_func(total_keys);
dbfull()->TEST_WaitForCompact();
// Phase 2: reopen with dynamic level bytes enabled. Auto compactions
// are disabled so the migration is driven solely by the manual
// CompactRange below.
options.level_compaction_dynamic_level_bytes = true;
options.disable_auto_compactions = true;
Reopen(options);
verify_func(total_keys);
std::atomic_bool compaction_finished(false);
// Issue manual compaction in one thread and still verify DB state
// in main thread.
std::thread t([&]() {
// change_level=true, target_level=last level: push everything to the
// bottommost level, the layout dynamic leveling expects.
dbfull()->CompactRange(nullptr, nullptr, true, options.num_levels - 1);
compaction_finished.store(true);
});
// Keep reading concurrently until the migration compaction finishes,
// to check the DB stays consistent during the move.
do {
verify_func(total_keys);
} while (!compaction_finished.load());
t.join();
// Phase 3: re-enable auto compactions and keep writing under the new
// layout to confirm normal operation post-migration.
ASSERT_OK(dbfull()->SetOptions({
{"disable_auto_compactions", "false"},
}));
int total_keys2 = 2000;
for (int i = total_keys; i < total_keys2; i++) {
ASSERT_OK(Put(Key(i), RandomString(&rnd, 102)));
ASSERT_OK(Put(Key(kMaxKey + i), RandomString(&rnd, 102)));
ASSERT_OK(Delete(Key(i / 10)));
}
verify_func(total_keys2);
dbfull()->TEST_WaitForCompact();
verify_func(total_keys2);
// Base level is not level 1
// With dynamic level bytes and this data size, the first non-empty
// level should be below L2 — L1/L2 staying empty shows the dynamic
// layout actually took effect.
ASSERT_EQ(NumTableFilesAtLevel(1), 0);
ASSERT_EQ(NumTableFilesAtLevel(2), 0);
}
TEST_F(DBTest, DynamicLevelCompressionPerLevel) { TEST_F(DBTest, DynamicLevelCompressionPerLevel) {
if (!Snappy_Supported()) { if (!Snappy_Supported()) {

@ -973,8 +973,8 @@ void VersionStorageInfo::ComputeCompensatedSizes() {
// shape of LSM tree. // shape of LSM tree.
if (file_meta->num_deletions * 2 >= file_meta->num_entries) { if (file_meta->num_deletions * 2 >= file_meta->num_entries) {
file_meta->compensated_file_size += file_meta->compensated_file_size +=
(file_meta->num_deletions * 2 - file_meta->num_entries) (file_meta->num_deletions * 2 - file_meta->num_entries) *
* average_value_size * kDeletionWeightOnCompaction; average_value_size * kDeletionWeightOnCompaction;
} }
} }
} }

@ -417,20 +417,20 @@ class DB {
// Note that after the entire database is compacted, all data are pushed // Note that after the entire database is compacted, all data are pushed
// down to the last level containing any data. If the total data size // down to the last level containing any data. If the total data size
// after compaction is reduced, that level might not be appropriate for // after compaction is reduced, that level might not be appropriate for
// hosting all the files. In this case, client could set reduce_level // hosting all the files. In this case, client could set change_level
// to true, to move the files back to the minimum level capable of holding // to true, to move the files back to the minimum level capable of holding
// the data set or a given level (specified by non-negative target_level). // the data set or a given level (specified by non-negative target_level).
// Compaction outputs should be placed in options.db_paths[target_path_id]. // Compaction outputs should be placed in options.db_paths[target_path_id].
// Behavior is undefined if target_path_id is out of range. // Behavior is undefined if target_path_id is out of range.
virtual Status CompactRange(ColumnFamilyHandle* column_family, virtual Status CompactRange(ColumnFamilyHandle* column_family,
const Slice* begin, const Slice* end, const Slice* begin, const Slice* end,
bool reduce_level = false, int target_level = -1, bool change_level = false, int target_level = -1,
uint32_t target_path_id = 0) = 0; uint32_t target_path_id = 0) = 0;
virtual Status CompactRange(const Slice* begin, const Slice* end, virtual Status CompactRange(const Slice* begin, const Slice* end,
bool reduce_level = false, int target_level = -1, bool change_level = false, int target_level = -1,
uint32_t target_path_id = 0) { uint32_t target_path_id = 0) {
return CompactRange(DefaultColumnFamily(), begin, end, reduce_level, return CompactRange(DefaultColumnFamily(), begin, end, change_level,
target_level, target_path_id); change_level, target_path_id);
} }
virtual Status SetOptions(ColumnFamilyHandle* column_family, virtual Status SetOptions(ColumnFamilyHandle* column_family,
const std::unordered_map<std::string, std::string>& new_options) { const std::unordered_map<std::string, std::string>& new_options) {

@ -129,9 +129,9 @@ class StackableDB : public DB {
using DB::CompactRange; using DB::CompactRange;
virtual Status CompactRange(ColumnFamilyHandle* column_family, virtual Status CompactRange(ColumnFamilyHandle* column_family,
const Slice* begin, const Slice* end, const Slice* begin, const Slice* end,
bool reduce_level = false, int target_level = -1, bool change_level = false, int target_level = -1,
uint32_t target_path_id = 0) override { uint32_t target_path_id = 0) override {
return db_->CompactRange(column_family, begin, end, reduce_level, return db_->CompactRange(column_family, begin, end, change_level,
target_level, target_path_id); target_level, target_path_id);
} }

@ -56,7 +56,7 @@ class CompactedDBImpl : public DBImpl {
using DBImpl::CompactRange; using DBImpl::CompactRange;
virtual Status CompactRange(ColumnFamilyHandle* column_family, virtual Status CompactRange(ColumnFamilyHandle* column_family,
const Slice* begin, const Slice* end, const Slice* begin, const Slice* end,
bool reduce_level = false, int target_level = -1, bool change_level = false, int target_level = -1,
uint32_t target_path_id = 0) override { uint32_t target_path_id = 0) override {
return Status::NotSupported("Not supported in compacted db mode."); return Status::NotSupported("Not supported in compacted db mode.");
} }

Loading…
Cancel
Save