@@ -38,6 +38,19 @@ class DBTestUniversalCompaction : public DBTestUniversalCompactionBase {
};

namespace {
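// Asserts that none of the given (compacted-away) file names still appear
// anywhere in the column family metadata. The check is compiled out in
// release builds, where assert() is a no-op anyway.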
void VerifyCompactionResult(
    const ColumnFamilyMetaData& cf_meta,
    const std::set<std::string>& overlapping_file_numbers) {
#ifndef NDEBUG
  for (auto& level : cf_meta.levels) {
    for (auto& file : level.files) {
      assert(overlapping_file_numbers.find(file.name) ==
             overlapping_file_numbers.end());
    }
  }
#endif
}

class KeepFilter : public CompactionFilter {
 public:
  virtual bool Filter(int level, const Slice& key, const Slice& value,

@@ -289,6 +302,112 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionSizeAmplification) {
  ASSERT_EQ(NumSortedRuns(1), 1);
}

TEST_P(DBTestUniversalCompaction, CompactFilesOnUniversalCompaction) {
  const int kTestKeySize = 16;
  const int kTestValueSize = 984;
  const int kEntrySize = kTestKeySize + kTestValueSize;
  const int kEntriesPerBuffer = 10;

  ChangeCompactOptions();
  Options options;
  options.create_if_missing = true;
  options.write_buffer_size = kEntrySize * kEntriesPerBuffer;
  options.compaction_style = kCompactionStyleLevel;
  options.num_levels = 1;
  options.target_file_size_base = options.write_buffer_size;
  options.compression = kNoCompression;
  options = CurrentOptions(options);
  CreateAndReopenWithCF({"pikachu"}, options);
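  // Although compaction_style is set to level above, ChangeCompactOptions()
  // plus CurrentOptions() presumably switch this parameterized run back to
  // universal compaction; the assertion below guards that assumption.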
  ASSERT_EQ(options.compaction_style, kCompactionStyleUniversal);

  Random rnd(301);
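  // Write far more entries than a single 10KB memtable holds, so that many
  // small files are flushed (and possibly compacted) before we sample them.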
  for (int key = 1024 * kEntriesPerBuffer; key >= 0; --key) {
    ASSERT_OK(Put(1, ToString(key), RandomString(&rnd, kTestValueSize)));
  }
  dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
  dbfull()->TEST_WaitForCompact();

  ColumnFamilyMetaData cf_meta;
  dbfull()->GetColumnFamilyMetaData(handles_[1], &cf_meta);
  std::vector<std::string> compaction_input_file_names;
  for (auto file : cf_meta.levels[0].files) {
    if (rnd.OneIn(2)) {
      compaction_input_file_names.push_back(file.name);
    }
  }
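  // The coin flips above may have selected nothing; fall back to the first
  // L0 file so there is always at least one compaction input.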
  if (compaction_input_file_names.size() == 0) {
    compaction_input_file_names.push_back(
        cf_meta.levels[0].files[0].name);
  }

  // expect failure since universal compaction only allows L0 output
  ASSERT_TRUE(!dbfull()->CompactFiles(
      CompactionOptions(), handles_[1],
      compaction_input_file_names, 1).ok());

  // expect ok and verify the compacted files no longer exist
  ASSERT_OK(dbfull()->CompactFiles(
      CompactionOptions(), handles_[1],
      compaction_input_file_names, 0));

  dbfull()->GetColumnFamilyMetaData(handles_[1], &cf_meta);
  VerifyCompactionResult(
      cf_meta,
      std::set<std::string>(compaction_input_file_names.begin(),
                            compaction_input_file_names.end()));
  compaction_input_file_names.clear();

  // Pick the first and the last file, and expect everything to be
  // compacted into a single file.
  compaction_input_file_names.push_back(
      cf_meta.levels[0].files[0].name);
  compaction_input_file_names.push_back(
      cf_meta.levels[0].files[
          cf_meta.levels[0].files.size() - 1].name);
  ASSERT_OK(dbfull()->CompactFiles(
      CompactionOptions(), handles_[1],
      compaction_input_file_names, 0));

  dbfull()->GetColumnFamilyMetaData(handles_[1], &cf_meta);
  ASSERT_EQ(cf_meta.levels[0].files.size(), 1U);
}

TEST_P(DBTestUniversalCompaction, UniversalCompactionTargetLevel) {
  Options options;
  options.compaction_style = kCompactionStyleUniversal;
  options.write_buffer_size = 100 << 10;  // 100KB
  options.num_levels = 7;
  options.disable_auto_compactions = true;
  options = CurrentOptions(options);
  DestroyAndReopen(options);
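  // Auto compactions are disabled above, so the three overlapping flushes
  // below should remain as three separate L0 files until CompactRange runs.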

  // Generate 3 overlapping files
  Random rnd(301);
  for (int i = 0; i < 210; i++) {
    ASSERT_OK(Put(Key(i), RandomString(&rnd, 100)));
  }
  ASSERT_OK(Flush());

  for (int i = 200; i < 300; i++) {
    ASSERT_OK(Put(Key(i), RandomString(&rnd, 100)));
  }
  ASSERT_OK(Flush());

  for (int i = 250; i < 260; i++) {
    ASSERT_OK(Put(Key(i), RandomString(&rnd, 100)));
  }
  ASSERT_OK(Flush());

  ASSERT_EQ("3", FilesPerLevel(0));
  // Compact all files into 1 file and put it in L4
  CompactRangeOptions compact_options;
  compact_options.change_level = true;
  compact_options.target_level = 4;
  db_->CompactRange(compact_options, nullptr, nullptr);
  ASSERT_EQ("0,0,0,0,1", FilesPerLevel(0));
}

class DBTestUniversalCompactionMultiLevels
    : public DBTestUniversalCompactionBase {
 public:

@@ -662,6 +781,385 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionCompressRatio2) {
  ASSERT_LT(TotalSize(), 120000U * 12 * 0.8 + 120000 * 2);
}

// Test that checks trivial move in universal compaction
TEST_P(DBTestUniversalCompaction, UniversalCompactionTrivialMoveTest1) {
  int32_t trivial_move = 0;
  int32_t non_trivial_move = 0;
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
      "DBImpl::BackgroundCompaction:TrivialMove",
      [&](void* arg) { trivial_move++; });
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
      "DBImpl::BackgroundCompaction:NonTrivial",
      [&](void* arg) { non_trivial_move++; });
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
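  // The sync points above count how often background compaction takes the
  // trivial-move path (moving whole input files to the output level by
  // updating metadata only, without rewriting them) versus a full rewrite.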

  Options options;
  options.compaction_style = kCompactionStyleUniversal;
  options.compaction_options_universal.allow_trivial_move = true;
  options.num_levels = 3;
  options.write_buffer_size = 100 << 10;  // 100KB
  options.level0_file_num_compaction_trigger = 3;
  options.max_background_compactions = 1;
  options.target_file_size_base = 32 * 1024;
  options = CurrentOptions(options);
  DestroyAndReopen(options);
  CreateAndReopenWithCF({"pikachu"}, options);

  // Trigger compaction if size amplification exceeds 110%
  options.compaction_options_universal.max_size_amplification_percent = 110;
  options = CurrentOptions(options);
  ReopenWithColumnFamilies({"default", "pikachu"}, options);

  Random rnd(301);
  int num_keys = 150000;
  for (int i = 0; i < num_keys; i++) {
    ASSERT_OK(Put(1, Key(i), Key(i)));
  }
  std::vector<std::string> values;

  ASSERT_OK(Flush(1));
  dbfull()->TEST_WaitForCompact();

  ASSERT_GT(trivial_move, 0);
  ASSERT_EQ(non_trivial_move, 0);

  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
}

// Test that checks trivial move in universal compaction
TEST_P(DBTestUniversalCompaction, UniversalCompactionTrivialMoveTest2) {
  int32_t trivial_move = 0;
  int32_t non_trivial_move = 0;
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
      "DBImpl::BackgroundCompaction:TrivialMove",
      [&](void* arg) { trivial_move++; });
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
      "DBImpl::BackgroundCompaction:NonTrivial",
      [&](void* arg) { non_trivial_move++; });
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
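  // Same counters as in TrivialMoveTest1; this variant uses more levels,
  // more L0 files per compaction, and four background compaction threads to
  // exercise trivial moves under heavier concurrency.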

  Options options;
  options.compaction_style = kCompactionStyleUniversal;
  options.compaction_options_universal.allow_trivial_move = true;
  options.num_levels = 15;
  options.write_buffer_size = 100 << 10;  // 100KB
  options.level0_file_num_compaction_trigger = 8;
  options.max_background_compactions = 4;
  options.target_file_size_base = 64 * 1024;
  options = CurrentOptions(options);
  DestroyAndReopen(options);
  CreateAndReopenWithCF({"pikachu"}, options);

  // Trigger compaction if size amplification exceeds 110%
  options.compaction_options_universal.max_size_amplification_percent = 110;
  options = CurrentOptions(options);
  ReopenWithColumnFamilies({"default", "pikachu"}, options);

  Random rnd(301);
  int num_keys = 500000;
  for (int i = 0; i < num_keys; i++) {
    ASSERT_OK(Put(1, Key(i), Key(i)));
  }
  std::vector<std::string> values;

  ASSERT_OK(Flush(1));
  dbfull()->TEST_WaitForCompact();

  ASSERT_GT(trivial_move, 0);
  ASSERT_EQ(non_trivial_move, 0);

  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
}

TEST_P(DBTestUniversalCompaction, UniversalCompactionFourPaths) {
  Options options;
  options.db_paths.emplace_back(dbname_, 300 * 1024);
  options.db_paths.emplace_back(dbname_ + "_2", 300 * 1024);
  options.db_paths.emplace_back(dbname_ + "_3", 500 * 1024);
  options.db_paths.emplace_back(dbname_ + "_4", 1024 * 1024 * 1024);
  options.compaction_style = kCompactionStyleUniversal;
  options.write_buffer_size = 100 << 10;  // 100KB
  options.level0_file_num_compaction_trigger = 2;
  options.num_levels = 1;
  options = CurrentOptions(options);
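  // With multiple db_paths, a compaction output appears to be placed in the
  // first path whose size limit can still accommodate the projected output,
  // so ever-larger compaction results spill into the later, bigger paths.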

  std::vector<std::string> filenames;
  env_->GetChildren(options.db_paths[1].path, &filenames);
  // Delete archival files.
  for (size_t i = 0; i < filenames.size(); ++i) {
    env_->DeleteFile(options.db_paths[1].path + "/" + filenames[i]);
  }
  env_->DeleteDir(options.db_paths[1].path);
  Reopen(options);

  Random rnd(301);
  int key_idx = 0;
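  // The "(a, b, ...)" comments below appear to track the sizes of the live
  // sorted runs in ~100KB units, newest first: e.g. "(1, 4)" is one fresh
  // ~100KB flush plus one 400KB compaction output.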

  // First three 110KB files are not going to second path.
  // After that, (100K, 200K)
  for (int num = 0; num < 3; num++) {
    GenerateNewFile(&rnd, &key_idx);
  }

  // Another 110KB triggers a compaction to 400K file to second path
  GenerateNewFile(&rnd, &key_idx);
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[2].path));

  // (1, 4)
  GenerateNewFile(&rnd, &key_idx);
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[2].path));
  ASSERT_EQ(1, GetSstFileCount(dbname_));

  // (1, 1, 4) -> (2, 4)
  GenerateNewFile(&rnd, &key_idx);
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[2].path));
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
  ASSERT_EQ(0, GetSstFileCount(dbname_));

  // (1, 2, 4)
  GenerateNewFile(&rnd, &key_idx);
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[2].path));
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
  ASSERT_EQ(1, GetSstFileCount(dbname_));

  // (1, 1, 2, 4) -> (8)
  GenerateNewFile(&rnd, &key_idx);
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[3].path));

  // (1, 8)
  GenerateNewFile(&rnd, &key_idx);
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[3].path));
  ASSERT_EQ(1, GetSstFileCount(dbname_));

  // (1, 1, 8) -> (2, 8)
  GenerateNewFile(&rnd, &key_idx);
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[3].path));
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));

  // (1, 2, 8)
  GenerateNewFile(&rnd, &key_idx);
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[3].path));
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
  ASSERT_EQ(1, GetSstFileCount(dbname_));

  // (1, 1, 2, 8) -> (4, 8)
  GenerateNewFile(&rnd, &key_idx);
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[2].path));
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[3].path));

  // (1, 4, 8)
  GenerateNewFile(&rnd, &key_idx);
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[3].path));
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[2].path));
  ASSERT_EQ(1, GetSstFileCount(dbname_));

  for (int i = 0; i < key_idx; i++) {
    auto v = Get(Key(i));
    ASSERT_NE(v, "NOT_FOUND");
    ASSERT_TRUE(v.size() == 1 || v.size() == 10000);
  }

  Reopen(options);

  for (int i = 0; i < key_idx; i++) {
    auto v = Get(Key(i));
    ASSERT_NE(v, "NOT_FOUND");
    ASSERT_TRUE(v.size() == 1 || v.size() == 10000);
  }

  Destroy(options);
}

TEST_P(DBTestUniversalCompaction, IncreaseUniversalCompactionNumLevels) {
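  // verify_func walks the column family with an iterator and checks that
  // exactly keys 0..num_keys_in_db are present, in key order.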
  std::function<void(int)> verify_func = [&](int num_keys_in_db) {
    std::string keys_in_db;
    Iterator* iter = dbfull()->NewIterator(ReadOptions(), handles_[1]);
    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
      keys_in_db.append(iter->key().ToString());
      keys_in_db.push_back(',');
    }
    delete iter;

    std::string expected_keys;
    for (int i = 0; i <= num_keys_in_db; i++) {
      expected_keys.append(Key(i));
      expected_keys.push_back(',');
    }

    ASSERT_EQ(keys_in_db, expected_keys);
  };

  Random rnd(301);
  int max_key1 = 200;
  int max_key2 = 600;
  int max_key3 = 800;

  // Stage 1: open a DB with universal compaction, num_levels=1
  Options options = CurrentOptions();
  options.compaction_style = kCompactionStyleUniversal;
  options.num_levels = 1;
  options.write_buffer_size = 100 << 10;  // 100KB
  options.level0_file_num_compaction_trigger = 3;
  options = CurrentOptions(options);
  CreateAndReopenWithCF({"pikachu"}, options);

  for (int i = 0; i <= max_key1; i++) {
    // each value is 10K
    ASSERT_OK(Put(1, Key(i), RandomString(&rnd, 10000)));
  }
  ASSERT_OK(Flush(1));
  dbfull()->TEST_WaitForCompact();

  int non_level0_num_files = 0;
  for (int i = 1; i < options.num_levels; i++) {
    non_level0_num_files += NumTableFilesAtLevel(i, 1);
  }
  ASSERT_EQ(non_level0_num_files, 0);

  // Stage 2: reopen with universal compaction, num_levels=4
  options.compaction_style = kCompactionStyleUniversal;
  options.num_levels = 4;
  options = CurrentOptions(options);
  ReopenWithColumnFamilies({"default", "pikachu"}, options);

  verify_func(max_key1);

  // Insert more keys
  for (int i = max_key1 + 1; i <= max_key2; i++) {
    // each value is 10K
    ASSERT_OK(Put(1, Key(i), RandomString(&rnd, 10000)));
  }
  ASSERT_OK(Flush(1));
  dbfull()->TEST_WaitForCompact();

  verify_func(max_key2);
  // Compaction to non-L0 has happened.
  ASSERT_GT(NumTableFilesAtLevel(options.num_levels - 1, 1), 0);

  // Stage 3: compact everything back into L0 (still with num_levels=4),
  // then reopen with num_levels=1.
  options.num_levels = 4;
  options.target_file_size_base = INT_MAX;
  ReopenWithColumnFamilies({"default", "pikachu"}, options);
  // Compact all to level 0
  CompactRangeOptions compact_options;
  compact_options.change_level = true;
  compact_options.target_level = 0;
  dbfull()->CompactRange(compact_options, handles_[1], nullptr, nullptr);
  // Need to restart once to remove the higher-level records from the
  // manifest.
  ReopenWithColumnFamilies({"default", "pikachu"}, options);
  // Final reopen with num_levels=1
  options.compaction_style = kCompactionStyleUniversal;
  options.num_levels = 1;
  options = CurrentOptions(options);
  ReopenWithColumnFamilies({"default", "pikachu"}, options);

  // Insert more keys
  for (int i = max_key2 + 1; i <= max_key3; i++) {
    // each value is 10K
    ASSERT_OK(Put(1, Key(i), RandomString(&rnd, 10000)));
  }
  ASSERT_OK(Flush(1));
  dbfull()->TEST_WaitForCompact();
  verify_func(max_key3);
}

TEST_P(DBTestUniversalCompaction, UniversalCompactionSecondPathRatio) {
  if (!Snappy_Supported()) {
    return;
  }
  Options options;
  options.db_paths.emplace_back(dbname_, 500 * 1024);
  options.db_paths.emplace_back(dbname_ + "_2", 1024 * 1024 * 1024);
  options.compaction_style = kCompactionStyleUniversal;
  options.write_buffer_size = 100 << 10;  // 100KB
  options.level0_file_num_compaction_trigger = 2;
  options.num_levels = 1;
  options = CurrentOptions(options);
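  // Same idea as UniversalCompactionFourPaths, but with only two paths: the
  // first path's 500KB budget holds the small runs, and anything that would
  // overflow it is expected to land in the second, effectively unbounded
  // path.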

  std::vector<std::string> filenames;
  env_->GetChildren(options.db_paths[1].path, &filenames);
  // Delete archival files.
  for (size_t i = 0; i < filenames.size(); ++i) {
    env_->DeleteFile(options.db_paths[1].path + "/" + filenames[i]);
  }
  env_->DeleteDir(options.db_paths[1].path);
  Reopen(options);

  Random rnd(301);
  int key_idx = 0;

  // First three 110KB files are not going to second path.
  // After that, (100K, 200K)
  for (int num = 0; num < 3; num++) {
    GenerateNewFile(&rnd, &key_idx);
  }

  // Another 110KB triggers a compaction to 400K file to second path
  GenerateNewFile(&rnd, &key_idx);
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));

  // (1, 4)
  GenerateNewFile(&rnd, &key_idx);
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
  ASSERT_EQ(1, GetSstFileCount(dbname_));

  // (1, 1, 4) -> (2, 4)
  GenerateNewFile(&rnd, &key_idx);
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
  ASSERT_EQ(1, GetSstFileCount(dbname_));

  // (1, 2, 4)
  GenerateNewFile(&rnd, &key_idx);
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
  ASSERT_EQ(2, GetSstFileCount(dbname_));

  // (1, 1, 2, 4) -> (8)
  GenerateNewFile(&rnd, &key_idx);
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
  ASSERT_EQ(0, GetSstFileCount(dbname_));

  // (1, 8)
  GenerateNewFile(&rnd, &key_idx);
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
  ASSERT_EQ(1, GetSstFileCount(dbname_));

  // (1, 1, 8) -> (2, 8)
  GenerateNewFile(&rnd, &key_idx);
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
  ASSERT_EQ(1, GetSstFileCount(dbname_));

  // (1, 2, 8)
  GenerateNewFile(&rnd, &key_idx);
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
  ASSERT_EQ(2, GetSstFileCount(dbname_));

  // (1, 1, 2, 8) -> (4, 8)
  GenerateNewFile(&rnd, &key_idx);
  ASSERT_EQ(2, GetSstFileCount(options.db_paths[1].path));
  ASSERT_EQ(0, GetSstFileCount(dbname_));

  // (1, 4, 8)
  GenerateNewFile(&rnd, &key_idx);
  ASSERT_EQ(2, GetSstFileCount(options.db_paths[1].path));
  ASSERT_EQ(1, GetSstFileCount(dbname_));

  for (int i = 0; i < key_idx; i++) {
    auto v = Get(Key(i));
    ASSERT_NE(v, "NOT_FOUND");
    ASSERT_TRUE(v.size() == 1 || v.size() == 10000);
  }

  Reopen(options);

  for (int i = 0; i < key_idx; i++) {
    auto v = Get(Key(i));
    ASSERT_NE(v, "NOT_FOUND");
    ASSERT_TRUE(v.size() == 1 || v.size() == 10000);
  }

  Destroy(options);
}

INSTANTIATE_TEST_CASE_P(UniversalCompactionNumLevels, DBTestUniversalCompaction,
                        ::testing::Values(1, 3, 5));

@@ -731,6 +1229,7 @@ TEST_P(DBTestUniversalManualCompactionOutputPathId,
INSTANTIATE_TEST_CASE_P(DBTestUniversalManualCompactionOutputPathId,
                        DBTestUniversalManualCompactionOutputPathId,
                        ::testing::Values(1, 8));

}  // namespace rocksdb

int main(int argc, char** argv) {