Compaction should not move data to up level (#8116)

Summary: Pull Request resolved: https://github.com/facebook/rocksdb/pull/8116

Reviewed By: ajkr, mrambacher

Differential Revision: D27353828

Pulled By: jay-zhuang

fbshipit-source-id: 42703fb01b04d92cc097d7979e64798448852e88
Branch: main
Author: Jay Zhuang (committed 3 years ago by Facebook GitHub Bot)
Parent: 24b7ebee80
Commit: a037bb35e9

Changed files:
1. HISTORY.md (1)
2. db/compact_files_test.cc (19)
3. db/compaction/compaction_picker.cc (9)

HISTORY.md

@@ -2,6 +2,7 @@
 ## Unreleased
 ### Behavior Changes
 * `ColumnFamilyOptions::sample_for_compression` now takes effect for creation of all block-based tables. Previously it only took effect for block-based tables created by flush.
+* `CompactFiles()` can no longer compact files from a lower level to an upper level, which risks corrupting the DB (details: #8063). The same validation is also applied to all compactions.
 ### Bug Fixes
 * Use thread-safe `strerror_r()` to get error messages.
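
As a caller-side illustration of this behavior change (not part of the patch): a minimal sketch assuming an already-open `rocksdb::DB* db` and a vector `files` of live SST file names, at least one of which currently sits in level 5. The helper name `DemoUpLevelRejection` is invented for this note.

    #include <cassert>
    #include <string>
    #include <vector>

    #include "rocksdb/db.h"
    #include "rocksdb/options.h"

    // Illustrative sketch only: `files` is assumed to name at least one SST
    // file that currently lives in level 5 of the column family.
    void DemoUpLevelRejection(rocksdb::DB* db,
                              const std::vector<std::string>& files) {
      rocksdb::CompactionOptions copt;
      // Output level 2 is above (numerically smaller than) the L5 input, so
      // the validation added by this change rejects the request up front.
      rocksdb::Status s = db->CompactFiles(copt, files, /*output_level=*/2);
      assert(s.IsInvalidArgument());
      // Writing the output to the same level (or deeper) is still allowed.
      s = db->CompactFiles(copt, files, /*output_level=*/5);
      assert(s.ok());
    }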

db/compact_files_test.cc

@@ -149,7 +149,7 @@ TEST_F(CompactFilesTest, MultipleLevel) {
   ASSERT_OK(db->Put(WriteOptions(), ToString(0), ""));
   ASSERT_OK(db->Flush(FlushOptions()));
-  ROCKSDB_NAMESPACE::ColumnFamilyMetaData meta;
+  ColumnFamilyMetaData meta;
   db->GetColumnFamilyMetaData(&meta);
   // Compact files except the file in L3
   std::vector<std::string> files;
@@ -160,11 +160,11 @@ TEST_F(CompactFilesTest, MultipleLevel) {
     }
   }
 
-  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency({
+  SyncPoint::GetInstance()->LoadDependency({
       {"CompactionJob::Run():Start", "CompactFilesTest.MultipleLevel:0"},
      {"CompactFilesTest.MultipleLevel:1", "CompactFilesImpl:3"},
   });
-  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
+  SyncPoint::GetInstance()->EnableProcessing();
 
   std::thread thread([&] {
     TEST_SYNC_POINT("CompactFilesTest.MultipleLevel:0");
@@ -174,8 +174,17 @@ TEST_F(CompactFilesTest, MultipleLevel) {
     TEST_SYNC_POINT("CompactFilesTest.MultipleLevel:1");
   });
 
-  ASSERT_OK(db->CompactFiles(ROCKSDB_NAMESPACE::CompactionOptions(), files, 5));
-  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
+  // Compaction cannot move data up to a higher level;
+  // here we have an input file from level 5, so the output level has to be >= 5.
+  for (int invalid_output_level = 0; invalid_output_level < 5;
+       invalid_output_level++) {
+    s = db->CompactFiles(CompactionOptions(), files, invalid_output_level);
+    std::cout << s.ToString() << std::endl;
+    ASSERT_TRUE(s.IsInvalidArgument());
+  }
+
+  ASSERT_OK(db->CompactFiles(CompactionOptions(), files, 5));
+  SyncPoint::GetInstance()->DisableProcessing();
   thread.join();
   delete db;

db/compaction/compaction_picker.cc

@@ -1004,6 +1004,7 @@ Status CompactionPicker::SanitizeCompactionInputFiles(
   // any currently-existing files.
   for (auto file_num : *input_files) {
     bool found = false;
+    int input_file_level = -1;
     for (const auto& level_meta : cf_meta.levels) {
       for (const auto& file_meta : level_meta.files) {
         if (file_num == TableFileNameToNumber(file_meta.name)) {
@@ -1013,6 +1014,7 @@ Status CompactionPicker::SanitizeCompactionInputFiles(
                                    " is already being compacted.");
           }
           found = true;
+          input_file_level = level_meta.level;
           break;
         }
       }
@@ -1025,6 +1027,13 @@ Status CompactionPicker::SanitizeCompactionInputFiles(
           "Specified compaction input file " + MakeTableFileName("", file_num) +
           " does not exist in column family " + cf_meta.name + ".");
     }
+    if (input_file_level > output_level) {
+      return Status::InvalidArgument(
+          "Cannot compact file to up level, input file: " +
+          MakeTableFileName("", file_num) + " level " +
+          ToString(input_file_level) + " > output level " +
+          ToString(output_level));
+    }
   }
 
   return Status::OK();
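
As a reading aid, the check spread across the three hunks above boils down to the comparison below. `ValidateOutputLevel` is a hypothetical, simplified stand-in written for this note; the real logic lives inline in `CompactionPicker::SanitizeCompactionInputFiles()` exactly as shown in the diff.

    #include <string>
    #include <vector>

    #include "rocksdb/status.h"

    // Hypothetical helper (illustration only): given the level on which each
    // requested input file was found, reject the request when the output level
    // is above (numerically smaller than) any input file's level.
    rocksdb::Status ValidateOutputLevel(const std::vector<int>& input_file_levels,
                                        int output_level) {
      for (int input_file_level : input_file_levels) {
        if (input_file_level > output_level) {
          return rocksdb::Status::InvalidArgument(
              "Cannot compact file to up level: input level " +
              std::to_string(input_file_level) + " > output level " +
              std::to_string(output_level));
        }
      }
      return rocksdb::Status::OK();
    }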
