Remove wait_unscheduled from waitForCompact internal API (#11443)

Summary:
Context:

In pull request https://github.com/facebook/rocksdb/issues/11436, we are introducing a new public API `WaitForCompact(const WaitForCompactOptions& wait_for_compact_options)`. This API invokes the internal implementation `WaitForCompact(bool wait_unscheduled=false)`. The `wait_unscheduled` parameter indicates whether to wait for compactions that are not yet scheduled but are required to process items in the queue.

In certain cases, we are unable to wait for compactions, such as during a shutdown or when background jobs are paused. It is important to return the appropriate status in these scenarios. For all other cases, we should wait for all compaction and flush jobs, including the unscheduled ones. The primary purpose of this new API is to wait until the system has resolved its compaction debt. Currently, the usage of `wait_unscheduled` is limited to test code.

This pull request eliminates the usage of `wait_unscheduled`. The internal `WaitForCompact()` API now waits for unscheduled compactions unless the db is undergoing a shutdown. In the event of a shutdown, the API returns `Status::ShutdownInProgress()`.

Additionally, a new parameter, `abort_on_pause`, has been introduced with a default value of `false`. This parameter addresses the possibility of waiting indefinitely for unscheduled jobs if `PauseBackgroundWork()` is called before `WaitForCompact()` is invoked. By setting `abort_on_pause` to `true`, the API will immediately return `Status::Aborted()`.

Furthermore, all tests that previously called `WaitForCompact(true)` have been fixed.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/11443

Test Plan:
Existing tests that involve a shutdown in progress:

- DBCompactionTest::CompactRangeShutdownWhileDelayed
- DBTestWithParam::PreShutdownMultipleCompaction
- DBTestWithParam::PreShutdownCompactionMiddle

Reviewed By: pdillinger

Differential Revision: D45923426

Pulled By: jaykorean

fbshipit-source-id: 7dc93fe6a6841a7d9d2d72866fa647090dba8eae
oxigraph-8.3.2
Jay Huh 2 years ago committed by Facebook GitHub Bot
parent 206fdea3d9
commit 586d78b31e
  1. 3
      db/compaction/compaction_service_test.cc
  2. 40
      db/compaction/tiered_compaction_test.cc
  3. 10
      db/db_compaction_test.cc
  4. 18
      db/db_impl/db_impl.h
  5. 20
      db/db_impl/db_impl_compaction_flush.cc
  6. 4
      db/db_impl/db_impl_debug.cc
  7. 12
      db/db_sst_test.cc
  8. 6
      db/db_test.cc
  9. 32
      db/db_test2.cc
  10. 8
      db/error_handler_fs_test.cc
  11. 3
      db/external_sst_file_test.cc
  12. 6
      db/seqno_time_test.cc
  13. 2
      db_stress_tool/db_stress_test_base.cc
  14. 20
      microbench/db_basic_bench.cc
  15. 1
      utilities/transactions/transaction_test.cc
  16. 1
      utilities/transactions/write_prepared_transaction_test.cc

@ -928,7 +928,7 @@ TEST_F(CompactionServiceTest, TablePropertiesCollector) {
}
ASSERT_OK(Flush());
}
ASSERT_OK(dbfull()->TEST_WaitForCompact(true));
ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_OK(db_->GetPropertiesOfAllTables(&fname_to_props));
@ -952,4 +952,3 @@ int main(int argc, char** argv) {
RegisterCustomObjects(argc, argv);
return RUN_ALL_TESTS();
}

@ -209,7 +209,7 @@ TEST_P(TieredCompactionTest, SequenceBasedTieredStorageUniversal) {
seq_history.emplace_back(dbfull()->GetLatestSequenceNumber());
expect_stats[0].Add(kBasicFlushStats);
}
ASSERT_OK(dbfull()->WaitForCompact(true));
ASSERT_OK(dbfull()->WaitForCompact());
// the penultimate level file temperature is not cold, all data are output to
// the penultimate level.
@ -374,7 +374,7 @@ TEST_P(TieredCompactionTest, RangeBasedTieredStorageUniversal) {
ASSERT_OK(Flush());
expect_stats[0].Add(kBasicFlushStats);
}
ASSERT_OK(dbfull()->WaitForCompact(true));
ASSERT_OK(dbfull()->WaitForCompact());
ASSERT_EQ("0,0,0,0,0,1,1", FilesPerLevel());
ASSERT_GT(GetSstSizeHelper(Temperature::kUnknown), 0);
ASSERT_GT(GetSstSizeHelper(Temperature::kCold), 0);
@ -445,8 +445,8 @@ TEST_P(TieredCompactionTest, RangeBasedTieredStorageUniversal) {
}
ASSERT_OK(Flush());
}
ASSERT_OK(dbfull()->WaitForCompact(
true)); // make sure the compaction is able to finish
// make sure the compaction is able to finish
ASSERT_OK(dbfull()->WaitForCompact());
ASSERT_EQ("0,0,0,0,0,1,1", FilesPerLevel());
ASSERT_GT(GetSstSizeHelper(Temperature::kUnknown), 0);
ASSERT_GT(GetSstSizeHelper(Temperature::kCold), 0);
@ -911,7 +911,7 @@ TEST_P(TieredCompactionTest, SequenceBasedTieredStorageLevel) {
ASSERT_OK(Flush());
expect_stats[0].Add(kBasicFlushStats);
}
ASSERT_OK(dbfull()->WaitForCompact(true));
ASSERT_OK(dbfull()->WaitForCompact());
// non last level is hot
ASSERT_EQ("0,1", FilesPerLevel());
@ -954,7 +954,7 @@ TEST_P(TieredCompactionTest, SequenceBasedTieredStorageLevel) {
ASSERT_OK(Flush());
seq_history.emplace_back(dbfull()->GetLatestSequenceNumber());
}
ASSERT_OK(dbfull()->WaitForCompact(true));
ASSERT_OK(dbfull()->WaitForCompact());
ASSERT_EQ("0,1,0,0,0,0,1", FilesPerLevel());
ASSERT_GT(GetSstSizeHelper(Temperature::kUnknown), 0);
ASSERT_GT(GetSstSizeHelper(Temperature::kCold), 0);
@ -1005,7 +1005,7 @@ TEST_P(TieredCompactionTest, SequenceBasedTieredStorageLevel) {
ASSERT_OK(Flush());
seq_history.emplace_back(dbfull()->GetLatestSequenceNumber());
}
ASSERT_OK(dbfull()->WaitForCompact(true));
ASSERT_OK(dbfull()->WaitForCompact());
latest_cold_seq = seq_history[0];
ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
@ -1135,7 +1135,7 @@ TEST_P(TieredCompactionTest, RangeBasedTieredStorageLevel) {
}
ASSERT_OK(Flush());
}
ASSERT_OK(dbfull()->WaitForCompact(true));
ASSERT_OK(dbfull()->WaitForCompact());
ASSERT_EQ("0,0,0,0,0,1,1", FilesPerLevel());
ASSERT_GT(GetSstSizeHelper(Temperature::kUnknown), 0);
ASSERT_GT(GetSstSizeHelper(Temperature::kCold), 0);
@ -1265,7 +1265,7 @@ TEST_F(PrecludeLastLevelTest, MigrationFromPreserveTimeManualCompaction) {
}
ASSERT_OK(Flush());
}
ASSERT_OK(dbfull()->WaitForCompact(true));
ASSERT_OK(dbfull()->WaitForCompact());
// all data is pushed to the last level
ASSERT_EQ("0,0,0,0,0,0,1", FilesPerLevel());
@ -1327,7 +1327,7 @@ TEST_F(PrecludeLastLevelTest, MigrationFromPreserveTimeAutoCompaction) {
}
ASSERT_OK(Flush());
}
ASSERT_OK(dbfull()->WaitForCompact(true));
ASSERT_OK(dbfull()->WaitForCompact());
// all data is pushed to the last level
ASSERT_EQ("0,0,0,0,0,0,1", FilesPerLevel());
@ -1360,7 +1360,7 @@ TEST_F(PrecludeLastLevelTest, MigrationFromPreserveTimeAutoCompaction) {
});
}
ASSERT_OK(Flush());
ASSERT_OK(dbfull()->WaitForCompact(true));
ASSERT_OK(dbfull()->WaitForCompact());
}
// all data is moved up to the penultimate level
@ -1403,7 +1403,7 @@ TEST_F(PrecludeLastLevelTest, MigrationFromPreserveTimePartial) {
}
ASSERT_OK(Flush());
}
ASSERT_OK(dbfull()->WaitForCompact(true));
ASSERT_OK(dbfull()->WaitForCompact());
// all data is pushed to the last level
ASSERT_EQ("0,0,0,0,0,0,1", FilesPerLevel());
@ -1530,7 +1530,7 @@ TEST_F(PrecludeLastLevelTest, LastLevelOnlyCompactionPartial) {
}
ASSERT_OK(Flush());
}
ASSERT_OK(dbfull()->WaitForCompact(true));
ASSERT_OK(dbfull()->WaitForCompact());
// all data is pushed to the last level
ASSERT_EQ("0,0,0,0,0,0,1", FilesPerLevel());
@ -1609,7 +1609,7 @@ TEST_P(PrecludeLastLevelTestWithParms, LastLevelOnlyCompactionNoPreclude) {
}
ASSERT_OK(Flush());
}
ASSERT_OK(dbfull()->WaitForCompact(true));
ASSERT_OK(dbfull()->WaitForCompact());
// all data is pushed to the last level
ASSERT_EQ("0,0,0,0,0,0,1", FilesPerLevel());
@ -1705,7 +1705,7 @@ TEST_P(PrecludeLastLevelTestWithParms, LastLevelOnlyCompactionNoPreclude) {
manual_compaction_thread.join();
ASSERT_OK(dbfull()->WaitForCompact(true));
ASSERT_OK(dbfull()->WaitForCompact());
if (enable_preclude_last_level) {
ASSERT_NE("0,0,0,0,0,1,1", FilesPerLevel());
@ -1841,7 +1841,7 @@ TEST_P(PrecludeLastLevelTestWithParms, PeriodicCompactionToPenultimateLevel) {
}
ASSERT_OK(Flush());
ASSERT_OK(dbfull()->WaitForCompact(true));
ASSERT_OK(dbfull()->WaitForCompact());
stop_token.reset();
@ -1940,7 +1940,7 @@ TEST_F(PrecludeLastLevelTest, PartialPenultimateLevelCompaction) {
ASSERT_OK(Flush());
}
ASSERT_OK(dbfull()->WaitForCompact(true));
ASSERT_OK(dbfull()->WaitForCompact());
// L5: [0,19] [20,39] [40,299]
// L6: [0, 299]
@ -2106,7 +2106,7 @@ TEST_F(PrecludeLastLevelTest, RangeDelsCauseFileEndpointsToOverlap) {
Slice begin_key(begin_key_buf), end_key(end_key_buf);
ASSERT_OK(db_->SuggestCompactRange(db_->DefaultColumnFamily(), &begin_key,
&end_key));
ASSERT_OK(dbfull()->WaitForCompact(true));
ASSERT_OK(dbfull()->WaitForCompact());
ASSERT_EQ("0,0,0,0,0,3,3", FilesPerLevel());
ASSERT_EQ(1, per_key_comp_num);
verify_db();
@ -2116,7 +2116,7 @@ TEST_F(PrecludeLastLevelTest, RangeDelsCauseFileEndpointsToOverlap) {
db_->ReleaseSnapshot(snap2);
ASSERT_OK(db_->SuggestCompactRange(db_->DefaultColumnFamily(), &begin_key,
&end_key));
ASSERT_OK(dbfull()->WaitForCompact(true));
ASSERT_OK(dbfull()->WaitForCompact());
ASSERT_EQ("0,0,0,0,0,3,3", FilesPerLevel());
ASSERT_EQ(2, per_key_comp_num);
verify_db();
@ -2126,7 +2126,7 @@ TEST_F(PrecludeLastLevelTest, RangeDelsCauseFileEndpointsToOverlap) {
db_->ReleaseSnapshot(snap1);
ASSERT_OK(db_->SuggestCompactRange(db_->DefaultColumnFamily(), &begin_key,
&end_key));
ASSERT_OK(dbfull()->WaitForCompact(true));
ASSERT_OK(dbfull()->WaitForCompact());
ASSERT_EQ("0,0,0,0,0,2,3", FilesPerLevel());
ASSERT_EQ(3, per_key_comp_num);
verify_db();

@ -1105,7 +1105,7 @@ TEST_F(DBCompactionTest, CompactionSstPartitionerNonTrivial) {
ASSERT_OK(Put("bbbb1", "B"));
ASSERT_OK(Flush());
ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
ASSERT_OK(dbfull()->TEST_WaitForCompact(true));
ASSERT_OK(dbfull()->TEST_WaitForCompact());
std::vector<LiveFileMetaData> files;
dbfull()->GetLiveFilesMetaData(&files);
@ -5075,7 +5075,11 @@ TEST_F(DBCompactionTest, CompactRangeShutdownWhileDelayed) {
manual_compaction_thread.join();
TEST_SYNC_POINT(
"DBCompactionTest::CompactRangeShutdownWhileDelayed:PostManual");
if (i == 0) {
ASSERT_OK(dbfull()->TEST_WaitForCompact());
} else {
ASSERT_NOK(dbfull()->TEST_WaitForCompact());
}
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
}
}
@ -8714,7 +8718,7 @@ TEST_F(DBCompactionTest, DisableMultiManualCompaction) {
sleeping_task_low.WakeUp();
sleeping_task_low.WaitUntilDone();
ASSERT_OK(dbfull()->TEST_WaitForCompact(true));
ASSERT_OK(dbfull()->TEST_WaitForCompact());
}
TEST_F(DBCompactionTest, DisableJustStartedManualCompaction) {
@ -8846,7 +8850,7 @@ TEST_F(DBCompactionTest, DisableManualCompactionThreadQueueFull) {
sleeping_task_low.WakeUp();
sleeping_task_low.WaitUntilDone();
ASSERT_OK(dbfull()->TEST_WaitForCompact(true));
ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ("0,1", FilesPerLevel(0));
}

@ -1053,10 +1053,16 @@ class DBImpl : public DB {
VersionSet* GetVersionSet() const { return versions_.get(); }
// Wait for any compaction
// We add a bool parameter to wait for unscheduledCompactions_ == 0, but this
// is only for the special test of CancelledCompactions
Status WaitForCompact(bool waitUnscheduled = false);
// Wait for all flush and compactions jobs to finish. Jobs to wait include the
// unscheduled (queued, but not scheduled yet). If the db is shutting down,
// Status::ShutdownInProgress will be returned. If PauseBackgroundWork() was
// called prior to this, this may potentially wait for unscheduled jobs
// indefinitely. abort_on_pause can be set to true to abort, and
// Status::Aborted will be returned immediately. This may also never return if
// there's sufficient ongoing writes that keeps flush and compaction going
// without stopping. The user would have to cease all the writes to DB to make
// this eventually return in a stable state.
Status WaitForCompact(bool abort_on_pause = false);
#ifndef NDEBUG
// Compact any files in the named level that overlap [*begin, *end]
@ -1096,9 +1102,7 @@ class DBImpl : public DB {
Status TEST_WaitForFlushMemTable(ColumnFamilyHandle* column_family = nullptr);
// Wait for any compaction
// We add a bool parameter to wait for unscheduledCompactions_ == 0, but this
// is only for the special test of CancelledCompactions
Status TEST_WaitForCompact(bool waitUnscheduled = false);
Status TEST_WaitForCompact(bool abort_on_pause = false);
// Wait for any background purge
Status TEST_WaitForPurge();

@ -3957,16 +3957,24 @@ void DBImpl::GetSnapshotContext(
*snapshot_seqs = snapshots_.GetAll(earliest_write_conflict_snapshot);
}
Status DBImpl::WaitForCompact(bool wait_unscheduled) {
// Wait until the compaction completes
Status DBImpl::WaitForCompact(bool abort_on_pause) {
InstrumentedMutexLock l(&mutex_);
while ((bg_bottom_compaction_scheduled_ || bg_compaction_scheduled_ ||
bg_flush_scheduled_ ||
(wait_unscheduled && unscheduled_compactions_)) &&
for (;;) {
if (shutting_down_.load(std::memory_order_acquire)) {
return Status::ShutdownInProgress();
}
if (bg_work_paused_ && abort_on_pause) {
return Status::Aborted();
}
if ((bg_bottom_compaction_scheduled_ || bg_compaction_scheduled_ ||
bg_flush_scheduled_ || unscheduled_compactions_ ||
unscheduled_flushes_) &&
(error_handler_.GetBGError().ok())) {
bg_cv_.Wait();
}
} else {
return error_handler_.GetBGError();
}
}
}
} // namespace ROCKSDB_NAMESPACE

@ -178,9 +178,9 @@ Status DBImpl::TEST_WaitForFlushMemTable(ColumnFamilyHandle* column_family) {
return WaitForFlushMemTable(cfd, nullptr, false);
}
Status DBImpl::TEST_WaitForCompact(bool wait_unscheduled) {
Status DBImpl::TEST_WaitForCompact(bool abort_on_pause) {
// Wait until the compaction completes
return WaitForCompact(wait_unscheduled);
return WaitForCompact(abort_on_pause);
}
Status DBImpl::TEST_WaitForPurge() {

@ -743,7 +743,7 @@ TEST_P(DBSSTTestRateLimit, RateLimitedDelete) {
// Compaction will move the 4 files in L0 to trash and create 1 L1 file
ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
ASSERT_OK(dbfull()->TEST_WaitForCompact(true));
ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ("0,1", FilesPerLevel(0));
uint64_t delete_start_time = env_->NowMicros();
@ -815,7 +815,7 @@ TEST_F(DBSSTTest, RateLimitedWALDelete) {
CompactRangeOptions cro;
cro.bottommost_level_compaction = BottommostLevelCompaction::kForceOptimized;
ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
ASSERT_OK(dbfull()->TEST_WaitForCompact(true));
ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ("0,1", FilesPerLevel(0));
sfm->WaitForEmptyTrash();
@ -1224,7 +1224,7 @@ TEST_F(DBSSTTest, CancellingCompactionsWorks) {
ASSERT_OK(Put(Key(i), rnd.RandomString(50)));
}
ASSERT_OK(Flush());
ASSERT_OK(dbfull()->TEST_WaitForCompact(true));
ASSERT_OK(dbfull()->TEST_WaitForCompact());
// Because we set a callback in CancelledCompaction, we actually
// let the compaction run
@ -1281,7 +1281,7 @@ TEST_F(DBSSTTest, CancellingManualCompactionsWorks) {
.IsCompactionTooLarge());
// Wait for manual compaction to get scheduled and finish
ASSERT_OK(dbfull()->TEST_WaitForCompact(true));
ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ(sfm->GetCompactionsReservedSize(), 0);
// Make sure the stat is bumped
@ -1297,7 +1297,7 @@ TEST_F(DBSSTTest, CancellingManualCompactionsWorks) {
.IsCompactionTooLarge());
// Wait for manual compaction to get scheduled and finish
ASSERT_OK(dbfull()->TEST_WaitForCompact(true));
ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ(dbfull()->immutable_db_options().statistics.get()->getTickerCount(
COMPACTION_CANCELLED),
@ -1314,7 +1314,7 @@ TEST_F(DBSSTTest, CancellingManualCompactionsWorks) {
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
ASSERT_OK(dbfull()->CompactFiles(ROCKSDB_NAMESPACE::CompactionOptions(),
l0_files, 0));
ASSERT_OK(dbfull()->TEST_WaitForCompact(true));
ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ(sfm->GetCompactionsReservedSize(), 0);
ASSERT_GT(completed_compactions, 0);

@ -583,7 +583,7 @@ TEST_F(DBTest, LevelReopenWithFIFO) {
TryReopenWithColumnFamilies({"default", "pikachu"}, fifo_options));
// For FIFO to pick a compaction
ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]));
ASSERT_OK(dbfull()->TEST_WaitForCompact(false));
ASSERT_OK(dbfull()->TEST_WaitForBackgroundWork());
for (int g = 0; g < kKeyCount; ++g) {
std::string get_key = std::string(1, char('a' + g));
int status_index = i / kKeyCount;
@ -4937,7 +4937,7 @@ TEST_P(DBTestWithParam, PreShutdownMultipleCompaction) {
ASSERT_GE(operation_count[ThreadStatus::OP_COMPACTION], 1);
CancelAllBackgroundWork(db_);
TEST_SYNC_POINT("DBTest::PreShutdownMultipleCompaction:VerifyPreshutdown");
ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_OK(dbfull()->TEST_WaitForBackgroundWork());
// Record the number of compactions at a time.
for (int i = 0; i < ThreadStatus::NUM_OP_TYPES; ++i) {
operation_count[i] = 0;
@ -5024,7 +5024,7 @@ TEST_P(DBTestWithParam, PreShutdownCompactionMiddle) {
CancelAllBackgroundWork(db_);
TEST_SYNC_POINT("DBTest::PreShutdownCompactionMiddle:Preshutdown");
TEST_SYNC_POINT("DBTest::PreShutdownCompactionMiddle:VerifyPreshutdown");
ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_OK(dbfull()->TEST_WaitForBackgroundWork());
// Record the number of compactions at a time.
for (int i = 0; i < ThreadStatus::NUM_OP_TYPES; ++i) {
operation_count[i] = 0;

@ -3044,7 +3044,7 @@ TEST_F(DBTest2, PausingManualCompaction1) {
.IsManualCompactionPaused());
// Wait for compactions to get scheduled and stopped
ASSERT_OK(dbfull()->TEST_WaitForCompact(true));
ASSERT_OK(dbfull()->TEST_WaitForCompact());
// Get file names after compaction is stopped
files_meta.clear();
@ -3064,7 +3064,7 @@ TEST_F(DBTest2, PausingManualCompaction1) {
files_before_compact, 0)
.IsManualCompactionPaused());
// Wait for manual compaction to get scheduled and finish
ASSERT_OK(dbfull()->TEST_WaitForCompact(true));
ASSERT_OK(dbfull()->TEST_WaitForCompact());
files_meta.clear();
files_after_compact.clear();
@ -3097,7 +3097,7 @@ TEST_F(DBTest2, PausingManualCompaction2) {
}
ASSERT_OK(Flush());
}
ASSERT_OK(dbfull()->TEST_WaitForCompact(true));
ASSERT_OK(dbfull()->TEST_WaitForCompact());
std::vector<LiveFileMetaData> files_meta;
dbfull()->GetLiveFilesMetaData(&files_meta);
@ -3139,7 +3139,7 @@ TEST_F(DBTest2, PausingManualCompaction3) {
ASSERT_TRUE(dbfull()
->CompactRange(compact_options, nullptr, nullptr)
.IsManualCompactionPaused());
ASSERT_OK(dbfull()->TEST_WaitForCompact(true));
ASSERT_OK(dbfull()->TEST_WaitForCompact());
// As manual compaction disabled, not even reach sync point
ASSERT_EQ(run_manual_compactions, 0);
ASSERT_EQ("2,3,4,5,6,7,8", FilesPerLevel());
@ -3148,7 +3148,7 @@ TEST_F(DBTest2, PausingManualCompaction3) {
"CompactionJob::Run():PausingManualCompaction:1");
dbfull()->EnableManualCompaction();
ASSERT_OK(dbfull()->CompactRange(compact_options, nullptr, nullptr));
ASSERT_OK(dbfull()->TEST_WaitForCompact(true));
ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ("0,0,0,0,0,0,2", FilesPerLevel());
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
@ -3203,14 +3203,14 @@ TEST_F(DBTest2, PausingManualCompaction4) {
ASSERT_TRUE(dbfull()
->CompactRange(compact_options, nullptr, nullptr)
.IsManualCompactionPaused());
ASSERT_OK(dbfull()->TEST_WaitForCompact(true));
ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ(run_manual_compactions, 1);
ASSERT_EQ("2,3,4,5,6,7,8", FilesPerLevel());
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearCallBack(
"CompactionJob::Run():PausingManualCompaction:2");
ASSERT_OK(dbfull()->CompactRange(compact_options, nullptr, nullptr));
ASSERT_OK(dbfull()->TEST_WaitForCompact(true));
ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ("0,0,0,0,0,0,2", FilesPerLevel());
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
@ -3262,7 +3262,7 @@ TEST_F(DBTest2, CancelManualCompaction1) {
ASSERT_TRUE(dbfull()
->CompactRange(compact_options, nullptr, nullptr)
.IsManualCompactionPaused());
ASSERT_OK(dbfull()->TEST_WaitForCompact(true));
ASSERT_OK(dbfull()->TEST_WaitForCompact());
// Since compactions are disabled, we shouldn't start compacting.
// E.g. we should call the compaction function exactly one time.
@ -3286,7 +3286,7 @@ TEST_F(DBTest2, CancelManualCompaction1) {
ASSERT_TRUE(dbfull()
->CompactRange(compact_options, nullptr, nullptr)
.IsManualCompactionPaused());
ASSERT_OK(dbfull()->TEST_WaitForCompact(true));
ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ(compactions_run, 3);
@ -3298,7 +3298,7 @@ TEST_F(DBTest2, CancelManualCompaction1) {
// Compactions should work again if we re-enable them..
compact_options.canceled->store(false, std::memory_order_relaxed);
ASSERT_OK(dbfull()->CompactRange(compact_options, nullptr, nullptr));
ASSERT_OK(dbfull()->TEST_WaitForCompact(true));
ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ("0,0,0,0,0,0,2", FilesPerLevel());
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
@ -3362,7 +3362,7 @@ TEST_F(DBTest2, CancelManualCompaction2) {
ASSERT_TRUE(dbfull()
->CompactRange(compact_options, nullptr, nullptr)
.IsManualCompactionPaused());
ASSERT_OK(dbfull()->TEST_WaitForCompact(true));
ASSERT_OK(dbfull()->TEST_WaitForCompact());
// NOTE: as we set compact_options.max_subcompacitons = 1, and store true to
// the canceled variable from the single compacting thread (via callback),
@ -3380,7 +3380,7 @@ TEST_F(DBTest2, CancelManualCompaction2) {
// Compactions should work again if we re-enable them..
compact_options.canceled->store(false, std::memory_order_relaxed);
ASSERT_OK(dbfull()->CompactRange(compact_options, nullptr, nullptr));
ASSERT_OK(dbfull()->TEST_WaitForCompact(true));
ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ("0,0,0,0,0,0,2", FilesPerLevel());
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
@ -3455,7 +3455,7 @@ TEST_F(DBTest2, CancelManualCompactionWithListener) {
ASSERT_TRUE(dbfull()
->CompactRange(compact_options, nullptr, nullptr)
.IsManualCompactionPaused());
ASSERT_OK(dbfull()->TEST_WaitForCompact(true));
ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_GT(listener->num_compaction_started_, 0);
ASSERT_EQ(listener->num_compaction_started_, listener->num_compaction_ended_);
@ -3470,7 +3470,7 @@ TEST_F(DBTest2, CancelManualCompactionWithListener) {
ASSERT_TRUE(dbfull()
->CompactRange(compact_options, nullptr, nullptr)
.IsManualCompactionPaused());
ASSERT_OK(dbfull()->TEST_WaitForCompact(true));
ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ(listener->num_compaction_started_, 0);
ASSERT_EQ(listener->num_compaction_started_, listener->num_compaction_ended_);
@ -3492,7 +3492,7 @@ TEST_F(DBTest2, CancelManualCompactionWithListener) {
compact_options.canceled->store(false, std::memory_order_release);
ASSERT_OK(dbfull()->CompactRange(compact_options, nullptr, nullptr));
ASSERT_OK(dbfull()->TEST_WaitForCompact(true));
ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_GT(listener->num_compaction_started_, 0);
ASSERT_EQ(listener->num_compaction_started_, listener->num_compaction_ended_);
@ -5823,7 +5823,7 @@ TEST_F(DBTest2, SameSmallestInSameLevel) {
ASSERT_OK(Flush());
ASSERT_OK(db_->Merge(WriteOptions(), "key", "8"));
ASSERT_OK(Flush());
ASSERT_OK(dbfull()->TEST_WaitForCompact(true));
ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ("0,4,1", FilesPerLevel());
ASSERT_EQ("2,3,4,5,6,7,8", Get("key"));

@ -1578,7 +1578,7 @@ TEST_F(DBErrorHandlingFSTest, MultiDBCompactionError) {
}
for (auto i = 0; i < kNumDbInstances; ++i) {
Status s = static_cast<DBImpl*>(db[i])->TEST_WaitForCompact(true);
Status s = static_cast<DBImpl*>(db[i])->TEST_WaitForCompact();
ASSERT_EQ(s.severity(), Status::Severity::kSoftError);
fault_fs[i]->SetFilesystemActive(true);
}
@ -1587,7 +1587,7 @@ TEST_F(DBErrorHandlingFSTest, MultiDBCompactionError) {
for (auto i = 0; i < kNumDbInstances; ++i) {
std::string prop;
ASSERT_EQ(listener[i]->WaitForRecovery(5000000), true);
ASSERT_OK(static_cast<DBImpl*>(db[i])->TEST_WaitForCompact(true));
ASSERT_OK(static_cast<DBImpl*>(db[i])->TEST_WaitForCompact());
EXPECT_TRUE(db[i]->GetProperty(
"rocksdb.num-files-at-level" + std::to_string(0), &prop));
EXPECT_EQ(atoi(prop.c_str()), 0);
@ -1701,7 +1701,7 @@ TEST_F(DBErrorHandlingFSTest, MultiDBVariousErrors) {
}
for (auto i = 0; i < kNumDbInstances; ++i) {
Status s = static_cast<DBImpl*>(db[i])->TEST_WaitForCompact(true);
Status s = static_cast<DBImpl*>(db[i])->TEST_WaitForCompact();
switch (i) {
case 0:
ASSERT_EQ(s.severity(), Status::Severity::kSoftError);
@ -1723,7 +1723,7 @@ TEST_F(DBErrorHandlingFSTest, MultiDBVariousErrors) {
ASSERT_EQ(listener[i]->WaitForRecovery(5000000), true);
}
if (i == 1) {
ASSERT_OK(static_cast<DBImpl*>(db[i])->TEST_WaitForCompact(true));
ASSERT_OK(static_cast<DBImpl*>(db[i])->TEST_WaitForCompact());
}
EXPECT_TRUE(db[i]->GetProperty(
"rocksdb.num-files-at-level" + std::to_string(0), &prop));

@ -1289,7 +1289,7 @@ TEST_F(ExternalSSTFileTest, IngestNonExistingFile) {
ASSERT_OK(Flush());
ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
ASSERT_OK(dbfull()->TEST_WaitForCompact(true));
ASSERT_OK(dbfull()->TEST_WaitForCompact());
// After full compaction, there should be only 1 file.
std::vector<std::string> files;
@ -2857,4 +2857,3 @@ int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

@ -93,7 +93,7 @@ TEST_F(SeqnoTimeTest, TemperatureBasicUniversal) {
}
ASSERT_OK(Flush());
}
ASSERT_OK(dbfull()->WaitForCompact(true));
ASSERT_OK(dbfull()->WaitForCompact());
// All data is hot, only output to penultimate level
ASSERT_EQ("0,0,0,0,0,1", FilesPerLevel());
@ -114,7 +114,7 @@ TEST_F(SeqnoTimeTest, TemperatureBasicUniversal) {
});
}
ASSERT_OK(Flush());
ASSERT_OK(dbfull()->WaitForCompact(true));
ASSERT_OK(dbfull()->WaitForCompact());
ASSERT_GT(GetSstSizeHelper(Temperature::kUnknown), 0);
ASSERT_EQ(GetSstSizeHelper(Temperature::kCold), 0);
}
@ -128,7 +128,7 @@ TEST_F(SeqnoTimeTest, TemperatureBasicUniversal) {
});
}
ASSERT_OK(Flush());
ASSERT_OK(dbfull()->WaitForCompact(true));
ASSERT_OK(dbfull()->WaitForCompact());
}
CompactRangeOptions cro;

@ -2671,7 +2671,7 @@ void StressTest::Open(SharedState* shared) {
// wait for all compactions to finish to make sure DB is in
// clean state before executing queries.
s = static_cast_with_check<DBImpl>(db_->GetRootDB())
->WaitForCompact(true /* wait_unscheduled */);
->WaitForCompact();
if (!s.ok()) {
for (auto cf : column_families_) {
delete cf;

@ -303,7 +303,7 @@ static void DBPut(benchmark::State& state) {
if (state.thread_index() == 0) {
auto db_full = static_cast_with_check<DBImpl>(db.get());
Status s = db_full->WaitForCompact(true);
Status s = db_full->WaitForCompact();
if (!s.ok()) {
state.SkipWithError(s.ToString().c_str());
return;
@ -410,7 +410,7 @@ static void ManualCompaction(benchmark::State& state) {
if (state.thread_index() == 0) {
auto db_full = static_cast_with_check<DBImpl>(db.get());
s = db_full->WaitForCompact(true);
s = db_full->WaitForCompact();
if (!s.ok()) {
state.SkipWithError(s.ToString().c_str());
return;
@ -510,7 +510,7 @@ static void ManualFlush(benchmark::State& state) {
if (state.thread_index() == 0) {
auto db_full = static_cast_with_check<DBImpl>(db.get());
Status s = db_full->WaitForCompact(true);
Status s = db_full->WaitForCompact();
if (!s.ok()) {
state.SkipWithError(s.ToString().c_str());
return;
@ -594,7 +594,7 @@ static void DBGet(benchmark::State& state) {
}
auto db_full = static_cast_with_check<DBImpl>(db.get());
s = db_full->WaitForCompact(true);
s = db_full->WaitForCompact();
if (!s.ok()) {
state.SkipWithError(s.ToString().c_str());
return;
@ -707,7 +707,7 @@ static void SimpleGetWithPerfContext(benchmark::State& state) {
}
}
auto db_full = static_cast_with_check<DBImpl>(db.get());
s = db_full->WaitForCompact(true);
s = db_full->WaitForCompact();
if (!s.ok()) {
state.SkipWithError(s.ToString().c_str());
return;
@ -1115,7 +1115,7 @@ static void IteratorSeek(benchmark::State& state) {
}
auto db_full = static_cast_with_check<DBImpl>(db.get());
s = db_full->WaitForCompact(true);
s = db_full->WaitForCompact();
if (!s.ok()) {
state.SkipWithError(s.ToString().c_str());
return;
@ -1206,7 +1206,7 @@ static void IteratorNext(benchmark::State& state) {
}
auto db_full = static_cast_with_check<DBImpl>(db.get());
s = db_full->WaitForCompact(true);
s = db_full->WaitForCompact();
if (!s.ok()) {
state.SkipWithError(s.ToString().c_str());
return;
@ -1270,7 +1270,7 @@ static void IteratorNextWithPerfContext(benchmark::State& state) {
}
}
auto db_full = static_cast_with_check<DBImpl>(db.get());
Status s = db_full->WaitForCompact(true);
Status s = db_full->WaitForCompact();
if (!s.ok()) {
state.SkipWithError(s.ToString().c_str());
return;
@ -1368,7 +1368,7 @@ static void IteratorPrev(benchmark::State& state) {
}
auto db_full = static_cast_with_check<DBImpl>(db.get());
s = db_full->WaitForCompact(true);
s = db_full->WaitForCompact();
if (!s.ok()) {
state.SkipWithError(s.ToString().c_str());
return;
@ -1460,7 +1460,7 @@ static void PrefixSeek(benchmark::State& state) {
}
auto db_full = static_cast_with_check<DBImpl>(db.get());
s = db_full->WaitForCompact(true);
s = db_full->WaitForCompact();
if (!s.ok()) {
state.SkipWithError(s.ToString().c_str());
return;

@ -6749,4 +6749,3 @@ int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

@ -4064,4 +4064,3 @@ int main(int argc, char** argv) {
}
return RUN_ALL_TESTS();
}

Loading…
Cancel
Save