Cancel manual compaction in thread-pool queue (#9557)

Summary:
Fix the issue that `DisableManualCompaction()` has to wait for a scheduled manual compaction to start executing before it can cancel the job.
When a manual compaction that is still in the thread-pool queue is canceled, set the job's `is_canceled` flag to true and clean up its resources.
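The core idea, as a minimal self-contained sketch (hypothetical `QueuedJob`/`WorkerQueue` types, not RocksDB's actual classes): canceling a job that is still sitting in the worker queue only flips a flag under the queue lock, and the worker discards the job when it finally dequeues it, so the canceler never has to wait for the queue to drain.

#include <condition_variable>
#include <deque>
#include <functional>
#include <mutex>

struct QueuedJob {
  bool is_canceled = false;    // written under the queue mutex
  std::function<void()> work;  // the actual compaction-like task
};

class WorkerQueue {
 public:
  void Push(QueuedJob* job) {
    std::lock_guard<std::mutex> l(mu_);
    queue_.push_back(job);
    cv_.notify_one();
  }

  // Cancel every queued job without waiting for a worker to pick them up.
  void CancelQueued() {
    std::lock_guard<std::mutex> l(mu_);
    for (QueuedJob* job : queue_) {
      job->is_canceled = true;
    }
  }

  void Stop() {
    std::lock_guard<std::mutex> l(mu_);
    stopped_ = true;
    cv_.notify_all();
  }

  void WorkerLoop() {
    std::unique_lock<std::mutex> l(mu_);
    while (true) {
      cv_.wait(l, [this] { return stopped_ || !queue_.empty(); });
      if (stopped_) return;
      QueuedJob* job = queue_.front();
      queue_.pop_front();
      if (job->is_canceled) {
        delete job;  // canceled while queued: release resources, skip the work
        continue;
      }
      l.unlock();
      job->work();  // run outside the lock
      delete job;
      l.lock();
    }
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  std::deque<QueuedJob*> queue_;
  bool stopped_ = false;
};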

Pull Request resolved: https://github.com/facebook/rocksdb/pull/9557

Test Plan: added a unit test that would hang without this change

Reviewed By: ajkr

Differential Revision: D34214910

Pulled By: jay-zhuang

fbshipit-source-id: 89dbaee78ddf26eb13ce862c2b15f4a098b36a78
main
Jay Zhuang authored 3 years ago, committed by Facebook GitHub Bot
parent ad2cab8f0c
commit a0c569ee1d

Files changed:
1. HISTORY.md (1)
2. db/db_compaction_test.cc (117)
3. db/db_impl/db_impl.h (1)
4. db/db_impl/db_impl_compaction_flush.cc (181)

@@ -8,6 +8,7 @@
### Performance Improvements
* Mitigated the overhead of building the file location hash table used by the online LSM tree consistency checks, which can improve performance for certain workloads (see #9351).
* Switched to using a sorted `std::vector` instead of `std::map` for storing the metadata objects for blob files, which can improve performance for certain workloads, especially when the number of blob files is high.
* `DisableManualCompaction()` no longer has to wait for a scheduled manual compaction to start executing in the thread pool before it can cancel the job.
### Public API changes
* Require C++17 compatible compiler (GCC >= 7, Clang >= 5, Visual Studio >= 2017). See #9388.

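For callers, the behavior guaranteed by this changelog entry looks roughly like the following (a hedged sketch using the public RocksDB API; `db` is assumed to be an already-opened `rocksdb::DB*`, and the interesting case is when the compaction thread pool is busy so the manual compaction stays queued):

#include <cassert>
#include <thread>

#include "rocksdb/db.h"

void CancelQueuedManualCompaction(rocksdb::DB* db) {
  std::thread compact_thread([db] {
    rocksdb::CompactRangeOptions cro;
    cro.exclusive_manual_compaction = true;
    rocksdb::Status s = db->CompactRange(cro, nullptr, nullptr);
    // With this fix, a manual compaction canceled while still queued comes
    // back as Incomplete instead of blocking until a worker thread frees up.
    assert(s.ok() || s.IsIncomplete());
  });
  // Returns promptly even if the manual compaction has not started running.
  db->DisableManualCompaction();
  compact_thread.join();
}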
@@ -6797,6 +6797,123 @@ TEST_F(DBCompactionTest, FIFOWarm) {
  Destroy(options);
}
TEST_F(DBCompactionTest, DisableManualCompactionThreadQueueFull) {
  const int kNumL0Files = 4;

  SyncPoint::GetInstance()->LoadDependency(
      {{"DBImpl::RunManualCompaction:Scheduled",
        "DBCompactionTest::DisableManualCompactionThreadQueueFull:"
        "PreDisableManualCompaction"}});
  SyncPoint::GetInstance()->EnableProcessing();

  Options options = CurrentOptions();
  options.level0_file_num_compaction_trigger = kNumL0Files;
  Reopen(options);

  // Block the compaction thread pool so the scheduled manual compaction
  // stays in the queue.
  test::SleepingBackgroundTask sleeping_task_low;
  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
                 Env::Priority::LOW);

  // Generate files, but avoid triggering auto compaction.
  for (int i = 0; i < kNumL0Files / 2; i++) {
    ASSERT_OK(Put(Key(1), "value1"));
    ASSERT_OK(Put(Key(2), "value2"));
    ASSERT_OK(Flush());
  }

  port::Thread compact_thread([&]() {
    CompactRangeOptions cro;
    cro.exclusive_manual_compaction = true;
    auto s = db_->CompactRange(cro, nullptr, nullptr);
    ASSERT_TRUE(s.IsIncomplete());
  });

  TEST_SYNC_POINT(
      "DBCompactionTest::DisableManualCompactionThreadQueueFull:"
      "PreDisableManualCompaction");

  // Generate more files to trigger auto compaction, which is scheduled after
  // the manual compaction. We have to generate 4 more files because the
  // existing files are already pending compaction.
  for (int i = 0; i < kNumL0Files; i++) {
    ASSERT_OK(Put(Key(1), "value1"));
    ASSERT_OK(Put(Key(2), "value2"));
    ASSERT_OK(Flush());
  }
  ASSERT_EQ(ToString(kNumL0Files + (kNumL0Files / 2)), FilesPerLevel(0));

  db_->DisableManualCompaction();

  // CompactRange should return before the compaction has the chance to run.
  compact_thread.join();

  sleeping_task_low.WakeUp();
  sleeping_task_low.WaitUntilDone();
  ASSERT_OK(dbfull()->TEST_WaitForCompact(true));
  ASSERT_EQ("0,1", FilesPerLevel(0));
}
TEST_F(DBCompactionTest, DisableManualCompactionThreadQueueFullDBClose) {
  const int kNumL0Files = 4;

  SyncPoint::GetInstance()->LoadDependency(
      {{"DBImpl::RunManualCompaction:Scheduled",
        "DBCompactionTest::DisableManualCompactionThreadQueueFull:"
        "PreDisableManualCompaction"}});
  SyncPoint::GetInstance()->EnableProcessing();

  Options options = CurrentOptions();
  options.level0_file_num_compaction_trigger = kNumL0Files;
  Reopen(options);

  // Block the compaction thread pool so the scheduled manual compaction
  // stays in the queue.
  test::SleepingBackgroundTask sleeping_task_low;
  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
                 Env::Priority::LOW);

  // Generate files, but avoid triggering auto compaction.
  for (int i = 0; i < kNumL0Files / 2; i++) {
    ASSERT_OK(Put(Key(1), "value1"));
    ASSERT_OK(Put(Key(2), "value2"));
    ASSERT_OK(Flush());
  }

  port::Thread compact_thread([&]() {
    CompactRangeOptions cro;
    cro.exclusive_manual_compaction = true;
    auto s = db_->CompactRange(cro, nullptr, nullptr);
    ASSERT_TRUE(s.IsIncomplete());
  });

  TEST_SYNC_POINT(
      "DBCompactionTest::DisableManualCompactionThreadQueueFull:"
      "PreDisableManualCompaction");

  // Generate more files to trigger auto compaction, which is scheduled after
  // the manual compaction. We have to generate 4 more files because the
  // existing files are already pending compaction.
  for (int i = 0; i < kNumL0Files; i++) {
    ASSERT_OK(Put(Key(1), "value1"));
    ASSERT_OK(Put(Key(2), "value2"));
    ASSERT_OK(Flush());
  }
  ASSERT_EQ(ToString(kNumL0Files + (kNumL0Files / 2)), FilesPerLevel(0));

  db_->DisableManualCompaction();

  // CompactRange should return before the compaction has the chance to run.
  compact_thread.join();

  // Try closing the DB while the canceled manual compaction is still in the
  // queue, with an auto-triggered compaction queued behind it.
  auto s = db_->Close();
  ASSERT_OK(s);

  sleeping_task_low.WakeUp();
  sleeping_task_low.WaitUntilDone();
}
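A brief note on the test scaffolding above: both tests rely on RocksDB's `SyncPoint` facility to order events across threads. The annotated snippet below restates the dependency used here; the comments describe the `LoadDependency` semantics the tests depend on.

// LoadDependency({{"A", "B"}}) makes any thread that reaches
// TEST_SYNC_POINT("B") block until some thread has passed TEST_SYNC_POINT("A").
SyncPoint::GetInstance()->LoadDependency(
    {{"DBImpl::RunManualCompaction:Scheduled",  // fires once the job is queued
      "DBCompactionTest::DisableManualCompactionThreadQueueFull:"
      "PreDisableManualCompaction"}});  // reached by the test body
SyncPoint::GetInstance()->EnableProcessing();
// Net effect: DisableManualCompaction() is called only after the manual
// compaction has been pushed into the (blocked) thread-pool queue, but
// before any worker thread can run it.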
TEST_F(DBCompactionTest,
       DisableManualCompactionDoesNotWaitForDrainingAutomaticCompaction) {
  // When `CompactRangeOptions::exclusive_manual_compaction == true`, we wait

@@ -1536,6 +1536,7 @@ class DBImpl : public DB {
    ManualCompactionState* manual_compaction_state;  // nullptr if non-manual
    // task limiter token is requested during compaction picking.
    std::unique_ptr<TaskLimiterToken> task_token;

    bool is_canceled = false;
  };

  struct CompactionArg {
@@ -1760,7 +1760,7 @@ Status DBImpl::RunManualCompaction(
         input_level >= 0);

  InternalKey begin_storage, end_storage;
  CompactionArg* ca = nullptr;

  bool scheduled = false;
  bool manual_conflict = false;
@@ -1879,6 +1879,16 @@ Status DBImpl::RunManualCompaction(
      assert(!exclusive || !manual_conflict);
      // Running either this or some other manual compaction
      bg_cv_.Wait();
      if (manual_compaction_paused_ > 0 && !manual.done &&
          !manual.in_progress) {
        manual.done = true;
        manual.status =
            Status::Incomplete(Status::SubCode::kManualCompactionPaused);
        if (ca && ca->prepicked_compaction) {
          ca->prepicked_compaction->is_canceled = true;
        }
        break;
      }
      if (scheduled && manual.incomplete == true) {
        assert(!manual.in_progress);
        scheduled = false;
@@ -1915,6 +1925,7 @@ Status DBImpl::RunManualCompaction(
                     &DBImpl::UnscheduleCompactionCallback);
      }
      scheduled = true;
      TEST_SYNC_POINT("DBImpl::RunManualCompaction:Scheduled");
    }
  }
@@ -2840,91 +2851,106 @@ void DBImpl::BackgroundCallFlush(Env::Priority thread_pri) {
void DBImpl::BackgroundCallCompaction(PrepickedCompaction* prepicked_compaction,
                                      Env::Priority bg_thread_pri) {
  bool made_progress = false;
  TEST_SYNC_POINT("BackgroundCallCompaction:0");
  LogBuffer log_buffer(InfoLogLevel::INFO_LEVEL,
                       immutable_db_options_.info_log.get());
  {
    InstrumentedMutexLock l(&mutex_);

    if (prepicked_compaction && prepicked_compaction->is_canceled) {
      assert(prepicked_compaction->compaction);
      ROCKS_LOG_BUFFER(&log_buffer, "[%s] Skip canceled manual compaction job",
                       prepicked_compaction->compaction->column_family_data()
                           ->GetName()
                           .c_str());
      prepicked_compaction->compaction->ReleaseCompactionFiles(
          Status::Incomplete(Status::SubCode::kManualCompactionPaused));
      delete prepicked_compaction->compaction;
    } else {
      JobContext job_context(next_job_id_.fetch_add(1), true);
      // This call will unlock/lock the mutex to wait for current running
      // IngestExternalFile() calls to finish.
      WaitForIngestFile();

      num_running_compactions_++;

      std::unique_ptr<std::list<uint64_t>::iterator>
          pending_outputs_inserted_elem(new std::list<uint64_t>::iterator(
              CaptureCurrentFileNumberInPendingOutputs()));

      assert((bg_thread_pri == Env::Priority::BOTTOM &&
              bg_bottom_compaction_scheduled_) ||
             (bg_thread_pri == Env::Priority::LOW && bg_compaction_scheduled_));
      Status s = BackgroundCompaction(&made_progress, &job_context, &log_buffer,
                                      prepicked_compaction, bg_thread_pri);
      TEST_SYNC_POINT("BackgroundCallCompaction:1");
      if (s.IsBusy()) {
        bg_cv_.SignalAll();  // In case a waiter can proceed despite the error
        mutex_.Unlock();
        immutable_db_options_.clock->SleepForMicroseconds(
            10000);  // prevent hot loop
        mutex_.Lock();
      } else if (!s.ok() && !s.IsShutdownInProgress() &&
                 !s.IsManualCompactionPaused() && !s.IsColumnFamilyDropped()) {
        // Wait a little bit before retrying background compaction in
        // case this is an environmental problem and we do not want to
        // chew up resources for failed compactions for the duration of
        // the problem.
        uint64_t error_cnt =
            default_cf_internal_stats_->BumpAndGetBackgroundErrorCount();
        bg_cv_.SignalAll();  // In case a waiter can proceed despite the error
        mutex_.Unlock();
        log_buffer.FlushBufferToLog();
        ROCKS_LOG_ERROR(immutable_db_options_.info_log,
                        "Waiting after background compaction error: %s, "
                        "Accumulated background error counts: %" PRIu64,
                        s.ToString().c_str(), error_cnt);
        LogFlush(immutable_db_options_.info_log);
        immutable_db_options_.clock->SleepForMicroseconds(1000000);
        mutex_.Lock();
      } else if (s.IsManualCompactionPaused()) {
        assert(prepicked_compaction);
        ManualCompactionState* m =
            prepicked_compaction->manual_compaction_state;
        assert(m);
        ROCKS_LOG_BUFFER(&log_buffer, "[%s] [JOB %d] Manual compaction paused",
                         m->cfd->GetName().c_str(), job_context.job_id);
      }

      ReleaseFileNumberFromPendingOutputs(pending_outputs_inserted_elem);

      // If compaction failed, we want to delete all temporary files that we
      // might have created (they might not be all recorded in job_context in
      // case of a failure). Thus, we force full scan in FindObsoleteFiles()
      FindObsoleteFiles(&job_context, !s.ok() && !s.IsShutdownInProgress() &&
                                          !s.IsManualCompactionPaused() &&
                                          !s.IsColumnFamilyDropped() &&
                                          !s.IsBusy());
      TEST_SYNC_POINT("DBImpl::BackgroundCallCompaction:FoundObsoleteFiles");

      // delete unnecessary files if any, this is done outside the mutex
      if (job_context.HaveSomethingToClean() ||
          job_context.HaveSomethingToDelete() || !log_buffer.IsEmpty()) {
        mutex_.Unlock();
        // Have to flush the info logs before bg_compaction_scheduled_--
        // because if bg_flush_scheduled_ becomes 0 and the lock is
        // released, the deconstructor of DB can kick in and destroy all the
        // states of DB so info_log might not be available after that point.
        // It also applies to access other states that DB owns.
        log_buffer.FlushBufferToLog();
        if (job_context.HaveSomethingToDelete()) {
          PurgeObsoleteFiles(job_context);
          TEST_SYNC_POINT(
              "DBImpl::BackgroundCallCompaction:PurgedObsoleteFiles");
        }
        job_context.Clean();
        mutex_.Lock();
      }

      assert(num_running_compactions_ > 0);
      num_running_compactions_--;
    }

    if (bg_thread_pri == Env::Priority::LOW) {
      bg_compaction_scheduled_--;
    } else {
@@ -2943,7 +2969,6 @@ void DBImpl::BackgroundCallCompaction(PrepickedCompaction* prepicked_compaction,
    // must be done before we potentially signal the DB close process to
    // proceed below.
    prepicked_compaction->task_token.reset();
  }

  if (made_progress ||
