If disableWAL is set, then batch commits are avoided.

Summary:
rocksdb uses batch (group) commit to write to the transaction log. But if
disableWAL is set, writes to the transaction log are skipped anyway, so
there is little value in batching them; queueing behind a batch group only
adds unnecessary latency to Puts(). This patch skips batching (and the
writer queue) when disableWAL is set.
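
For reference, callers opt into this path through WriteOptions::disableWAL.
A minimal caller-side sketch (header path, namespace, and db path are
illustrative, not part of this patch):

  #include <cassert>
  #include "rocksdb/db.h"

  int main() {
    rocksdb::DB* db;
    rocksdb::Options options;
    options.create_if_missing = true;
    rocksdb::Status s =
        rocksdb::DB::Open(options, "/tmp/disable_wal_example", &db);
    assert(s.ok());

    // With disableWAL set, this write skips the transaction log entirely,
    // and with this patch it also bypasses the group-commit writer queue.
    rocksdb::WriteOptions write_options;
    write_options.disableWAL = true;
    s = db->Put(write_options, "key", "value");
    assert(s.ok());

    delete db;
    return 0;
  }

A condensed sketch of the group-commit machinery that such writes now
bypass follows the diff below.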

Test Plan:
make check.

I am running db_stress now.

Reviewers: haobo

Reviewed By: haobo

CC: leveldb

Differential Revision: https://reviews.facebook.net/D11763
Author: Dhruba Borthakur
Branch: main
commit 6fbe4e981a, parent f3baeecd44

db/db_impl.cc: 59 changed lines
--- a/db/db_impl.cc
+++ b/db/db_impl.cc
@@ -2270,20 +2270,26 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
   StopWatch sw(env_, options_.statistics, DB_WRITE);
   MutexLock l(&mutex_);
-  writers_.push_back(&w);
-  while (!w.done && &w != writers_.front()) {
-    w.cv.Wait();
-  }
-  if (w.done) {
-    return w.status;
+  // If WAL is disabled, we avoid any queueing.
+  if (!options.disableWAL) {
+    writers_.push_back(&w);
+    while (!w.done && &w != writers_.front()) {
+      w.cv.Wait();
+    }
+    if (w.done) {
+      return w.status;
+    }
   }

   // May temporarily unlock and wait.
   Status status = MakeRoomForWrite(my_batch == nullptr);
   uint64_t last_sequence = versions_->LastSequence();
   Writer* last_writer = &w;
   if (status.ok() && my_batch != nullptr) {  // nullptr batch is for compactions
-    WriteBatch* updates = BuildBatchGroup(&last_writer);
+    WriteBatch* updates = options.disableWAL ? my_batch :
+                                               BuildBatchGroup(&last_writer);
     const SequenceNumber current_sequence = last_sequence + 1;
     WriteBatchInternal::SetSequence(updates, current_sequence);
     int my_batch_count = WriteBatchInternal::Count(updates);
@@ -2298,12 +2304,12 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
     // and protects against concurrent loggers and concurrent writes
     // into mem_.
     {
-      mutex_.Unlock();
       if (options.disableWAL) {
+        // If WAL is disabled, then we do not drop the mutex. We keep the
+        // mutex to protect concurrent insertions into the memtable.
         flush_on_destroy_ = true;
-      }
-      if (!options.disableWAL) {
+      } else {
+        mutex_.Unlock();
         status = log_->AddRecord(WriteBatchInternal::Contents(updates));
         if (status.ok() && options.sync) {
           if (options_.use_fsync) {
@@ -2328,25 +2334,29 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
         versions_->SetLastSequence(last_sequence);
         last_flushed_sequence_ = current_sequence;
       }
-      mutex_.Lock();
+      if (!options.disableWAL) {
+        mutex_.Lock();
+      }
     }
     if (updates == &tmp_batch_) tmp_batch_.Clear();
   }
-  while (true) {
-    Writer* ready = writers_.front();
-    writers_.pop_front();
-    if (ready != &w) {
-      ready->status = status;
-      ready->done = true;
-      ready->cv.Signal();
+  if (!options.disableWAL) {
+    while (true) {
+      Writer* ready = writers_.front();
+      writers_.pop_front();
+      if (ready != &w) {
+        ready->status = status;
+        ready->done = true;
+        ready->cv.Signal();
+      }
+      if (ready == last_writer) break;
     }
-    if (ready == last_writer) break;
-  }
   // Notify new head of write queue
   if (!writers_.empty()) {
     writers_.front()->cv.Signal();
+  }
   }
   return status;
 }
@@ -2410,7 +2420,6 @@ WriteBatch* DBImpl::BuildBatchGroup(Writer** last_writer) {
 // REQUIRES: this thread is currently at the front of the writer queue
 Status DBImpl::MakeRoomForWrite(bool force) {
   mutex_.AssertHeld();
-  assert(!writers_.empty());
   bool allow_delay = !force;
   bool allow_rate_limit_delay = !force;
   uint64_t rate_limit_delay_millis = 0;
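
For context, the group-commit path that WAL-less writes now bypass works
roughly as in the condensed, self-contained sketch below (names are
illustrative; this is not the actual DBImpl code). The queue head folds all
queued writes into one log record while the followers wait on a condition
variable; that wait is exactly what a disableWAL Put() no longer pays.

  #include <chrono>
  #include <condition_variable>
  #include <deque>
  #include <iostream>
  #include <mutex>
  #include <string>
  #include <thread>
  #include <vector>

  struct Writer {
    std::string data;
    bool done = false;
    std::condition_variable cv;
  };

  std::mutex mu;
  std::deque<Writer*> writers;
  std::vector<std::string> wal;  // stands in for the transaction log

  void Write(const std::string& data) {
    Writer w;
    w.data = data;
    std::unique_lock<std::mutex> l(mu);
    writers.push_back(&w);
    // Followers block here until the queue head has written on their behalf.
    w.cv.wait(l, [&] { return w.done || &w == writers.front(); });
    if (w.done) return;  // an earlier head already committed our data

    // We are the head: fold every currently queued write into one group.
    std::string group;
    for (Writer* other : writers) group += other->data;
    Writer* last_writer = writers.back();  // last writer included in this group

    // Drop the mutex for the slow log write; newcomers queue up meanwhile.
    // Only the head ever reaches this section, so wal is written serially.
    l.unlock();
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
    wal.push_back(group);
    l.lock();

    // Mark everyone in the group done and wake the next head.
    while (true) {
      Writer* ready = writers.front();
      writers.pop_front();
      if (ready != &w) {
        ready->done = true;
        ready->cv.notify_one();
      }
      if (ready == last_writer) break;
    }
    if (!writers.empty()) writers.front()->cv.notify_one();
  }

  int main() {
    std::vector<std::thread> threads;
    for (int i = 0; i < 8; i++) {
      threads.emplace_back([i] { Write("put" + std::to_string(i) + ";"); });
    }
    for (auto& t : threads) t.join();
    std::cout << wal.size() << " group commit(s) for 8 writes\n";
    return 0;
  }

Built standalone (e.g. g++ -std=c++11 -pthread), this usually reports fewer
group commits than writes, which is the batching the real write path
performs and which a disableWAL write no longer waits for.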
