disableWAL with WriteImplWALOnly

Summary:
Currently WriteImplWALOnly simply returns when disableWAL is set. This is incorrect behavior, since it does not allocate the sequence number, which is a side effect of writing to the WAL. This patch fixes the issue.
Closes https://github.com/facebook/rocksdb/pull/3262

Differential Revision: D6550974

Pulled By: maysamyabandeh

fbshipit-source-id: 745a83ae8f04e7ca6c8ffb247d6ef16c287c52e7
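As a minimal sketch of the invariant the patch restores (not code from the patch itself, and the database path below is a placeholder): a write must advance the sequence number even when it skips the WAL. two_write_queues is the mode whose WAL-only write path (WriteImplWALOnly) is being fixed; the plain Put here only demonstrates the user-visible sequence behavior, not the internal code path.

#include <cassert>
#include "rocksdb/db.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  options.two_write_queues = true;  // the mode whose WAL-only path is fixed here
  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/walonly_demo", &db);
  assert(s.ok());

  const rocksdb::SequenceNumber before = db->GetLatestSequenceNumber();

  rocksdb::WriteOptions wo;
  wo.disableWAL = true;  // skip the WAL; the sequence must still be allocated
  s = db->Put(wo, "key", "value");
  assert(s.ok());

  // Allocating the sequence number is normally a side effect of writing to
  // the WAL; the patch makes the WAL-only path allocate it explicitly when
  // the WAL write is skipped, so the sequence still advances.
  assert(db->GetLatestSequenceNumber() > before);

  delete db;
  return 0;
}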
Branch: main
Author: Maysam Yabandeh (committed by Facebook Github Bot)
Parent: 35dfbd58dd
Commit: 546a63272f
1 changed file with 8 additions and 4 deletions: db/db_impl_write.cc

@@ -521,9 +521,6 @@ Status DBImpl::WriteImplWALOnly(const WriteOptions& write_options,
   PERF_TIMER_GUARD(write_pre_and_post_process_time);
   WriteThread::Writer w(write_options, my_batch, callback, log_ref,
                         true /* disable_memtable */, pre_release_callback);
-  if (write_options.disableWAL) {
-    return status;
-  }
   RecordTick(stats_, WRITE_WITH_WAL);
   StopWatch write_sw(env_, immutable_db_options_.statistics.get(), DB_WRITE);
@@ -580,7 +577,13 @@ Status DBImpl::WriteImplWALOnly(const WriteOptions& write_options,
   // LastAllocatedSequence is increased inside WriteToWAL under
   // wal_write_mutex_ to ensure ordered events in WAL
   size_t seq_inc = seq_per_batch_ ? write_group.size : 0 /*total_count*/;
-  status = ConcurrentWriteToWAL(write_group, log_used, &last_sequence, seq_inc);
+  if (!write_options.disableWAL) {
+    status =
+        ConcurrentWriteToWAL(write_group, log_used, &last_sequence, seq_inc);
+  } else {
+    // Otherwise we inc seq number to do solely the seq allocation
+    last_sequence = versions_->FetchAddLastAllocatedSequence(seq_inc);
+  }
   auto curr_seq = last_sequence + 1;
   for (auto* writer : write_group) {
     if (writer->CallbackFailed()) {
@@ -593,6 +596,7 @@ Status DBImpl::WriteImplWALOnly(const WriteOptions& write_options,
     // else seq advances only by memtable writes
   }
   if (status.ok() && write_options.sync) {
+    assert(!write_options.disableWAL);
    // Requesting sync with two_write_queues_ is expected to be very rare. We
    // hance provide a simple implementation that is not necessarily efficient.
    if (manual_wal_flush_) {
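For context, the new assert should never fire in practice: the public write path rejects sync combined with disableWAL before this function is reached. Paraphrased from the guard at the top of DBImpl::WriteImpl in the same file:

  // Sync and disableWAL are contradictory, so the combination is rejected
  // up front; the assert in the sync branch above relies on this.
  if (write_options.sync && write_options.disableWAL) {
    return Status::InvalidArgument("Sync writes has to enable WAL.");
  }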
