diff --git a/HISTORY.md b/HISTORY.md index 113381d8c..bc40fac4f 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -14,6 +14,7 @@ * Fix a memory leak when files with range tombstones are read in mmap mode and block cache is enabled * Fix handling of corrupt range tombstone blocks such that corruptions cannot cause deleted keys to reappear * Lock free MultiGet +* Fix a bug with pipelined writes where a failure in the write leader's callback could cause the whole write group to fail. ## 5.18.0 (11/30/2018) ### New Features diff --git a/db/db_impl_write.cc b/db/db_impl_write.cc index 3a239ee5c..885832f5a 100644 --- a/db/db_impl_write.cc +++ b/db/db_impl_write.cc @@ -475,7 +475,7 @@ Status DBImpl::PipelinedWriteImpl(const WriteOptions& write_options, PERF_TIMER_STOP(write_pre_and_post_process_time); - if (w.ShouldWriteToWAL()) { + if (w.status.ok() && !write_options.disableWAL) { PERF_TIMER_GUARD(write_wal_time); stats->AddDBStats(InternalStats::WRITE_DONE_BY_SELF, 1); RecordTick(stats_, WRITE_DONE_BY_SELF, 1); @@ -504,7 +504,7 @@ Status DBImpl::PipelinedWriteImpl(const WriteOptions& write_options, WriteThread::WriteGroup memtable_write_group; if (w.state == WriteThread::STATE_MEMTABLE_WRITER_LEADER) { PERF_TIMER_GUARD(write_memtable_time); - assert(w.status.ok()); + assert(w.ShouldWriteToMemtable()); write_thread_.EnterAsMemTableWriter(&w, &memtable_write_group); if (memtable_write_group.size > 1 && immutable_db_options_.allow_concurrent_memtable_write) { diff --git a/db/write_thread.h b/db/write_thread.h index a3802c996..dc9c22ff8 100644 --- a/db/write_thread.h +++ b/db/write_thread.h @@ -127,7 +127,7 @@ class WriteThread { std::atomic state; // write under StateMutex() or pre-link WriteGroup* write_group; SequenceNumber sequence; // the sequence number to use for the first key - Status status; + Status status; Status callback_status; // status returned by callback->Callback() std::aligned_storage::type state_mutex_bytes;