@@ -15,10 +15,17 @@
 namespace rocksdb {
 
-WriteThread::WriteThread(uint64_t max_yield_usec, uint64_t slow_yield_usec)
-    : max_yield_usec_(max_yield_usec),
-      slow_yield_usec_(slow_yield_usec),
-      newest_writer_(nullptr) {}
+WriteThread::WriteThread(const ImmutableDBOptions& db_options)
+    : max_yield_usec_(db_options.enable_write_thread_adaptive_yield
+                          ? db_options.write_thread_max_yield_usec
+                          : 0),
+      slow_yield_usec_(db_options.write_thread_slow_yield_usec),
+      allow_concurrent_memtable_write_(
+          db_options.allow_concurrent_memtable_write),
+      enable_pipelined_write_(db_options.enable_pipelined_write),
+      newest_writer_(nullptr),
+      newest_memtable_writer_(nullptr),
+      last_sequence_(0) {}
 
 uint8_t WriteThread::BlockingAwaitState(Writer* w, uint8_t goal_mask) {
   // We're going to block.  Lazily create the mutex.  We guarantee
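Note (not part of the diff): the constructor now reads its tuning from ImmutableDBOptions instead of taking two raw microsecond values, and it also captures the two new feature flags. A minimal usage sketch of the public DBOptions fields that feed these members; the concrete values below are illustrative only.

#include "rocksdb/options.h"

rocksdb::Options MakePipelinedWriteOptions() {
  rocksdb::Options options;
  options.enable_pipelined_write = true;              // -> enable_pipelined_write_
  options.allow_concurrent_memtable_write = true;     // -> allow_concurrent_memtable_write_
  options.enable_write_thread_adaptive_yield = true;  // gates max_yield_usec_ above
  options.write_thread_max_yield_usec = 100;          // -> max_yield_usec_
  options.write_thread_slow_yield_usec = 3;           // -> slow_yield_usec_
  return options;
}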
@@ -184,22 +191,39 @@ void WriteThread::SetState(Writer* w, uint8_t new_state) {
   }
 }
 
-void WriteThread::LinkOne(Writer* w, bool* linked_as_leader) {
+bool WriteThread::LinkOne(Writer* w, std::atomic<Writer*>* newest_writer) {
+  assert(newest_writer != nullptr);
   assert(w->state == STATE_INIT);
-
+  Writer* writers = newest_writer->load(std::memory_order_relaxed);
   while (true) {
-    Writer* writers = newest_writer_.load(std::memory_order_relaxed);
     w->link_older = writers;
-    if (newest_writer_.compare_exchange_strong(writers, w)) {
-      if (writers == nullptr) {
-        // this isn't part of the WriteThread machinery, but helps with
-        // debugging and is checked by an assert in WriteImpl
-        w->state.store(STATE_GROUP_LEADER, std::memory_order_relaxed);
-      }
-      // Then we are the head of the queue and hence definiltly the leader
-      *linked_as_leader = (writers == nullptr);
-      // Otherwise we will wait for previous leader to define our status
-      return;
+    if (newest_writer->compare_exchange_weak(writers, w)) {
+      return (writers == nullptr);
+    }
+  }
+}
+
+bool WriteThread::LinkGroup(WriteGroup& write_group,
+                            std::atomic<Writer*>* newest_writer) {
+  assert(newest_writer != nullptr);
+  Writer* leader = write_group.leader;
+  Writer* last_writer = write_group.last_writer;
+  Writer* w = last_writer;
+  while (true) {
+    // Unset link_newer pointers to make sure when we call
+    // CreateMissingNewerLinks later it creates all missing links.
+    w->link_newer = nullptr;
+    w->write_group = nullptr;
+    if (w == leader) {
+      break;
+    }
+    w = w->link_older;
+  }
+  Writer* newest = newest_writer->load(std::memory_order_relaxed);
+  while (true) {
+    leader->link_older = newest;
+    if (newest_writer->compare_exchange_weak(newest, last_writer)) {
+      return (newest == nullptr);
     }
   }
 }
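Note (not part of the diff): LinkOne and LinkGroup above are both lock-free pushes onto a singly linked list whose head is the newest writer; the boolean result ("the previous head was nullptr") is what promotes the first writer on an empty queue to leader. A self-contained sketch of that push with hypothetical names (Node, PushNewest), assuming, as the surrounding comments do, that only the departing leader ever unlinks nodes:

#include <atomic>

struct Node {
  Node* link_older = nullptr;  // next-older entry in the list
};

// Push 'n' as the newest entry of the list headed by 'newest'.
// Returns true iff the list was empty, i.e. the caller just became
// the leader in WriteThread terms.
bool PushNewest(std::atomic<Node*>* newest, Node* n) {
  Node* head = newest->load(std::memory_order_relaxed);
  while (true) {
    n->link_older = head;
    // On failure compare_exchange_weak reloads 'head', so the loop
    // simply retries with the fresh value (same shape as LinkOne).
    if (newest->compare_exchange_weak(head, n)) {
      return head == nullptr;
    }
  }
}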
@@ -216,12 +240,43 @@ void WriteThread::CreateMissingNewerLinks(Writer* head) {
   }
 }
 
+void WriteThread::CompleteLeader(WriteGroup& write_group) {
+  assert(write_group.size > 0);
+  Writer* leader = write_group.leader;
+  if (write_group.size == 1) {
+    write_group.leader = nullptr;
+    write_group.last_writer = nullptr;
+  } else {
+    assert(leader->link_newer != nullptr);
+    leader->link_newer->link_older = nullptr;
+    write_group.leader = leader->link_newer;
+  }
+  write_group.size -= 1;
+  SetState(leader, STATE_COMPLETED);
+}
+
+void WriteThread::CompleteFollower(Writer* w, WriteGroup& write_group) {
+  assert(write_group.size > 1);
+  assert(w != write_group.leader);
+  if (w == write_group.last_writer) {
+    w->link_older->link_newer = nullptr;
+    write_group.last_writer = w->link_older;
+  } else {
+    w->link_older->link_newer = w->link_newer;
+    w->link_newer->link_older = w->link_older;
+  }
+  write_group.size -= 1;
+  SetState(w, STATE_COMPLETED);
+}
+
 void WriteThread::JoinBatchGroup(Writer* w) {
   static AdaptationContext ctx("JoinBatchGroup");
 
   assert(w->batch != nullptr);
-  bool linked_as_leader;
-  LinkOne(w, &linked_as_leader);
+  bool linked_as_leader = LinkOne(w, &newest_writer_);
+  if (linked_as_leader) {
+    SetState(w, STATE_GROUP_LEADER);
+  }
 
   TEST_SYNC_POINT_CALLBACK("WriteThread::JoinBatchGroup:Wait", w);
@@ -231,23 +286,28 @@ void WriteThread::JoinBatchGroup(Writer* w) {
      * 1) An existing leader pick us as the new leader when it finishes
      * 2) An existing leader pick us as its follewer and
      * 2.1) finishes the memtable writes on our behalf
-     * 2.2) Or tell us to finish the memtable writes it in pralallel
+     * 2.2) Or tell us to finish the memtable writes in parallel
+     * 3) (pipelined write) An existing leader pick us as its follower and
+     *    finish book-keeping and WAL write for us, enqueue us as pending
+     *    memtable writer, and
+     * 3.1) we become memtable writer group leader, or
+     * 3.2) an existing memtable writer group leader tell us to finish memtable
+     *      writes in parallel.
      */
-    AwaitState(w,
-               STATE_GROUP_LEADER | STATE_PARALLEL_FOLLOWER | STATE_COMPLETED,
+    AwaitState(w, STATE_GROUP_LEADER | STATE_MEMTABLE_WRITER_LEADER |
+                      STATE_PARALLEL_MEMTABLE_WRITER | STATE_COMPLETED,
                &ctx);
     TEST_SYNC_POINT_CALLBACK("WriteThread::JoinBatchGroup:DoneWaiting", w);
   }
 }
 
-size_t WriteThread::EnterAsBatchGroupLeader(
-    Writer* leader, WriteThread::Writer** last_writer,
-    autovector<WriteThread::Writer*>* write_batch_group) {
+size_t WriteThread::EnterAsBatchGroupLeader(Writer* leader,
+                                            WriteGroup* write_group) {
   assert(leader->link_older == nullptr);
   assert(leader->batch != nullptr);
+  assert(write_group != nullptr);
 
   size_t size = WriteBatchInternal::ByteSize(leader->batch);
-  write_batch_group->push_back(leader);
+
   // Allow the group to grow up to a maximum size, but if the
   // original write is small, limit the growth so we do not slow
@@ -257,8 +317,10 @@ size_t WriteThread::EnterAsBatchGroupLeader(
     max_size = size + (128 << 10);
   }
 
-  *last_writer = leader;
-
+  leader->write_group = write_group;
+  write_group->leader = leader;
+  write_group->last_writer = leader;
+  write_group->size = 1;
   Writer* newest_writer = newest_writer_.load(std::memory_order_acquire);
 
   // This is safe regardless of any db mutex status of the caller. Previous
@@ -308,136 +370,268 @@ size_t WriteThread::EnterAsBatchGroupLeader(
       break;
     }
 
+    w->write_group = write_group;
     size += batch_size;
-    write_batch_group->push_back(w);
-    w->in_batch_group = true;
-    *last_writer = w;
+    write_group->last_writer = w;
+    write_group->size++;
   }
   return size;
 }
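Note (not part of the diff): the size cap used by the loop above is unchanged by this patch: a leader batch of at most 128 KiB may pull in roughly another 128 KiB of followers, while larger leaders can grow the group up to 1 MiB. For example, a 32 KiB leader caps the group at 160 KiB, and a 512 KiB leader caps it at 1 MiB. A hypothetical helper mirroring that expression:

#include <cstddef>

size_t GroupSizeCap(size_t leader_bytes) {
  size_t max_size = 1 << 20;          // 1 MiB default cap
  if (leader_bytes <= (128 << 10)) {  // "small" leader: at most 128 KiB
    max_size = leader_bytes + (128 << 10);
  }
  return max_size;
}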
 
-void WriteThread::LaunchParallelFollowers(ParallelGroup* pg,
-                                          SequenceNumber sequence) {
-  // EnterAsBatchGroupLeader already created the links from leader to
-  // newer writers in the group
-
-  pg->leader->parallel_group = pg;
-
-  Writer* w = pg->leader;
-  w->sequence = sequence;
-
-  // Initialize and wake up the others
-  while (w != pg->last_writer) {
-    // Writers that won't write don't get sequence allotment
-    if (!w->CallbackFailed() && w->ShouldWriteToMemtable()) {
-      // There is a sequence number of each written key
-      sequence += WriteBatchInternal::Count(w->batch);
-    }
-    w = w->link_newer;
-
-    w->sequence = sequence;  // sequence number for the first key in the batch
-    w->parallel_group = pg;
-    SetState(w, STATE_PARALLEL_FOLLOWER);
-  }
-}
+void WriteThread::EnterAsMemTableWriter(Writer* leader,
+                                        WriteGroup* write_group) {
+  assert(leader != nullptr);
+  assert(leader->link_older == nullptr);
+  assert(leader->batch != nullptr);
+  assert(write_group != nullptr);
+
+  size_t size = WriteBatchInternal::ByteSize(leader->batch);
+
+  // Allow the group to grow up to a maximum size, but if the
+  // original write is small, limit the growth so we do not slow
+  // down the small write too much.
+  size_t max_size = 1 << 20;
+  if (size <= (128 << 10)) {
+    max_size = size + (128 << 10);
+  }
+
+  leader->write_group = write_group;
+  write_group->leader = leader;
+  write_group->size = 1;
+  Writer* last_writer = leader;
+
+  if (!allow_concurrent_memtable_write_ || !leader->batch->HasMerge()) {
+    Writer* newest_writer = newest_memtable_writer_.load();
+    CreateMissingNewerLinks(newest_writer);
+
+    Writer* w = leader;
+    while (w != newest_writer) {
+      w = w->link_newer;
+
+      if (w->batch == nullptr) {
+        break;
+      }
+
+      if (w->batch->HasMerge()) {
+        break;
+      }
+
+      if (!allow_concurrent_memtable_write_) {
+        auto batch_size = WriteBatchInternal::ByteSize(w->batch);
+        if (size + batch_size > max_size) {
+          // Do not make batch too big
+          break;
+        }
+        size += batch_size;
+      }
+
+      w->write_group = write_group;
+      last_writer = w;
+      write_group->size++;
+    }
+  }
+
+  write_group->last_writer = last_writer;
+  write_group->last_sequence =
+      last_writer->sequence + WriteBatchInternal::Count(last_writer->batch) - 1;
+}
+
+void WriteThread::ExitAsMemTableWriter(Writer* self, WriteGroup& write_group) {
+  Writer* leader = write_group.leader;
+  Writer* last_writer = write_group.last_writer;
+
+  Writer* newest_writer = last_writer;
+  if (!newest_memtable_writer_.compare_exchange_strong(newest_writer,
+                                                       nullptr)) {
+    CreateMissingNewerLinks(newest_writer);
+    Writer* next_leader = last_writer->link_newer;
+    assert(next_leader != nullptr);
+    next_leader->link_older = nullptr;
+    SetState(next_leader, STATE_MEMTABLE_WRITER_LEADER);
+  }
+
+  Writer* w = leader;
+  while (true) {
+    if (!write_group.status.ok()) {
+      w->status = write_group.status;
+    }
+    Writer* next = w->link_newer;
+    if (w != leader) {
+      SetState(w, STATE_COMPLETED);
+    }
+    if (w == last_writer) {
+      break;
+    }
+    w = next;
+  }
+  // Note that leader has to exit last, since it owns the write group.
+  SetState(leader, STATE_COMPLETED);
+}
+
+void WriteThread::LaunchParallelMemTableWriters(WriteGroup* write_group) {
+  assert(write_group != nullptr);
+  write_group->running.store(write_group->size);
+  for (auto w : *write_group) {
+    SetState(w, STATE_PARALLEL_MEMTABLE_WRITER);
+  }
+}
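Note (not part of the diff): write_group->last_sequence computed above is the sequence of the last key in the group, given that each writer's sequence field already holds the sequence of its first key (the removed LaunchParallelFollowers did that allotment inline). As a hypothetical worked example: if the leader starts at sequence 100 and the group's batches contain 3, 1 and 5 keys, the writers start at 100, 103 and 104, and last_sequence is 108. A small sketch of that bookkeeping, with hypothetical names (SeqPlan, PlanSequences); counts are listed leader first:

#include <cstdint>
#include <vector>

struct SeqPlan {
  std::vector<uint64_t> first_seq;  // starting sequence per writer
  uint64_t last_sequence = 0;       // sequence of the group's last key
};

SeqPlan PlanSequences(uint64_t leader_seq, const std::vector<uint64_t>& counts) {
  SeqPlan plan;
  uint64_t seq = leader_seq;
  for (uint64_t count : counts) {
    plan.first_seq.push_back(seq);
    seq += count;
  }
  plan.last_sequence = seq - 1;  // matches last_writer->sequence + Count(batch) - 1
  return plan;
}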
 
 // This method is called by both the leader and parallel followers
-bool WriteThread::CompleteParallelWorker(Writer* w) {
-  static AdaptationContext ctx("CompleteParallelWorker");
+bool WriteThread::CompleteParallelMemTableWriter(Writer* w) {
+  static AdaptationContext ctx("CompleteParallelMemTableWriter");
 
-  auto* pg = w->parallel_group;
+  auto* write_group = w->write_group;
   if (!w->status.ok()) {
-    std::lock_guard<std::mutex> guard(pg->leader->StateMutex());
-    pg->status = w->status;
+    std::lock_guard<std::mutex> guard(write_group->leader->StateMutex());
+    write_group->status = w->status;
   }
 
-  if (pg->running.load(std::memory_order_acquire) > 1 && pg->running-- > 1) {
+  if (write_group->running-- > 1) {
     // we're not the last one
     AwaitState(w, STATE_COMPLETED, &ctx);
     return false;
   }
   // else we're the last parallel worker and should perform exit duties.
-  w->status = pg->status;
+  w->status = write_group->status;
   return true;
 }
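Note (not part of the diff): the write_group->running-- > 1 test above is the usual "last worker out performs the exit duties" pattern on an atomic counter seeded by LaunchParallelMemTableWriters. A stand-alone sketch, with hypothetical names (LastWorkerLatch, Finish):

#include <atomic>

// Hypothetical stand-in for WriteGroup::running: each parallel memtable
// writer calls Finish(); only the thread that drops the counter from one
// to zero returns true and performs the exit duties for the whole group.
class LastWorkerLatch {
 public:
  explicit LastWorkerLatch(int workers) : running_(workers) {}
  bool Finish() {
    // fetch_sub returns the previous value, so a previous value of 1
    // means every other worker has already finished.
    return running_.fetch_sub(1) == 1;
  }

 private:
  std::atomic<int> running_;
};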
 
 void WriteThread::ExitAsBatchGroupFollower(Writer* w) {
-  auto* pg = w->parallel_group;
+  auto* write_group = w->write_group;
 
-  assert(w->state == STATE_PARALLEL_FOLLOWER);
-  assert(pg->status.ok());
-  ExitAsBatchGroupLeader(pg->leader, pg->last_writer, pg->status);
+  assert(w->state == STATE_PARALLEL_MEMTABLE_WRITER);
+  assert(write_group->status.ok());
+  ExitAsBatchGroupLeader(*write_group, write_group->status);
   assert(w->status.ok());
   assert(w->state == STATE_COMPLETED);
-  SetState(pg->leader, STATE_COMPLETED);
+  SetState(write_group->leader, STATE_COMPLETED);
 }
 
-void WriteThread::ExitAsBatchGroupLeader(Writer* leader, Writer* last_writer,
+void WriteThread::ExitAsBatchGroupLeader(WriteGroup& write_group,
                                          Status status) {
+  static AdaptationContext ctx("ExitAsBatchGroupLeader");
+
+  Writer* leader = write_group.leader;
+  Writer* last_writer = write_group.last_writer;
   assert(leader->link_older == nullptr);
 
-  Writer* head = newest_writer_.load(std::memory_order_acquire);
-  if (head != last_writer ||
-      !newest_writer_.compare_exchange_strong(head, nullptr)) {
-    // Either w wasn't the head during the load(), or it was the head
-    // during the load() but somebody else pushed onto the list before
-    // we did the compare_exchange_strong (causing it to fail).  In the
-    // latter case compare_exchange_strong has the effect of re-reading
-    // its first param (head).  No need to retry a failing CAS, because
-    // only a departing leader (which we are at the moment) can remove
-    // nodes from the list.
-    assert(head != last_writer);
-
-    // After walking link_older starting from head (if not already done)
-    // we will be able to traverse w->link_newer below. This function
-    // can only be called from an active leader, only a leader can
-    // clear newest_writer_, we didn't, and only a clear newest_writer_
-    // could cause the next leader to start their work without a call
-    // to MarkJoined, so we can definitely conclude that no other leader
-    // work is going on here (with or without db mutex).
-    CreateMissingNewerLinks(head);
-    assert(last_writer->link_newer->link_older == last_writer);
-    last_writer->link_newer->link_older = nullptr;
-
-    // Next leader didn't self-identify, because newest_writer_ wasn't
-    // nullptr when they enqueued (we were definitely enqueued before them
-    // and are still in the list).  That means leader handoff occurs when
-    // we call MarkJoined
-    SetState(last_writer->link_newer, STATE_GROUP_LEADER);
-  }
-  // else nobody else was waiting, although there might already be a new
-  // leader now
-
-  while (last_writer != leader) {
-    last_writer->status = status;
-    // we need to read link_older before calling SetState, because as soon
-    // as it is marked committed the other thread's Await may return and
-    // deallocate the Writer.
-    auto next = last_writer->link_older;
-    SetState(last_writer, STATE_COMPLETED);
-
-    last_writer = next;
+  if (enable_pipelined_write_) {
+    // Notify writers that don't write to memtable to exit.
+    for (Writer* w = last_writer; w != leader;) {
+      Writer* next = w->link_older;
+      w->status = status;
+      if (!w->ShouldWriteToMemtable()) {
+        CompleteFollower(w, write_group);
+      }
+      w = next;
+    }
+    if (!leader->ShouldWriteToMemtable()) {
+      CompleteLeader(write_group);
+    }
+    // Link the remaining writers of the group to the memtable writer list.
+    if (write_group.size > 0) {
+      if (LinkGroup(write_group, &newest_memtable_writer_)) {
+        // The leader can now be different from current writer.
+        SetState(write_group.leader, STATE_MEMTABLE_WRITER_LEADER);
+      }
+    }
+    // Reset newest_writer_ and wake up the next leader.
+    Writer* newest_writer = last_writer;
+    if (!newest_writer_.compare_exchange_strong(newest_writer, nullptr)) {
+      Writer* next_leader = newest_writer;
+      while (next_leader->link_older != last_writer) {
+        next_leader = next_leader->link_older;
+        assert(next_leader != nullptr);
+      }
+      next_leader->link_older = nullptr;
+      SetState(next_leader, STATE_GROUP_LEADER);
+    }
+    AwaitState(leader, STATE_MEMTABLE_WRITER_LEADER |
+                           STATE_PARALLEL_MEMTABLE_WRITER | STATE_COMPLETED,
+               &ctx);
+  } else {
+    Writer* head = newest_writer_.load(std::memory_order_acquire);
+    if (head != last_writer ||
+        !newest_writer_.compare_exchange_strong(head, nullptr)) {
+      // Either w wasn't the head during the load(), or it was the head
+      // during the load() but somebody else pushed onto the list before
+      // we did the compare_exchange_strong (causing it to fail).  In the
+      // latter case compare_exchange_strong has the effect of re-reading
+      // its first param (head).  No need to retry a failing CAS, because
+      // only a departing leader (which we are at the moment) can remove
+      // nodes from the list.
+      assert(head != last_writer);
+
+      // After walking link_older starting from head (if not already done)
+      // we will be able to traverse w->link_newer below. This function
+      // can only be called from an active leader, only a leader can
+      // clear newest_writer_, we didn't, and only a clear newest_writer_
+      // could cause the next leader to start their work without a call
+      // to MarkJoined, so we can definitely conclude that no other leader
+      // work is going on here (with or without db mutex).
+      CreateMissingNewerLinks(head);
+      assert(last_writer->link_newer->link_older == last_writer);
+      last_writer->link_newer->link_older = nullptr;
+
+      // Next leader didn't self-identify, because newest_writer_ wasn't
+      // nullptr when they enqueued (we were definitely enqueued before them
+      // and are still in the list).  That means leader handoff occurs when
+      // we call MarkJoined
+      SetState(last_writer->link_newer, STATE_GROUP_LEADER);
+    }
+    // else nobody else was waiting, although there might already be a new
+    // leader now
+
+    while (last_writer != leader) {
+      last_writer->status = status;
+      // we need to read link_older before calling SetState, because as soon
+      // as it is marked committed the other thread's Await may return and
+      // deallocate the Writer.
+      auto next = last_writer->link_older;
+      SetState(last_writer, STATE_COMPLETED);
+
+      last_writer = next;
+    }
   }
 }
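Note (not part of the diff): the pipelined branch above hands off leadership of newest_writer_ differently from the non-pipelined branch: instead of filling in link_newer pointers via CreateMissingNewerLinks, it walks link_older from the current head until it finds the writer queued directly behind last_writer. A minimal sketch of that handoff, with hypothetical names (W, HandOffLeadership):

#include <atomic>

struct W {
  W* link_older = nullptr;
};

// Retire a group whose newest member is 'last'. If nothing was pushed after
// the group, the head is reset to nullptr and there is no next leader yet.
// Otherwise the writer queued directly behind 'last' is found by walking
// link_older from the current head, detached, and returned as next leader.
W* HandOffLeadership(std::atomic<W*>* newest, W* last) {
  W* expected = last;
  if (newest->compare_exchange_strong(expected, nullptr)) {
    return nullptr;  // queue drained
  }
  W* next_leader = expected;  // current head, pushed after 'last'
  while (next_leader->link_older != last) {
    next_leader = next_leader->link_older;
  }
  next_leader->link_older = nullptr;  // next leader now sees itself as oldest
  return next_leader;
}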
 
 void WriteThread::EnterUnbatched(Writer* w, InstrumentedMutex* mu) {
   static AdaptationContext ctx("EnterUnbatched");
-  assert(w != nullptr && w->batch == nullptr);
-  bool linked_as_leader;
-  LinkOne(w, &linked_as_leader);
+  assert(w->batch == nullptr);
+  mu->Unlock();
+  bool linked_as_leader = LinkOne(w, &newest_writer_);
   if (!linked_as_leader) {
-    mu->Unlock();
     TEST_SYNC_POINT("WriteThread::EnterUnbatched:Wait");
     // Last leader will not pick us as a follower since our batch is nullptr
     AwaitState(w, STATE_GROUP_LEADER, &ctx);
-    mu->Lock();
   }
+  if (enable_pipelined_write_) {
+    WaitForMemTableWriters();
+  }
+  mu->Lock();
 }
 
 void WriteThread::ExitUnbatched(Writer* w) {
-  Status dummy_status;
-  ExitAsBatchGroupLeader(w, w, dummy_status);
+  assert(w != nullptr);
+  Writer* newest_writer = w;
+  if (!newest_writer_.compare_exchange_strong(newest_writer, nullptr)) {
+    CreateMissingNewerLinks(newest_writer);
+    Writer* next_leader = w->link_newer;
+    assert(next_leader != nullptr);
+    next_leader->link_older = nullptr;
+    SetState(next_leader, STATE_GROUP_LEADER);
+  }
 }
 
+void WriteThread::WaitForMemTableWriters() {
+  static AdaptationContext ctx("WaitForMemTableWriters");
+  assert(enable_pipelined_write_);
+  if (newest_memtable_writer_.load() == nullptr) {
+    return;
+  }
+  Writer w;
+  if (!LinkOne(&w, &newest_memtable_writer_)) {
+    AwaitState(&w, STATE_MEMTABLE_WRITER_LEADER, &ctx);
+  }
+  newest_memtable_writer_.store(nullptr);
+}
+
 }  // namespace rocksdb
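Note (not part of the diff): as a reading aid, the phases a writer passes through when enable_pipelined_write is on, summarized as a hypothetical bit-flag enum in the style of the STATE_* masks used above (the real constants live in write_thread.h; names and values here are illustrative only):

#include <cstdint>

enum WriterPhase : uint8_t {
  kInit = 1,                    // linked onto newest_writer_ (STATE_INIT)
  kGroupLeader = 2,             // owns book-keeping and the WAL write for its group
  kMemtableWriterLeader = 4,    // owns the newest_memtable_writer_ queue
  kParallelMemtableWriter = 8,  // told to insert its own batch into the memtable
  kCompleted = 16               // safe to return from the write
};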