@@ -24,6 +24,7 @@
// kTypeEndPrepareXID
// kTypeCommitXID varstring
// kTypeRollbackXID varstring
// kTypeBeginPersistedPrepareXID varstring
// kTypeNoop
// varstring :=
// len: varint32
@@ -353,6 +354,9 @@ Status ReadRecordFromWriteBatch(Slice* input, char* tag,
      break;
    case kTypeNoop:
    case kTypeBeginPrepareXID:
      // This indicates that the prepared batch is also persisted in the db.
      // This is used in WritePreparedTxn
    case kTypeBeginPersistedPrepareXID:
      break;
    case kTypeEndPrepareXID:
      if (!GetLengthPrefixedSlice(input, xid)) {
@@ -457,6 +461,24 @@ Status WriteBatch::Iterate(Handler* handler) const {
               (ContentFlags::DEFERRED | ContentFlags::HAS_BEGIN_PREPARE));
        handler->MarkBeginPrepare();
        empty_batch = false;
        if (!handler->WriteAfterCommit()) {
          s = Status::NotSupported(
              "WriteCommitted txn tag when write_after_commit_ is disabled (in "
              "WritePrepared mode). If it is not due to corruption, the WAL "
              "must be emptied before changing the WritePolicy.");
        }
        break;
      case kTypeBeginPersistedPrepareXID:
        assert(content_flags_.load(std::memory_order_relaxed) &
               (ContentFlags::DEFERRED | ContentFlags::HAS_BEGIN_PREPARE));
        handler->MarkBeginPrepare();
        empty_batch = false;
        if (handler->WriteAfterCommit()) {
          s = Status::NotSupported(
              "WritePrepared txn tag when write_after_commit_ is enabled (in "
              "default WriteCommitted mode). If it is not due to corruption, "
              "the WAL must be emptied before changing the WritePolicy.");
        }
        break;
      case kTypeEndPrepareXID:
        assert(content_flags_.load(std::memory_order_relaxed) &
@@ -575,7 +597,8 @@ Status WriteBatchInternal::InsertNoop(WriteBatch* b) {
  return Status::OK();
}

Status WriteBatchInternal::MarkEndPrepare(WriteBatch* b, const Slice& xid) {
Status WriteBatchInternal::MarkEndPrepare(WriteBatch* b, const Slice& xid,
                                          bool write_after_commit) {
  // a manually constructed batch can only contain one prepare section
  assert(b->rep_[12] == static_cast<char>(kTypeNoop));
@@ -587,7 +610,9 @@ Status WriteBatchInternal::MarkEndPrepare(WriteBatch* b, const Slice& xid) {
  }

  // rewrite noop as begin marker
  b->rep_[12] = static_cast<char>(kTypeBeginPrepareXID);
  b->rep_[12] =
      static_cast<char>(write_after_commit ? kTypeBeginPrepareXID
                                           : kTypeBeginPersistedPrepareXID);
  b->rep_.push_back(static_cast<char>(kTypeEndPrepareXID));
  PutLengthPrefixedSlice(&b->rep_, xid);
  b->content_flags_.store(b->content_flags_.load(std::memory_order_relaxed) |
@@ -920,6 +945,9 @@ class MemTableInserter : public WriteBatch::Handler {
    return *reinterpret_cast<MemPostInfoMap*>(&mem_post_info_map_);
  }

 protected:
  virtual bool WriteAfterCommit() const override { return write_after_commit_; }

 public:
  // cf_mems should not be shared with concurrent inserters
  MemTableInserter(SequenceNumber _sequence, ColumnFamilyMemTables* cf_mems,
@@ -957,6 +985,8 @@ class MemTableInserter : public WriteBatch::Handler {
  MemTableInserter(const MemTableInserter&) = delete;
  MemTableInserter& operator=(const MemTableInserter&) = delete;

  virtual bool WriterAfterCommit() const { return write_after_commit_; }

  void MaybeAdvanceSeq(bool batch_boundry = false) {
    if (batch_boundry == seq_per_batch_) {
      sequence_++;