@@ -20,6 +20,11 @@
 //    kTypeColumnFamilyDeletion varint32 varstring varstring
 //    kTypeColumnFamilySingleDeletion varint32 varstring varstring
 //    kTypeColumnFamilyMerge varint32 varstring varstring
+//    kTypeBeginPrepareXID varstring
+//    kTypeEndPrepareXID
+//    kTypeCommitXID varstring
+//    kTypeRollbackXID varstring
+//    kTypeNoop
 // varstring :=
 //    len: varint32
 //    data: uint8[len]
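For orientation, not part of the patch: the new XID-bearing records reuse the same length-prefixed varstring encoding as every other record field, produced and consumed by the helpers in util/coding.h that the patch itself calls. A minimal round-trip sketch, assuming the internal header is reachable from the build:

```cpp
// Round-trip of one varstring (len: varint32, then data: uint8[len]).
// Uses the RocksDB-internal coding helpers the patch relies on.
#include <cassert>
#include <string>

#include "rocksdb/slice.h"
#include "util/coding.h"

int main() {
  std::string buf;
  rocksdb::PutLengthPrefixedSlice(&buf, rocksdb::Slice("xid-1"));  // len + bytes

  rocksdb::Slice input(buf);
  rocksdb::Slice xid;
  bool ok = rocksdb::GetLengthPrefixedSlice(&input, &xid);
  assert(ok && xid == rocksdb::Slice("xid-1"));
  return 0;
}
```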
@@ -48,11 +53,15 @@ namespace rocksdb {
 namespace {

 enum ContentFlags : uint32_t {
-  DEFERRED = 1,
-  HAS_PUT = 2,
-  HAS_DELETE = 4,
-  HAS_SINGLE_DELETE = 8,
-  HAS_MERGE = 16,
+  DEFERRED = 1 << 0,
+  HAS_PUT = 1 << 1,
+  HAS_DELETE = 1 << 2,
+  HAS_SINGLE_DELETE = 1 << 3,
+  HAS_MERGE = 1 << 4,
+  HAS_BEGIN_PREPARE = 1 << 5,
+  HAS_END_PREPARE = 1 << 6,
+  HAS_COMMIT = 1 << 7,
+  HAS_ROLLBACK = 1 << 8,
 };

 struct BatchContentClassifier : public WriteBatch::Handler {
@@ -77,6 +86,26 @@ struct BatchContentClassifier : public WriteBatch::Handler {
     content_flags |= ContentFlags::HAS_MERGE;
     return Status::OK();
   }
+
+  Status MarkBeginPrepare() override {
+    content_flags |= ContentFlags::HAS_BEGIN_PREPARE;
+    return Status::OK();
+  }
+
+  Status MarkEndPrepare(const Slice&) override {
+    content_flags |= ContentFlags::HAS_END_PREPARE;
+    return Status::OK();
+  }
+
+  Status MarkCommit(const Slice&) override {
+    content_flags |= ContentFlags::HAS_COMMIT;
+    return Status::OK();
+  }
+
+  Status MarkRollback(const Slice&) override {
+    content_flags |= ContentFlags::HAS_ROLLBACK;
+    return Status::OK();
+  }
 };

 }  // anon namespace
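The classifier above only flips content flags; a consumer-side handler has the same shape. Below is a hedged sketch of a user Handler that reacts to the new callbacks. It assumes the matching virtuals were added to WriteBatch::Handler in include/rocksdb/write_batch.h (the header half of this change, not shown in this hunk), with defaults left in place for everything it does not override.

```cpp
#include <string>

#include "rocksdb/slice.h"
#include "rocksdb/status.h"
#include "rocksdb/write_batch.h"

// Remembers the XID of the prepare section seen while iterating a batch.
// Relies on the base-class defaults for Put/Delete/Merge/LogData.
class PrepareSectionLogger : public rocksdb::WriteBatch::Handler {
 public:
  rocksdb::Status MarkBeginPrepare() override {
    in_prepare_section_ = true;
    return rocksdb::Status::OK();
  }
  rocksdb::Status MarkEndPrepare(const rocksdb::Slice& xid) override {
    in_prepare_section_ = false;
    prepared_xid_ = xid.ToString();
    return rocksdb::Status::OK();
  }

  bool in_prepare_section_ = false;
  std::string prepared_xid_;
};
```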
@ -209,9 +238,25 @@ bool ReadKeyFromWriteBatchEntry(Slice* input, Slice* key, bool cf_record) {
return GetLengthPrefixedSlice ( input , key ) ;
return GetLengthPrefixedSlice ( input , key ) ;
}
}
bool WriteBatch : : HasBeginPrepare ( ) const {
return ( ComputeContentFlags ( ) & ContentFlags : : HAS_BEGIN_PREPARE ) ! = 0 ;
}
bool WriteBatch : : HasEndPrepare ( ) const {
return ( ComputeContentFlags ( ) & ContentFlags : : HAS_END_PREPARE ) ! = 0 ;
}
bool WriteBatch : : HasCommit ( ) const {
return ( ComputeContentFlags ( ) & ContentFlags : : HAS_COMMIT ) ! = 0 ;
}
bool WriteBatch : : HasRollback ( ) const {
return ( ComputeContentFlags ( ) & ContentFlags : : HAS_ROLLBACK ) ! = 0 ;
}
Status ReadRecordFromWriteBatch ( Slice * input , char * tag ,
Status ReadRecordFromWriteBatch ( Slice * input , char * tag ,
uint32_t * column_family , Slice * key ,
uint32_t * column_family , Slice * key ,
Slice * value , Slice * blob ) {
Slice * value , Slice * blob , Slice * xid ) {
assert ( key ! = nullptr & & value ! = nullptr ) ;
assert ( key ! = nullptr & & value ! = nullptr ) ;
* tag = ( * input ) [ 0 ] ;
* tag = ( * input ) [ 0 ] ;
input - > remove_prefix ( 1 ) ;
input - > remove_prefix ( 1 ) ;
@ -257,6 +302,24 @@ Status ReadRecordFromWriteBatch(Slice* input, char* tag,
return Status : : Corruption ( " bad WriteBatch Blob " ) ;
return Status : : Corruption ( " bad WriteBatch Blob " ) ;
}
}
break ;
break ;
case kTypeNoop :
case kTypeBeginPrepareXID :
break ;
case kTypeEndPrepareXID :
if ( ! GetLengthPrefixedSlice ( input , xid ) ) {
return Status : : Corruption ( " bad EndPrepare XID " ) ;
}
break ;
case kTypeCommitXID :
if ( ! GetLengthPrefixedSlice ( input , xid ) ) {
return Status : : Corruption ( " bad Commit XID " ) ;
}
break ;
case kTypeRollbackXID :
if ( ! GetLengthPrefixedSlice ( input , xid ) ) {
return Status : : Corruption ( " bad Rollback XID " ) ;
}
break ;
default :
default :
return Status : : Corruption ( " unknown WriteBatch tag " ) ;
return Status : : Corruption ( " unknown WriteBatch tag " ) ;
}
}
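To make the new parse paths concrete, here is a sketch that hand-builds a single Commit record and decodes it through the widened signature. ReadRecordFromWriteBatch and the kTypeCommitXID tag are internal; the header locations (write_batch_internal.h, dbformat.h) are assumed, and this is illustration rather than suggested public usage.

```cpp
#include <cassert>
#include <string>

#include "db/dbformat.h"              // ValueType tags (internal, assumed)
#include "db/write_batch_internal.h"  // ReadRecordFromWriteBatch (internal, assumed)
#include "util/coding.h"

int main() {
  // One record: tag byte followed by the XID as a varstring.
  std::string rec;
  rec.push_back(static_cast<char>(rocksdb::kTypeCommitXID));
  rocksdb::PutLengthPrefixedSlice(&rec, rocksdb::Slice("txn-42"));

  rocksdb::Slice input(rec);
  char tag = 0;
  uint32_t column_family = 0;
  rocksdb::Slice key, value, blob, xid;
  rocksdb::Status s = rocksdb::ReadRecordFromWriteBatch(
      &input, &tag, &column_family, &key, &value, &blob, &xid);
  assert(s.ok());
  assert(tag == static_cast<char>(rocksdb::kTypeCommitXID));
  assert(xid == rocksdb::Slice("txn-42"));
  return 0;
}
```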
@@ -270,7 +333,7 @@ Status WriteBatch::Iterate(Handler* handler) const {
   }

   input.remove_prefix(WriteBatchInternal::kHeader);
-  Slice key, value, blob;
+  Slice key, value, blob, xid;
   int found = 0;
   Status s;
   while (s.ok() && !input.empty() && handler->Continue()) {
@@ -278,7 +341,7 @@ Status WriteBatch::Iterate(Handler* handler) const {
     char tag = 0;
     uint32_t column_family = 0;  // default
     s = ReadRecordFromWriteBatch(&input, &tag, &column_family, &key, &value,
-                                 &blob);
+                                 &blob, &xid);
     if (!s.ok()) {
       return s;
     }
@@ -315,6 +378,28 @@ Status WriteBatch::Iterate(Handler* handler) const {
       case kTypeLogData:
        handler->LogData(blob);
        break;
+      case kTypeBeginPrepareXID:
+        assert(content_flags_.load(std::memory_order_relaxed) &
+               (ContentFlags::DEFERRED | ContentFlags::HAS_BEGIN_PREPARE));
+        handler->MarkBeginPrepare();
+        break;
+      case kTypeEndPrepareXID:
+        assert(content_flags_.load(std::memory_order_relaxed) &
+               (ContentFlags::DEFERRED | ContentFlags::HAS_END_PREPARE));
+        handler->MarkEndPrepare(xid);
+        break;
+      case kTypeCommitXID:
+        assert(content_flags_.load(std::memory_order_relaxed) &
+               (ContentFlags::DEFERRED | ContentFlags::HAS_COMMIT));
+        handler->MarkCommit(xid);
+        break;
+      case kTypeRollbackXID:
+        assert(content_flags_.load(std::memory_order_relaxed) &
+               (ContentFlags::DEFERRED | ContentFlags::HAS_ROLLBACK));
+        handler->MarkRollback(xid);
+        break;
+      case kTypeNoop:
+        break;
       default:
         return Status::Corruption("unknown WriteBatch tag");
     }
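Putting the dispatch together: a hedged sketch that drives Iterate() over a prepared batch so the PrepareSectionLogger from the earlier sketch observes the markers. It leans on WriteBatchInternal::InsertNoop and MarkEndPrepare, which are defined in the final hunk below.

```cpp
#include <cassert>

#include "db/write_batch_internal.h"  // WriteBatchInternal (internal, assumed)
#include "rocksdb/write_batch.h"

int main() {
  rocksdb::WriteBatch batch;
  // A prepared batch starts with a Noop placeholder that MarkEndPrepare
  // later rewrites into the BeginPrepare marker.
  rocksdb::WriteBatchInternal::InsertNoop(&batch);
  batch.Put("key", "value");
  rocksdb::WriteBatchInternal::MarkEndPrepare(&batch, rocksdb::Slice("txn-42"));

  PrepareSectionLogger logger;  // from the sketch after the classifier hunk
  rocksdb::Status s = batch.Iterate(&logger);
  assert(s.ok());
  assert(logger.prepared_xid_ == "txn-42");
  return 0;
}
```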
@@ -391,6 +476,47 @@ void WriteBatch::Put(ColumnFamilyHandle* column_family, const SliceParts& key,
   WriteBatchInternal::Put(this, GetColumnFamilyID(column_family), key, value);
 }

+void WriteBatchInternal::InsertNoop(WriteBatch* b) {
+  b->rep_.push_back(static_cast<char>(kTypeNoop));
+}
+
+void WriteBatchInternal::MarkEndPrepare(WriteBatch* b, const Slice& xid) {
+  // a manually constructed batch can only contain one prepare section
+  assert(b->rep_[12] == static_cast<char>(kTypeNoop));
+
+  // all savepoints up to this point are cleared
+  if (b->save_points_ != nullptr) {
+    while (!b->save_points_->stack.empty()) {
+      b->save_points_->stack.pop();
+    }
+  }
+
+  // rewrite noop as begin marker
+  b->rep_[12] = static_cast<char>(kTypeBeginPrepareXID);
+  b->rep_.push_back(static_cast<char>(kTypeEndPrepareXID));
+  PutLengthPrefixedSlice(&b->rep_, xid);
+  b->content_flags_.store(b->content_flags_.load(std::memory_order_relaxed) |
+                              ContentFlags::HAS_END_PREPARE |
+                              ContentFlags::HAS_BEGIN_PREPARE,
+                          std::memory_order_relaxed);
+}
+
+void WriteBatchInternal::MarkCommit(WriteBatch* b, const Slice& xid) {
+  b->rep_.push_back(static_cast<char>(kTypeCommitXID));
+  PutLengthPrefixedSlice(&b->rep_, xid);
+  b->content_flags_.store(b->content_flags_.load(std::memory_order_relaxed) |
+                              ContentFlags::HAS_COMMIT,
+                          std::memory_order_relaxed);
+}
+
+void WriteBatchInternal::MarkRollback(WriteBatch* b, const Slice& xid) {
+  b->rep_.push_back(static_cast<char>(kTypeRollbackXID));
+  PutLengthPrefixedSlice(&b->rep_, xid);
+  b->content_flags_.store(b->content_flags_.load(std::memory_order_relaxed) |
+                              ContentFlags::HAS_ROLLBACK,
+                          std::memory_order_relaxed);
+}
+
 void WriteBatchInternal::Delete(WriteBatch* b, uint32_t column_family_id,
                                 const Slice& key) {
   WriteBatchInternal::SetCount(b, WriteBatchInternal::Count(b) + 1);
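Finally, a hedged end-to-end sketch of what these internal helpers produce: the prepare markers bracket the data records of one batch, while Commit (or Rollback) is written later as its own batch carrying only the XID, and the Has* predicates added earlier report what each batch contains. Names and the exact call sequence are illustrative, not the transaction layer's actual code path.

```cpp
#include <cassert>

#include "db/write_batch_internal.h"  // WriteBatchInternal (internal, assumed)
#include "rocksdb/write_batch.h"

int main() {
  // Prepare phase: Noop placeholder, data records, then the EndPrepare marker
  // (which rewrites the placeholder into BeginPrepare and appends the XID).
  rocksdb::WriteBatch prepare_batch;
  rocksdb::WriteBatchInternal::InsertNoop(&prepare_batch);
  prepare_batch.Put("key", "value");
  rocksdb::WriteBatchInternal::MarkEndPrepare(&prepare_batch,
                                              rocksdb::Slice("txn-42"));
  assert(prepare_batch.HasBeginPrepare());
  assert(prepare_batch.HasEndPrepare());
  assert(!prepare_batch.HasCommit());

  // Commit phase: a separate batch that carries only the Commit marker.
  rocksdb::WriteBatch commit_batch;
  rocksdb::WriteBatchInternal::MarkCommit(&commit_batch, rocksdb::Slice("txn-42"));
  assert(commit_batch.HasCommit());
  assert(!commit_batch.HasBeginPrepare());
  return 0;
}
```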