@@ -327,11 +327,10 @@ class WritePreparedTransactionTest : public TransactionTest {
}
} ;
// TODO(myabandeh): enable it for concurrent_prepare
// NOTE(review): two INSTANTIATE_TEST_CASE_P invocations with the same prefix
// and test case name appear below — this looks like both the old and the new
// side of a diff hunk. Duplicate instantiation of the same suite will not
// compile; only one (presumably the second, which additionally covers the
// (false, true, WRITE_PREPARED) configuration, i.e. concurrent_prepare
// enabled) should remain. Confirm against the original patch.
INSTANTIATE_TEST_CASE_P ( WritePreparedTransactionTest ,
WritePreparedTransactionTest ,
: : testing : : Values ( std : : make_tuple ( false , false ,
WRITE_PREPARED ) ) ) ;
INSTANTIATE_TEST_CASE_P (
WritePreparedTransactionTest , WritePreparedTransactionTest ,
: : testing : : Values ( std : : make_tuple ( false , false , WRITE_PREPARED ) ,
std : : make_tuple ( false , true , WRITE_PREPARED ) ) ) ;
TEST_P ( WritePreparedTransactionTest , CommitMapTest ) {
WritePreparedTxnDB * wp_db = dynamic_cast < WritePreparedTxnDB * > ( db ) ;
@@ -595,113 +594,6 @@ TEST_P(WritePreparedTransactionTest, AdvanceMaxEvictedSeqBasicTest) {
}
}
// TODO(myabandeh): remove this redundant test after transaction_test is
// enabled with WRITE_PREPARED too. This test clarifies the existing
// expectation from the sequence number algorithm. It could detect mistakes in
// updating the code but it is not necessarily the one acceptable way. If the
// algorithm is legitimately changed, this unit test should be updated as well.
//
// Verifies that the DB's last sequence number advances exactly as the test
// fixture's expectation counter (exp_seq, updated by the txn_t0..txn_t3
// helpers) predicts, across all 2^8 combinations of optional memtable
// flushes, WAL flushes, and recoveries interleaved between the writes.
TEST_P ( WritePreparedTransactionTest , SeqAdvanceTest ) {
// NOTE(review): wopts appears unused in the visible body of this test —
// confirm before removing.
WriteOptions wopts ;
FlushOptions fopt ;
options . disable_auto_compactions = true ;
ReOpen ( ) ;
// Do the test with NUM_BRANCHES branches in it. Each run of a test takes some
// of the branches. This is the same as counting a binary number where i-th
// bit represents whether we take branch i in the represented by the number.
const size_t NUM_BRANCHES = 8 ;
// Helper function that shows if the branch is to be taken in the run
// represented by the number n.
// NOTE(review): nothing here visibly advances *branch between call sites,
// yet the counting scheme above implies each call should test the next bit.
// An increment of *branch may have been lost in this whitespace-mangled
// diff — confirm against the original source.
auto branch_do = [ & ] ( size_t n , size_t * branch ) {
assert ( * branch < NUM_BRANCHES ) ;
const size_t filter = static_cast < size_t > ( 1 ) < < * branch ;
return n & filter ;
} ;
const size_t max_n = static_cast < size_t > ( 1 ) < < NUM_BRANCHES ;
// Each n encodes one subset of the optional branches; the DB is re-created
// fresh before every iteration by the ReOpen() in the loop's increment
// expression.
for ( size_t n = 0 ; n < max_n ; n + + , ReOpen ( ) ) {
DBImpl * db_impl = reinterpret_cast < DBImpl * > ( db - > GetRootDB ( ) ) ;
size_t branch = 0 ;
// Seed the expectation counter from the DB's current last sequence number.
auto seq = db_impl - > GetLatestSequenceNumber ( ) ;
exp_seq = seq ;
txn_t0 ( 0 ) ;
seq = db_impl - > GetLatestSequenceNumber ( ) ;
ASSERT_EQ ( exp_seq , seq ) ;
// Optional branch: a memtable flush must not change the last seq.
if ( branch_do ( n , & branch ) ) {
db_impl - > Flush ( fopt ) ;
seq = db_impl - > GetLatestSequenceNumber ( ) ;
ASSERT_EQ ( exp_seq , seq ) ;
}
// Optional branch: WAL flush + recovery must preserve the last seq.
if ( branch_do ( n , & branch ) ) {
db_impl - > FlushWAL ( true ) ;
ReOpenNoDelete ( ) ;
db_impl = reinterpret_cast < DBImpl * > ( db - > GetRootDB ( ) ) ;
seq = db_impl - > GetLatestSequenceNumber ( ) ;
ASSERT_EQ ( exp_seq , seq ) ;
}
// Doing it twice might detect some bugs
txn_t0 ( 1 ) ;
seq = db_impl - > GetLatestSequenceNumber ( ) ;
ASSERT_EQ ( exp_seq , seq ) ;
txn_t1 ( 0 ) ;
seq = db_impl - > GetLatestSequenceNumber ( ) ;
ASSERT_EQ ( exp_seq , seq ) ;
if ( branch_do ( n , & branch ) ) {
db_impl - > Flush ( fopt ) ;
seq = db_impl - > GetLatestSequenceNumber ( ) ;
ASSERT_EQ ( exp_seq , seq ) ;
}
if ( branch_do ( n , & branch ) ) {
db_impl - > FlushWAL ( true ) ;
ReOpenNoDelete ( ) ;
db_impl = reinterpret_cast < DBImpl * > ( db - > GetRootDB ( ) ) ;
seq = db_impl - > GetLatestSequenceNumber ( ) ;
ASSERT_EQ ( exp_seq , seq ) ;
}
txn_t3 ( 0 ) ;
// Since commit marker does not write to memtable, the last seq number is
// not updated immediately. But the advance should be visible after the next
// write. Hence no ASSERT_EQ on the sequence number until after the next
// write (or after a recovery, which replays the commit from the WAL).
if ( branch_do ( n , & branch ) ) {
db_impl - > Flush ( fopt ) ;
}
if ( branch_do ( n , & branch ) ) {
db_impl - > FlushWAL ( true ) ;
ReOpenNoDelete ( ) ;
db_impl = reinterpret_cast < DBImpl * > ( db - > GetRootDB ( ) ) ;
seq = db_impl - > GetLatestSequenceNumber ( ) ;
ASSERT_EQ ( exp_seq , seq ) ;
}
// This write makes the seq advance from txn_t3's commit visible.
txn_t0 ( 0 ) ;
seq = db_impl - > GetLatestSequenceNumber ( ) ;
ASSERT_EQ ( exp_seq , seq ) ;
txn_t2 ( 0 ) ;
seq = db_impl - > GetLatestSequenceNumber ( ) ;
ASSERT_EQ ( exp_seq , seq ) ;
if ( branch_do ( n , & branch ) ) {
db_impl - > Flush ( fopt ) ;
seq = db_impl - > GetLatestSequenceNumber ( ) ;
ASSERT_EQ ( exp_seq , seq ) ;
}
if ( branch_do ( n , & branch ) ) {
db_impl - > FlushWAL ( true ) ;
ReOpenNoDelete ( ) ;
db_impl = reinterpret_cast < DBImpl * > ( db - > GetRootDB ( ) ) ;
seq = db_impl - > GetLatestSequenceNumber ( ) ;
ASSERT_EQ ( exp_seq , seq ) ;
}
}
}
TEST_P ( WritePreparedTransactionTest , SeqAdvanceConcurrentTest ) {
// Given the sequential run of txns, with this timeout we should never see a
// deadlock nor a timeout unless we have a key conflict, which should be
@@ -732,7 +624,7 @@ TEST_P(WritePreparedTransactionTest, SeqAdvanceConcurrentTest) {
printf ( " Tested % " PRIu64 " cases so far \n " , n ) ;
}
DBImpl * db_impl = reinterpret_cast < DBImpl * > ( db - > GetRootDB ( ) ) ;
auto seq = db_impl - > GetLatestSequenceNumber ( ) ;
auto seq = db_impl - > TEST_ GetLatestVisible SequenceNumber( ) ;
exp_seq = seq ;
// This is increased before writing the batch for commit
commit_writes = 0 ;
@@ -800,24 +692,28 @@ TEST_P(WritePreparedTransactionTest, SeqAdvanceConcurrentTest) {
for ( auto & t : threads ) {
t . join ( ) ;
}
if ( txn_db_ options. write_policy = = WRITE_PREPARED ) {
if ( options . concurrent_prepare ) {
// In this case none of the above scheduling tricks to deterministically
// form merged batches works because the writes go to separate queues.
// This would result in different write groups in each run of the test. We
// still keep the test since although non-deterministic and hard to debug,
// it is still useful to have. Since in this case we could finish with
// commit writes that don't write to memtable, the seq is not advanced in
// this code path. It will be after the next write. So we do one more
// write to make the impact of last seq visible.
txn_t0 ( 0 ) ;
// it is still useful to have.
// TODO(myabandeh): Add a deterministic unit test for concurrent_prepare
}
// Check if memtable inserts advanced seq number as expected
seq = db_impl - > GetLatestSequenceNumber ( ) ;
seq = db_impl - > TEST_ GetLatestVisible SequenceNumber( ) ;
ASSERT_EQ ( exp_seq , seq ) ;
rocksdb : : SyncPoint : : GetInstance ( ) - > DisableProcessing ( ) ;
rocksdb : : SyncPoint : : GetInstance ( ) - > ClearAllCallBacks ( ) ;
// The latest seq might be due to a commit without prepare and hence not
// persisted in the WAL. To make the verification of seq after recovery
// easier we write in a transaction with prepare which makes the latest seq
// to be persisted via the commitmarker.
txn_t3 ( 0 ) ;
// Check if recovery preserves the last sequence number
db_impl - > FlushWAL ( true ) ;
ReOpenNoDelete ( ) ;
@@ -1354,11 +1250,16 @@ TEST_P(WritePreparedTransactionTest, DisableGCDuringRecoveryTest) {
options . write_buffer_size = 1024 * 1024 ;
ReOpen ( ) ;
std : : vector < KeyVersion > versions ;
uint64_t seq = 0 ;
for ( uint64_t i = 1 ; i < = 1024 ; i + + ) {
std : : string v = " bar " + ToString ( i ) ;
ASSERT_OK ( db - > Put ( WriteOptions ( ) , " foo " , v ) ) ;
VerifyKeys ( { { " foo " , v } } ) ;
KeyVersion kv = { " foo " , v , i , kTypeValue } ;
seq + + ; // one for the key/value
KeyVersion kv = { " foo " , v , seq , kTypeValue } ;
if ( options . concurrent_prepare ) {
seq + + ; // one for the commit
}
versions . emplace_back ( kv ) ;
}
std : : reverse ( std : : begin ( versions ) , std : : end ( versions ) ) ;
@@ -1395,6 +1296,7 @@ TEST_P(WritePreparedTransactionTest, SequenceNumberZeroTest) {
TEST_P ( WritePreparedTransactionTest , CompactionShouldKeepUncommittedKeys ) {
options . disable_auto_compactions = true ;
ReOpen ( ) ;
DBImpl * db_impl = reinterpret_cast < DBImpl * > ( db - > GetRootDB ( ) ) ;
// Snapshots to avoid keys get evicted.
std : : vector < const Snapshot * > snapshots ;
// Keep track of expected sequence number.
@@ -1402,7 +1304,11 @@ TEST_P(WritePreparedTransactionTest, CompactionShouldKeepUncommittedKeys) {
auto add_key = [ & ] ( std : : function < Status ( ) > func ) {
ASSERT_OK ( func ( ) ) ;
ASSERT_EQ ( + + expected_seq , db - > GetLatestSequenceNumber ( ) ) ;
expected_seq + + ;
if ( options . concurrent_prepare ) {
expected_seq + + ; // 1 for commit
}
ASSERT_EQ ( expected_seq , db_impl - > TEST_GetLatestVisibleSequenceNumber ( ) ) ;
snapshots . push_back ( db - > GetSnapshot ( ) ) ;
} ;
@@ -1489,7 +1395,8 @@ TEST_P(WritePreparedTransactionTest, CompactionShouldKeepSnapshotVisibleKeys) {
ASSERT_OK ( txn1 - > Prepare ( ) ) ;
ASSERT_EQ ( + + expected_seq , db - > GetLatestSequenceNumber ( ) ) ;
ASSERT_OK ( txn1 - > Commit ( ) ) ;
ASSERT_EQ ( + + expected_seq , db - > GetLatestSequenceNumber ( ) ) ;
DBImpl * db_impl = reinterpret_cast < DBImpl * > ( db - > GetRootDB ( ) ) ;
ASSERT_EQ ( + + expected_seq , db_impl - > TEST_GetLatestVisibleSequenceNumber ( ) ) ;
delete txn1 ;
// Take a snapshots to avoid keys get evicted before compaction.
const Snapshot * snapshot1 = db - > GetSnapshot ( ) ;
@@ -1502,16 +1409,24 @@ TEST_P(WritePreparedTransactionTest, CompactionShouldKeepSnapshotVisibleKeys) {
// txn2 commit after snapshot2 and it is not visible.
const Snapshot * snapshot2 = db - > GetSnapshot ( ) ;
ASSERT_OK ( txn2 - > Commit ( ) ) ;
ASSERT_EQ ( + + expected_seq , db - > GetLatestSequenceNumber ( ) ) ;
ASSERT_EQ ( + + expected_seq , db_impl - > TEST_ GetLatestVisible SequenceNumber( ) ) ;
delete txn2 ;
// Take a snapshots to avoid keys get evicted before compaction.
const Snapshot * snapshot3 = db - > GetSnapshot ( ) ;
ASSERT_OK ( db - > Put ( WriteOptions ( ) , " key1 " , " value1_2 " ) ) ;
ASSERT_EQ ( + + expected_seq , db - > GetLatestSequenceNumber ( ) ) ;
expected_seq + + ; // 1 for write
SequenceNumber seq1 = expected_seq ;
if ( options . concurrent_prepare ) {
expected_seq + + ; // 1 for commit
}
ASSERT_EQ ( expected_seq , db_impl - > TEST_GetLatestVisibleSequenceNumber ( ) ) ;
ASSERT_OK ( db - > Put ( WriteOptions ( ) , " key2 " , " value2_2 " ) ) ;
ASSERT_EQ ( + + expected_seq , db - > GetLatestSequenceNumber ( ) ) ;
expected_seq + + ; // 1 for write
SequenceNumber seq2 = expected_seq ;
if ( options . concurrent_prepare ) {
expected_seq + + ; // 1 for commit
}
ASSERT_EQ ( expected_seq , db_impl - > TEST_GetLatestVisibleSequenceNumber ( ) ) ;
ASSERT_OK ( db - > Flush ( FlushOptions ( ) ) ) ;
db - > ReleaseSnapshot ( snapshot1 ) ;
db - > ReleaseSnapshot ( snapshot3 ) ;