@@ -462,6 +462,37 @@ TEST_P(TransactionTest, DeadlockCycleShared) {
    auto s =
        txns[i]->GetForUpdate(read_options, "0", nullptr, true /* exclusive */);
    ASSERT_TRUE(s.IsDeadlock());
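    // Each failed exclusive acquisition above completes one deadlock cycle;
    // the checks below verify that it was recorded in the deadlock info buffer.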
    // Calculate next buffer len, plateau at 5 when 5 records are inserted.
    const uint32_t curr_dlock_buffer_len_ =
        (i - 14 > kInitialMaxDeadlocks) ? kInitialMaxDeadlocks : (i - 14);
    auto dlock_buffer = db->GetDeadlockInfoBuffer();
    ASSERT_EQ(dlock_buffer.size(), curr_dlock_buffer_len_);
    auto dlock_entry = dlock_buffer[0].path;
    ASSERT_EQ(dlock_entry.size(), kInitialMaxDeadlocks);
    int64_t curr_waiting_key = 0;
    // Offset of each txn id from the root of the shared dlock tree's txn id.
    int64_t offset_root = dlock_entry[0].m_txn_id - 1;
    // Offset of the final entry in the dlock path from the root's txn id.
    TransactionID leaf_id =
        dlock_entry[dlock_entry.size() - 1].m_txn_id - offset_root;
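    // Walk the recorded path from leaf to root; halving the txn id offset and
    // the waiting key at each step follows the parent links of the tree-shaped
    // chain of shared-lock waits built above.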
    for (auto it = dlock_entry.rbegin(); it != dlock_entry.rend(); it++) {
      auto dl_node = *it;
      ASSERT_EQ(dl_node.m_txn_id, offset_root + leaf_id);
      ASSERT_EQ(dl_node.m_cf_id, 0);
      ASSERT_EQ(dl_node.m_waiting_key, ToString(curr_waiting_key));
      ASSERT_EQ(dl_node.m_exclusive, true);
      if (curr_waiting_key == 0) {
        curr_waiting_key = leaf_id;
      }
      curr_waiting_key /= 2;
      leaf_id /= 2;
    }
  }
  // Rollback the leaf transaction.
@@ -473,6 +504,102 @@ TEST_P(TransactionTest, DeadlockCycleShared) {
  for (auto& t : threads) {
    t.join();
  }
  // Downsize the buffer and verify the 3 latest deadlocks are preserved.
  auto dlock_buffer_before_resize = db->GetDeadlockInfoBuffer();
  db->SetDeadlockInfoBufferSize(3);
  auto dlock_buffer_after_resize = db->GetDeadlockInfoBuffer();
  ASSERT_EQ(dlock_buffer_after_resize.size(), 3);
  for (uint32_t i = 0; i < dlock_buffer_after_resize.size(); i++) {
    for (uint32_t j = 0; j < dlock_buffer_after_resize[i].path.size(); j++) {
      ASSERT_EQ(dlock_buffer_after_resize[i].path[j].m_txn_id,
                dlock_buffer_before_resize[i].path[j].m_txn_id);
    }
  }
  // Upsize the buffer and verify the 3 latest deadlocks are preserved.
  dlock_buffer_before_resize = db->GetDeadlockInfoBuffer();
  db->SetDeadlockInfoBufferSize(5);
  dlock_buffer_after_resize = db->GetDeadlockInfoBuffer();
  ASSERT_EQ(dlock_buffer_after_resize.size(), 3);
  for (uint32_t i = 0; i < dlock_buffer_before_resize.size(); i++) {
    for (uint32_t j = 0; j < dlock_buffer_before_resize[i].path.size(); j++) {
      ASSERT_EQ(dlock_buffer_after_resize[i].path[j].m_txn_id,
                dlock_buffer_before_resize[i].path[j].m_txn_id);
    }
  }
  // Downsize to 0 and verify the buffer is emptied.
  dlock_buffer_before_resize = db->GetDeadlockInfoBuffer();
  db->SetDeadlockInfoBufferSize(0);
  dlock_buffer_after_resize = db->GetDeadlockInfoBuffer();
  ASSERT_EQ(dlock_buffer_after_resize.size(), 0);
  // Upsize from 0 and verify the previously dropped entries do not reappear.
  dlock_buffer_before_resize = db->GetDeadlockInfoBuffer();
  db->SetDeadlockInfoBufferSize(3);
  dlock_buffer_after_resize = db->GetDeadlockInfoBuffer();
  ASSERT_EQ(dlock_buffer_after_resize.size(), 0);
  // Contrived case of shared lock of cycle size 2 to verify that a shared
  // lock causing a deadlock is correctly reported as "shared" in the buffer.
  std::vector<Transaction*> txns_shared(2);
  // Create a cycle of size 2.
  for (uint32_t i = 0; i < 2; i++) {
    txns_shared[i] = db->BeginTransaction(write_options, txn_options);
    ASSERT_TRUE(txns_shared[i]);
    auto s = txns_shared[i]->GetForUpdate(read_options, ToString(i), nullptr);
    ASSERT_OK(s);
  }
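  // Use a sync point to detect when the spawned thread is actually blocked
  // waiting for the lock, so the cycle can be completed deterministically.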
  std::atomic<uint32_t> checkpoints_shared(0);
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
      "TransactionLockMgr::AcquireWithTimeout:WaitingTxn",
      [&](void* arg) { checkpoints_shared.fetch_add(1); });
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
  std::vector<port::Thread> threads_shared;
  for (uint32_t i = 0; i < 1; i++) {
    std::function<void()> blocking_thread = [&, i] {
      auto s =
          txns_shared[i]->GetForUpdate(read_options, ToString(i + 1), nullptr);
      ASSERT_OK(s);
      txns_shared[i]->Rollback();
      delete txns_shared[i];
    };
    threads_shared.emplace_back(blocking_thread);
  }
  // Wait until all threads are waiting on each other.
  while (checkpoints_shared.load() != 1) {
    /* sleep override */
    std::this_thread::sleep_for(std::chrono::milliseconds(100));
  }
  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
  rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
  // Complete the cycle T2 -> T1 with a shared lock.
  auto s = txns_shared[1]->GetForUpdate(read_options, "0", nullptr, false);
  ASSERT_TRUE(s.IsDeadlock());
  auto dlock_buffer = db->GetDeadlockInfoBuffer();
  // Verify the size of the buffer and the single path.
  ASSERT_EQ(dlock_buffer.size(), 1);
  ASSERT_EQ(dlock_buffer[0].path.size(), 2);
  // Verify the exclusivity field of the transactions in the deadlock path.
  ASSERT_TRUE(dlock_buffer[0].path[0].m_exclusive);
  ASSERT_FALSE(dlock_buffer[0].path[1].m_exclusive);
  txns_shared[1]->Rollback();
  delete txns_shared[1];
  for (auto& t : threads_shared) {
    t.join();
  }
}
TEST_P(TransactionTest, DeadlockCycle) {
@@ -480,7 +607,8 @@ TEST_P(TransactionTest, DeadlockCycle) {
  ReadOptions read_options;
  TransactionOptions txn_options;
  const uint32_t kMaxCycleLength = 50;
  // offset by 2 from the max depth to test edge case
  const uint32_t kMaxCycleLength = 52;
  txn_options.lock_timeout = 1000000;
  txn_options.deadlock_detect = true;
@@ -489,6 +617,7 @@ TEST_P(TransactionTest, DeadlockCycle) {
  // Set up a long wait-for chain like this:
  //
  // T1 -> T2 -> T3 -> ... -> Tlen
  std::vector<Transaction*> txns(len);
  for (uint32_t i = 0; i < len; i++) {
@@ -509,8 +638,7 @@ TEST_P(TransactionTest, DeadlockCycle) {
  std::vector<port::Thread> threads;
  for (uint32_t i = 0; i < len - 1; i++) {
    std::function<void()> blocking_thread = [&, i] {
      auto s =
          txns[i]->GetForUpdate(read_options, ToString(i + 1), nullptr);
      auto s = txns[i]->GetForUpdate(read_options, ToString(i + 1), nullptr);
      ASSERT_OK(s);
      txns[i]->Rollback();
      delete txns[i];
@@ -530,6 +658,39 @@ TEST_P(TransactionTest, DeadlockCycle) {
  auto s = txns[len - 1]->GetForUpdate(read_options, "0", nullptr);
  ASSERT_TRUE(s.IsDeadlock());
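  // Only the most recent deadlocks are retained; with the default buffer
  // capacity of 5 (kInitialMaxDeadlocks), expect min(len - 1, 5) entries.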
  const uint32_t dlock_buffer_size_ = (len - 1 > 5) ? 5 : (len - 1);
  uint32_t curr_waiting_key = 0;
  TransactionID curr_txn_id = txns[0]->GetID();
  auto dlock_buffer = db->GetDeadlockInfoBuffer();
  ASSERT_EQ(dlock_buffer.size(), dlock_buffer_size_);
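  // A deadlock path longer than the maximum depth is not stored in full: the
  // recorded path is left empty and limit_exceeded is set instead.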
  uint32_t check_len = len;
  bool check_limit_flag = false;
  // Special case for a deadlock path that exceeds the maximum depth.
  if (len > 50) {
    check_len = 0;
    check_limit_flag = true;
  }
  auto dlock_entry = dlock_buffer[0].path;
  ASSERT_EQ(dlock_entry.size(), check_len);
  ASSERT_EQ(dlock_buffer[0].limit_exceeded, check_limit_flag);
  // Iterate backwards over the path, verifying decreasing txn ids.
  for (auto it = dlock_entry.rbegin(); it != dlock_entry.rend(); it++) {
    auto dl_node = *it;
    ASSERT_EQ(dl_node.m_txn_id, len + curr_txn_id - 1);
    ASSERT_EQ(dl_node.m_cf_id, 0);
    ASSERT_EQ(dl_node.m_waiting_key, ToString(curr_waiting_key));
    ASSERT_EQ(dl_node.m_exclusive, true);
    curr_txn_id--;
    if (curr_waiting_key == 0) {
      curr_waiting_key = len;
    }
    curr_waiting_key--;
  }
  // Rollback the last transaction.
  txns[len - 1]->Rollback();
  delete txns[len - 1];