@@ -84,7 +84,28 @@ class MockWriteCallback : public WriteCallback {
bool AllowWriteBatching ( ) override { return allow_batching_ ; }
} ;
TEST_F ( WriteCallbackTest , WriteWithCallbackTest ) {
class WriteCallbackPTest
: public WriteCallbackTest ,
public : : testing : : WithParamInterface <
std : : tuple < bool , bool , bool , bool , bool , bool , bool > > {
public :
WriteCallbackPTest ( ) {
std : : tie ( unordered_write_ , seq_per_batch_ , two_queues_ , allow_parallel_ ,
allow_batching_ , enable_WAL_ , enable_pipelined_write_ ) =
GetParam ( ) ;
}
protected :
bool unordered_write_ ;
bool seq_per_batch_ ;
bool two_queues_ ;
bool allow_parallel_ ;
bool allow_batching_ ;
bool enable_WAL_ ;
bool enable_pipelined_write_ ;
} ;
// NOTE(review): everything below is a unified-diff span whose +/- markers
// were stripped. Lines that read flags WITHOUT a trailing underscore
// (seq_per_batch, allow_batching, ...) are the removed TEST_F side; lines
// reading fixture members WITH a trailing underscore (seq_per_batch_, ...)
// are the added TEST_P side. Each "@ -a,b +c,d @" line marks a hunk
// boundary; unchanged lines between hunks are NOT shown here. Reconcile
// against the original patch before treating this as compilable code.
//
// Test intent (visible in the code below): for the current option
// combination, open a DBImpl, issue writes whose callbacks may fail or
// allow batching, and verify callback invocation, writer-thread states,
// sequence-number accounting, and that failed-callback keys are absent
// while successful keys are readable.
TEST_P ( WriteCallbackPTest , WriteWithCallbackTest ) {
struct WriteOP {
WriteOP ( bool should_fail = false ) { callback_ . should_fail_ = should_fail ; }
@ -124,29 +145,21 @@ TEST_F(WriteCallbackTest, WriteWithCallbackTest) {
{ false , false , true , false , true } ,
} ;
// NOTE(review): the seven nested for-loops below are the REMOVED side --
// the TEST_P version replaces them with the fixture parameters and keeps
// only the write_scenarios loop.
for ( auto & unordered_write : { true , false } ) {
for ( auto & seq_per_batch : { true , false } ) {
for ( auto & two_queues : { true , false } ) {
for ( auto & allow_parallel : { true , false } ) {
for ( auto & allow_batching : { true , false } ) {
for ( auto & enable_WAL : { true , false } ) {
for ( auto & enable_pipelined_write : { true , false } ) {
for ( auto & write_group : write_scenarios ) {
Options options ;
options . create_if_missing = true ;
// Old (loop-variable) assignments -- removed side:
options . unordered_write = unordered_write ;
options . allow_concurrent_memtable_write = allow_parallel ;
options . enable_pipelined_write = enable_pipelined_write ;
options . two_write_queues = two_queues ;
// New (fixture-member) assignments -- added side:
options . unordered_write = unordered_write_ ;
options . allow_concurrent_memtable_write = allow_parallel_ ;
options . enable_pipelined_write = enable_pipelined_write_ ;
options . two_write_queues = two_queues_ ;
// Skip unsupported combinations
if ( options . enable_pipelined_write & & seq_per_batch ) {
if ( options . enable_pipelined_write & & seq_per_batch_ ) {
continue ;
}
if ( options . enable_pipelined_write & & options . two_write_queues ) {
continue ;
}
if ( options . unordered_write & &
! options . allow_concurrent_memtable_write ) {
if ( options . unordered_write & & ! options . allow_concurrent_memtable_write ) {
continue ;
}
if ( options . unordered_write & & options . enable_pipelined_write ) {
@ -165,9 +178,8 @@ TEST_F(WriteCallbackTest, WriteWithCallbackTest) {
column_families . push_back (
ColumnFamilyDescriptor ( kDefaultColumnFamilyName , cf_options ) ) ;
std : : vector < ColumnFamilyHandle * > handles ;
// Old vs new formatting of the same DBImpl::Open call; note the
// argument changes from seq_per_batch (loop var) to seq_per_batch_.
auto open_s =
DBImpl : : Open ( db_options , dbname , column_families , & handles ,
& db , seq_per_batch , true /* batch_per_txn */ ) ;
auto open_s = DBImpl : : Open ( db_options , dbname , column_families , & handles ,
& db , seq_per_batch_ , true /* batch_per_txn */ ) ;
ASSERT_OK ( open_s ) ;
assert ( handles . size ( ) = = 1 ) ;
delete handles [ 0 ] ;
@ -214,8 +226,7 @@ TEST_F(WriteCallbackTest, WriteWithCallbackTest) {
ASSERT_TRUE ( writer - > state = =
WriteThread : : State : : STATE_GROUP_LEADER ) ;
} else {
ASSERT_TRUE ( writer - > state = =
WriteThread : : State : : STATE_INIT ) ;
ASSERT_TRUE ( writer - > state = = WriteThread : : State : : STATE_INIT ) ;
}
// (meta test) the first WriteOP should indeed be the first
@ -241,17 +252,15 @@ TEST_F(WriteCallbackTest, WriteWithCallbackTest) {
// check my state
auto * writer = reinterpret_cast < WriteThread : : Writer * > ( arg ) ;
if ( ! allow_batching ) {
if ( ! allow_batching_ ) {
// no batching so everyone should be a leader
ASSERT_TRUE ( writer - > state = =
WriteThread : : State : : STATE_GROUP_LEADER ) ;
} else if ( ! allow_parallel ) {
ASSERT_TRUE ( writer - > state = =
WriteThread : : State : : STATE_COMPLETED | |
( enable_pipelined_write & &
} else if ( ! allow_parallel_ ) {
ASSERT_TRUE ( writer - > state = = WriteThread : : State : : STATE_COMPLETED | |
( enable_pipelined_write_ & &
writer - > state = =
WriteThread : : State : :
STATE_MEMTABLE_WRITER_LEADER ) ) ;
WriteThread : : State : : STATE_MEMTABLE_WRITER_LEADER ) ) ;
}
} ) ;
@ -275,7 +284,7 @@ TEST_F(WriteCallbackTest, WriteWithCallbackTest) {
auto & write_op = write_group . at ( i ) ;
write_op . Clear ( ) ;
write_op . callback_ . allow_batching_ = allow_batching ;
write_op . callback_ . allow_batching_ = allow_batching_ ;
// insert some keys
for ( uint32_t j = 0 ; j < rnd . Next ( ) % 50 ; j + + ) {
@ -286,41 +295,39 @@ TEST_F(WriteCallbackTest, WriteWithCallbackTest) {
string sval ( 10 , my_key ) ;
write_op . Put ( skey , sval ) ;
// Sequence accounting: one increment per key when NOT seq_per_batch,
// else one increment per batch (the block after this loop).
if ( ! write_op . callback_ . should_fail_ & & ! seq_per_batch ) {
if ( ! write_op . callback_ . should_fail_ & & ! seq_per_batch_ ) {
seq . fetch_add ( 1 ) ;
}
}
if ( ! write_op . callback_ . should_fail_ & & seq_per_batch ) {
if ( ! write_op . callback_ . should_fail_ & & seq_per_batch_ ) {
seq . fetch_add ( 1 ) ;
}
WriteOptions woptions ;
woptions . disableWAL = ! enable_WAL ;
woptions . sync = enable_WAL ;
woptions . disableWAL = ! enable_WAL_ ;
woptions . sync = enable_WAL_ ;
Status s ;
if ( seq_per_batch ) {
if ( seq_per_batch_ ) {
// Local PreReleaseCallback that publishes the last sequence number
// back to the DBImpl after each write.
class PublishSeqCallback : public PreReleaseCallback {
public :
PublishSeqCallback ( DBImpl * db_impl_in )
: db_impl_ ( db_impl_in ) { }
Status Callback ( SequenceNumber last_seq , bool /*not used*/ ,
uint64_t , size_t /*index*/ ,
size_t /*total*/ ) override {
PublishSeqCallback ( DBImpl * db_impl_in ) : db_impl_ ( db_impl_in ) { }
Status Callback ( SequenceNumber last_seq , bool /*not used*/ , uint64_t ,
size_t /*index*/ , size_t /*total*/ ) override {
db_impl_ - > SetLastPublishedSequence ( last_seq ) ;
return Status : : OK ( ) ;
}
DBImpl * db_impl_ ;
} publish_seq_callback ( db_impl ) ;
// seq_per_batch requires a natural batch separator or Noop
// seq_per_batch_ requires a natural batch separator or Noop
WriteBatchInternal : : InsertNoop ( & write_op . write_batch_ ) ;
const size_t ONE_BATCH = 1 ;
s = db_impl - > WriteImpl (
woptions , & write_op . write_batch_ , & write_op . callback_ ,
nullptr , 0 , false , nullptr , ONE_BATCH ,
two_queues ? & publish_seq_callback : nullptr ) ;
s = db_impl - > WriteImpl ( woptions , & write_op . write_batch_ ,
& write_op . callback_ , nullptr , 0 , false , nullptr ,
ONE_BATCH ,
two_queues_ ? & publish_seq_callback : nullptr ) ;
} else {
s = db_impl - > WriteWithCallback (
woptions , & write_op . write_batch_ , & write_op . callback_ ) ;
s = db_impl - > WriteWithCallback ( woptions , & write_op . write_batch_ ,
& write_op . callback_ ) ;
}
if ( write_op . callback_ . should_fail_ ) {
@ -349,8 +356,7 @@ TEST_F(WriteCallbackTest, WriteWithCallbackTest) {
ASSERT_TRUE ( w . callback_ . was_called_ . load ( ) ) ;
for ( auto & kvp : w . kvs_ ) {
// Keys written under a failing callback must not be visible; all
// others must round-trip with their original values.
if ( w . callback_ . should_fail_ ) {
ASSERT_TRUE (
db - > Get ( read_options , kvp . first , & value ) . IsNotFound ( ) ) ;
ASSERT_TRUE ( db - > Get ( read_options , kvp . first , & value ) . IsNotFound ( ) ) ;
} else {
ASSERT_OK ( db - > Get ( read_options , kvp . first , & value ) ) ;
ASSERT_EQ ( value , kvp . second ) ;
@ -363,15 +369,14 @@ TEST_F(WriteCallbackTest, WriteWithCallbackTest) {
delete db ;
DestroyDB ( dbname , options ) ;
}
// NOTE(review): the run of closing braces below closes the seven removed
// for-loops; in the post-patch TEST_P version most of them are deleted.
}
}
}
}
}
}
}
}
}
// Instantiate WriteCallbackPTest over every combination of its seven
// boolean knobs (2^7 = 128 cases). Tuple order matches the fixture's
// unpacking: unordered_write, seq_per_batch, two_queues, allow_parallel,
// allow_batching, enable_WAL, enable_pipelined_write.
// NOTE(review): INSTANTIATE_TEST_CASE_P is the legacy gtest spelling;
// newer gtest renames it INSTANTIATE_TEST_SUITE_P -- confirm the pinned
// gtest version before modernizing.
INSTANTIATE_TEST_CASE_P ( WriteCallbackPTest , WriteCallbackPTest ,
: : testing : : Combine ( : : testing : : Bool ( ) , : : testing : : Bool ( ) ,
: : testing : : Bool ( ) , : : testing : : Bool ( ) ,
: : testing : : Bool ( ) , : : testing : : Bool ( ) ,
: : testing : : Bool ( ) ) ) ;
TEST_F ( WriteCallbackTest , WriteCallBackTest ) {
Options options ;
WriteOptions write_options ;