@ -281,6 +281,17 @@ class DBTest {
}
}
// Switch between different compaction styles (we have only 2 now).
// Advance to the next compaction-style configuration, rebuilding the DB
// from scratch, so tests can iterate every style via
//   do { ... } while (ChangeCompactOptions());
// Returns true while a new configuration was activated, false once all
// styles (currently just default + universal) have been exhausted.
bool ChangeCompactOptions() {
  // Guard clause: kUniversalCompaction is the last (and only other) style.
  if (option_config_ != kDefault) {
    return false;
  }
  option_config_ = kUniversalCompaction;
  DestroyAndReopen();  // each configuration starts from an empty DB
  return true;
}
// Return the current option configuration.
Options CurrentOptions ( ) {
Options options ;
@ -838,6 +849,7 @@ TEST(DBTest, KeyMayExist) {
// A delete is skipped for key if KeyMayExist(key) returns False
// Tests WriteBatch consistency and proper delete behaviour
TEST ( DBTest , FilterDeletes ) {
do {
Options options = CurrentOptions ( ) ;
options . filter_policy = NewBloomFilterPolicy ( 20 ) ;
options . filter_deletes = true ;
@ -871,9 +883,11 @@ TEST(DBTest, FilterDeletes) {
batch . Clear ( ) ;
delete options . filter_policy ;
} while ( ChangeCompactOptions ( ) ) ;
}
TEST ( DBTest , IterEmpty ) {
do {
Iterator * iter = db_ - > NewIterator ( ReadOptions ( ) ) ;
iter - > SeekToFirst ( ) ;
@ -886,9 +900,11 @@ TEST(DBTest, IterEmpty) {
ASSERT_EQ ( IterStatus ( iter ) , " (invalid) " ) ;
delete iter ;
} while ( ChangeCompactOptions ( ) ) ;
}
TEST ( DBTest , IterSingle ) {
do {
ASSERT_OK ( Put ( " a " , " va " ) ) ;
Iterator * iter = db_ - > NewIterator ( ReadOptions ( ) ) ;
@ -924,9 +940,11 @@ TEST(DBTest, IterSingle) {
ASSERT_EQ ( IterStatus ( iter ) , " (invalid) " ) ;
delete iter ;
} while ( ChangeCompactOptions ( ) ) ;
}
TEST ( DBTest , IterMulti ) {
do {
ASSERT_OK ( Put ( " a " , " va " ) ) ;
ASSERT_OK ( Put ( " b " , " vb " ) ) ;
ASSERT_OK ( Put ( " c " , " vc " ) ) ;
@ -1007,9 +1025,11 @@ TEST(DBTest, IterMulti) {
ASSERT_EQ ( IterStatus ( iter ) , " (invalid) " ) ;
delete iter ;
} while ( ChangeCompactOptions ( ) ) ;
}
TEST ( DBTest , IterSmallAndLargeMix ) {
do {
ASSERT_OK ( Put ( " a " , " va " ) ) ;
ASSERT_OK ( Put ( " b " , std : : string ( 100000 , ' b ' ) ) ) ;
ASSERT_OK ( Put ( " c " , " vc " ) ) ;
@ -1045,6 +1065,7 @@ TEST(DBTest, IterSmallAndLargeMix) {
ASSERT_EQ ( IterStatus ( iter ) , " (invalid) " ) ;
delete iter ;
} while ( ChangeCompactOptions ( ) ) ;
}
TEST ( DBTest , IterMultiWithDelete ) {
@ -1106,6 +1127,7 @@ TEST(DBTest, RollLog) {
}
TEST ( DBTest , WAL ) {
do {
Options options = CurrentOptions ( ) ;
WriteOptions writeOpt = WriteOptions ( ) ;
writeOpt . disableWAL = true ;
@ -1135,16 +1157,22 @@ TEST(DBTest, WAL) {
// again both values should be present.
ASSERT_EQ ( " v3 " , Get ( " foo " ) ) ;
ASSERT_EQ ( " v3 " , Get ( " bar " ) ) ;
} while ( ChangeCompactOptions ( ) ) ;
}
// Verify the DB file lock: while one handle has the database open, a
// second open of the same database must be rejected.
TEST(DBTest, CheckLock) {
  do {
    DB* localdb;
    Options options = CurrentOptions();
    Status first_open = TryReopen(&options);
    ASSERT_TRUE(first_open.ok());
    // The first handle still holds the lock, so this second open must fail.
    Status second_open = PureReopen(&options, &localdb);
    ASSERT_TRUE(!second_open.ok());
  } while (ChangeCompactOptions());
}
TEST ( DBTest , FLUSH ) {
do {
Options options = CurrentOptions ( ) ;
WriteOptions writeOpt = WriteOptions ( ) ;
writeOpt . disableWAL = true ;
@ -1176,6 +1204,7 @@ TEST(DBTest, FLUSH) {
// has WAL enabled.
ASSERT_EQ ( " v3 " , Get ( " foo " ) ) ;
ASSERT_EQ ( " v3 " , Get ( " bar " ) ) ;
} while ( ChangeCompactOptions ( ) ) ;
}
TEST ( DBTest , RecoveryWithEmptyLog ) {
@ -1214,6 +1243,7 @@ TEST(DBTest, RecoverDuringMemtableCompaction) {
}
TEST ( DBTest , MinorCompactionsHappen ) {
do {
Options options = CurrentOptions ( ) ;
options . write_buffer_size = 10000 ;
Reopen ( & options ) ;
@ -1236,9 +1266,11 @@ TEST(DBTest, MinorCompactionsHappen) {
for ( int i = 0 ; i < N ; i + + ) {
ASSERT_EQ ( Key ( i ) + std : : string ( 1000 , ' v ' ) , Get ( Key ( i ) ) ) ;
}
} while ( ChangeCompactOptions ( ) ) ;
}
TEST ( DBTest , ManifestRollOver ) {
do {
Options options = CurrentOptions ( ) ;
options . max_manifest_file_size = 10 ; // 10 bytes
Reopen ( & options ) ;
@ -1260,10 +1292,11 @@ TEST(DBTest, ManifestRollOver) {
ASSERT_EQ ( std : : string ( 1000 , ' 2 ' ) , Get ( " manifest_key2 " ) ) ;
ASSERT_EQ ( std : : string ( 1000 , ' 3 ' ) , Get ( " manifest_key3 " ) ) ;
}
} while ( ChangeCompactOptions ( ) ) ;
}
TEST ( DBTest , RecoverWithLargeLog ) {
do {
{
Options options = CurrentOptions ( ) ;
Reopen ( & options ) ;
@ -1285,6 +1318,7 @@ TEST(DBTest, RecoverWithLargeLog) {
ASSERT_EQ ( std : : string ( 10 , ' 3 ' ) , Get ( " small3 " ) ) ;
ASSERT_EQ ( std : : string ( 10 , ' 4 ' ) , Get ( " small4 " ) ) ;
ASSERT_GT ( NumTableFilesAtLevel ( 0 ) , 1 ) ;
} while ( ChangeCompactOptions ( ) ) ;
}
TEST ( DBTest , CompactionsGenerateMultipleFiles ) {
@ -1348,6 +1382,139 @@ TEST(DBTest, CompactionTrigger) {
ASSERT_EQ ( NumTableFilesAtLevel ( 1 ) , 1 ) ;
}
// Exercise universal (size-tiered) compaction through five stages of file
// accumulation at level 0, checking after each stage how many level-0
// files remain and that no files are ever pushed below level 0.
TEST(DBTest, UniversalCompactionTrigger) {
  Options options = CurrentOptions();
  options.compaction_style = kCompactionStyleUniversal;
  options.write_buffer_size = 100 << 10;  // 100KB
  // trigger compaction if there are > 3 files
  options.level0_file_num_compaction_trigger = 3;
  Reopen(&options);

  Random rnd(301);
  int key_idx = 0;

  // Write 120KB (12 values, each 10K) -- roughly one flushed file's worth
  // of data.  Shared by every stage below.
  auto write_one_file_worth = [&]() {
    for (int i = 0; i < 12; i++) {
      ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
      key_idx++;
    }
  };

  // Universal compaction keeps everything in level 0; every level below
  // must stay empty throughout the test.
  auto verify_lower_levels_empty = [&]() {
    for (int i = 1; i < options.num_levels; i++) {
      ASSERT_EQ(NumTableFilesAtLevel(i), 0);
    }
  };

  // Stage 1:
  //   Generate a set of files at level 0, but don't trigger level-0
  //   compaction.
  for (int num = 0;
       num < options.level0_file_num_compaction_trigger;
       num++) {
    write_one_file_worth();
    dbfull()->TEST_WaitForCompactMemTable();
    ASSERT_EQ(NumTableFilesAtLevel(0), num + 1);
  }

  // Generate one more file at level-0, which should trigger level-0
  // compaction.
  write_one_file_worth();
  dbfull()->TEST_WaitForCompact();
  // Suppose each file flushed from mem table has size 1. Now we compact
  // (level0_file_num_compaction_trigger+1)=4 files and should have a big
  // file of size 4.
  ASSERT_EQ(NumTableFilesAtLevel(0), 1);
  verify_lower_levels_empty();

  // Stage 2:
  //   Now we have one file at level 0, with size 4. We also have some data in
  //   mem table. Let's continue generating new files at level 0, but don't
  //   trigger level-0 compaction.
  //   First, clean up memtable before inserting new data. This will generate
  //   a level-0 file, with size around 0.4 (according to previously written
  //   data amount).
  dbfull()->Flush(FlushOptions());
  for (int num = 0;
       num < options.level0_file_num_compaction_trigger - 2;
       num++) {
    write_one_file_worth();
    dbfull()->TEST_WaitForCompactMemTable();
    ASSERT_EQ(NumTableFilesAtLevel(0), num + 3);
  }

  // Generate one more file at level-0, which should trigger level-0
  // compaction.
  write_one_file_worth();
  dbfull()->TEST_WaitForCompact();
  // Before compaction, we have 4 files at level 0, with size 4, 0.4, 1, 1.
  // After compaction, we should have 2 files, with size 4, 2.4.
  ASSERT_EQ(NumTableFilesAtLevel(0), 2);
  verify_lower_levels_empty();

  // Stage 3:
  //   Now we have 2 files at level 0, with size 4 and 2.4. Continue
  //   generating new files at level 0.
  for (int num = 0;
       num < options.level0_file_num_compaction_trigger - 2;
       num++) {
    write_one_file_worth();
    dbfull()->TEST_WaitForCompactMemTable();
    ASSERT_EQ(NumTableFilesAtLevel(0), num + 3);
  }

  // Generate one more file at level-0, which should trigger level-0
  // compaction.
  write_one_file_worth();
  dbfull()->TEST_WaitForCompact();
  // Before compaction, we have 4 files at level 0, with size 4, 2.4, 1, 1.
  // After compaction, we should have 3 files, with size 4, 2.4, 2.
  ASSERT_EQ(NumTableFilesAtLevel(0), 3);
  verify_lower_levels_empty();

  // Stage 4:
  //   Now we have 3 files at level 0, with size 4, 2.4, 2. Let's generate a
  //   new file of size 1.
  write_one_file_worth();
  dbfull()->TEST_WaitForCompact();
  // Level-0 compaction is triggered, but no file will be picked up.
  ASSERT_EQ(NumTableFilesAtLevel(0), 4);
  verify_lower_levels_empty();

  // Stage 5:
  //   Now we have 4 files at level 0, with size 4, 2.4, 2, 1. Let's generate
  //   a new file of size 1.
  write_one_file_worth();
  dbfull()->TEST_WaitForCompact();
  // All files at level 0 will be compacted into a single one.
  ASSERT_EQ(NumTableFilesAtLevel(0), 1);
  verify_lower_levels_empty();
}
void MinLevelHelper ( DBTest * self , Options & options ) {
Random rnd ( 301 ) ;
@ -1413,6 +1580,7 @@ bool MinLevelToCompress(CompressionType& type, Options& options, int wbits,
}
return true ;
}
TEST ( DBTest , MinLevelToCompress1 ) {
Options options = CurrentOptions ( ) ;
CompressionType type ;
@ -1454,6 +1622,7 @@ TEST(DBTest, MinLevelToCompress2) {
}
TEST ( DBTest , RepeatedWritesToSameKey ) {
do {
Options options = CurrentOptions ( ) ;
options . env = env_ ;
options . write_buffer_size = 100000 ; // Small write buffer
@ -1470,6 +1639,7 @@ TEST(DBTest, RepeatedWritesToSameKey) {
Put ( " key " , value ) ;
ASSERT_LE ( TotalTableFiles ( ) , kMaxFiles ) ;
}
} while ( ChangeCompactOptions ( ) ) ;
}
// This is a static filter used for filtering
@ -1669,6 +1839,7 @@ TEST(DBTest, CompactionFilter) {
}
TEST ( DBTest , CompactionFilterWithValueChange ) {
do {
Options options = CurrentOptions ( ) ;
options . num_levels = 3 ;
options . max_mem_compaction_level = 0 ;
@ -1714,9 +1885,11 @@ TEST(DBTest, CompactionFilterWithValueChange) {
std : : string newvalue = Get ( key ) ;
ASSERT_EQ ( newvalue . compare ( NEW_VALUE ) , 0 ) ;
}
} while ( ChangeCompactOptions ( ) ) ;
}
TEST ( DBTest , SparseMerge ) {
do {
Options options = CurrentOptions ( ) ;
options . compression = kNoCompression ;
Reopen ( & options ) ;
@ -1754,6 +1927,7 @@ TEST(DBTest, SparseMerge) {
ASSERT_LE ( dbfull ( ) - > TEST_MaxNextLevelOverlappingBytes ( ) , 20 * 1048576 ) ;
dbfull ( ) - > TEST_CompactRange ( 1 , nullptr , nullptr ) ;
ASSERT_LE ( dbfull ( ) - > TEST_MaxNextLevelOverlappingBytes ( ) , 20 * 1048576 ) ;
} while ( ChangeCompactOptions ( ) ) ;
}
static bool Between ( uint64_t val , uint64_t low , uint64_t high ) {
@ -1856,6 +2030,7 @@ TEST(DBTest, ApproximateSizes_MixOfSmallAndLarge) {
}
TEST ( DBTest , IteratorPinsRef ) {
do {
Put ( " foo " , " hello " ) ;
// Get iterator that will yield the current contents of the DB.
@ -1875,6 +2050,7 @@ TEST(DBTest, IteratorPinsRef) {
iter - > Next ( ) ;
ASSERT_TRUE ( ! iter - > Valid ( ) ) ;
delete iter ;
} while ( ChangeCompactOptions ( ) ) ;
}
TEST ( DBTest , Snapshot ) {
@ -2092,6 +2268,7 @@ TEST(DBTest, OverlapInLevel0) {
}
TEST ( DBTest , L0_CompactionBug_Issue44_a ) {
do {
Reopen ( ) ;
ASSERT_OK ( Put ( " b " , " v " ) ) ;
Reopen ( ) ;
@ -2106,9 +2283,11 @@ TEST(DBTest, L0_CompactionBug_Issue44_a) {
ASSERT_EQ ( " (a->v) " , Contents ( ) ) ;
env_ - > SleepForMicroseconds ( 1000000 ) ; // Wait for compaction to finish
ASSERT_EQ ( " (a->v) " , Contents ( ) ) ;
} while ( ChangeCompactOptions ( ) ) ;
}
TEST ( DBTest , L0_CompactionBug_Issue44_b ) {
do {
Reopen ( ) ;
Put ( " " , " " ) ;
Reopen ( ) ;
@ -2132,6 +2311,7 @@ TEST(DBTest, L0_CompactionBug_Issue44_b) {
ASSERT_EQ ( " (->)(c->cv) " , Contents ( ) ) ;
env_ - > SleepForMicroseconds ( 1000000 ) ; // Wait for compaction to finish
ASSERT_EQ ( " (->)(c->cv) " , Contents ( ) ) ;
} while ( ChangeCompactOptions ( ) ) ;
}
TEST ( DBTest , ComparatorCheck ) {
@ -2148,6 +2328,8 @@ TEST(DBTest, ComparatorCheck) {
BytewiseComparator ( ) - > FindShortSuccessor ( key ) ;
}
} ;
do {
NewComparator cmp ;
Options new_options = CurrentOptions ( ) ;
new_options . comparator = & cmp ;
@ -2155,6 +2337,7 @@ TEST(DBTest, ComparatorCheck) {
ASSERT_TRUE ( ! s . ok ( ) ) ;
ASSERT_TRUE ( s . ToString ( ) . find ( " comparator " ) ! = std : : string : : npos )
< < s . ToString ( ) ;
} while ( ChangeCompactOptions ( ) ) ;
}
TEST ( DBTest , CustomComparator ) {
@ -2183,6 +2366,8 @@ TEST(DBTest, CustomComparator) {
return val ;
}
} ;
do {
NumberComparator cmp ;
Options new_options = CurrentOptions ( ) ;
new_options . create_if_missing = true ;
@ -2210,6 +2395,7 @@ TEST(DBTest, CustomComparator) {
}
Compact ( " [0] " , " [1000000] " ) ;
}
} while ( ChangeCompactOptions ( ) ) ;
}
TEST ( DBTest , ManualCompaction ) {
@ -2342,9 +2528,9 @@ TEST(DBTest, DestroyDBMetaDatabase) {
ASSERT_TRUE ( ! DB : : Open ( opts , metametadbname , & db ) . ok ( ) ) ;
}
// Check that number of files does not grow when we are out of space
TEST ( DBTest , NoSpace ) {
do {
Options options = CurrentOptions ( ) ;
options . env = env_ ;
Reopen ( & options ) ;
@ -2365,10 +2551,11 @@ TEST(DBTest, NoSpace) {
// Check that compaction attempts slept after errors
ASSERT_GE ( env_ - > sleep_counter_ . Read ( ) , 5 ) ;
} while ( ChangeCompactOptions ( ) ) ;
}
TEST ( DBTest , NonWritableFileSystem )
{
TEST ( DBTest , NonWritableFileSystem ) {
do {
Options options = CurrentOptions ( ) ;
options . write_buffer_size = 1000 ;
options . env = env_ ;
@ -2385,6 +2572,7 @@ TEST(DBTest, NonWritableFileSystem)
}
ASSERT_GT ( errors , 0 ) ;
env_ - > non_writable_ . Release_Store ( nullptr ) ;
} while ( ChangeCompactOptions ( ) ) ;
}
TEST ( DBTest , ManifestWriteError ) {
@ -2429,6 +2617,7 @@ TEST(DBTest, ManifestWriteError) {
}
TEST ( DBTest , FilesDeletedAfterCompaction ) {
do {
ASSERT_OK ( Put ( " foo " , " v2 " ) ) ;
Compact ( " a " , " z " ) ;
const int num_files = CountLiveFiles ( ) ;
@ -2437,9 +2626,11 @@ TEST(DBTest, FilesDeletedAfterCompaction) {
Compact ( " a " , " z " ) ;
}
ASSERT_EQ ( CountLiveFiles ( ) , num_files ) ;
} while ( ChangeCompactOptions ( ) ) ;
}
TEST ( DBTest , BloomFilter ) {
do {
env_ - > count_random_reads_ = true ;
Options options = CurrentOptions ( ) ;
options . env = env_ ;
@ -2483,9 +2674,11 @@ TEST(DBTest, BloomFilter) {
env_ - > delay_sstable_sync_ . Release_Store ( nullptr ) ;
Close ( ) ;
delete options . filter_policy ;
} while ( ChangeCompactOptions ( ) ) ;
}
TEST ( DBTest , SnapshotFiles ) {
do {
Options options = CurrentOptions ( ) ;
const EnvOptions soptions ;
options . write_buffer_size = 100000000 ; // Large write buffer
@ -2613,9 +2806,11 @@ TEST(DBTest, SnapshotFiles) {
// release file snapshot
dbfull ( ) - > DisableFileDeletions ( ) ;
} while ( ChangeCompactOptions ( ) ) ;
}
TEST ( DBTest , CompactOnFlush ) {
do {
Options options = CurrentOptions ( ) ;
options . purge_redundant_kvs_while_flush = true ;
options . disable_auto_compactions = true ;
@ -2697,6 +2892,7 @@ TEST(DBTest, CompactOnFlush) {
ASSERT_OK ( dbfull ( ) - > TEST_CompactMemTable ( ) ) ;
ASSERT_EQ ( AllEntriesFor ( " foo " ) , " [ v9 ] " ) ;
db_ - > ReleaseSnapshot ( snapshot1 ) ;
} while ( ChangeCompactOptions ( ) ) ;
}
std : : vector < std : : uint64_t > ListLogFiles ( Env * env , const std : : string & path ) {
@ -2716,6 +2912,7 @@ std::vector<std::uint64_t> ListLogFiles(Env* env, const std::string& path) {
}
TEST ( DBTest , WALArchival ) {
do {
std : : string value ( 1024 , ' 1 ' ) ;
Options options = CurrentOptions ( ) ;
options . create_if_missing = true ;
@ -2756,10 +2953,11 @@ TEST(DBTest, WALArchival) {
logFiles = ListLogFiles ( env_ , archiveDir ) ;
ASSERT_TRUE ( logFiles . size ( ) = = 0 ) ;
} while ( ChangeCompactOptions ( ) ) ;
}
TEST ( DBTest , WALClear ) {
do {
Options options = CurrentOptions ( ) ;
options . create_if_missing = true ;
options . WAL_ttl_seconds = 1 ;
@ -2775,6 +2973,7 @@ TEST(DBTest, WALClear) {
dbfull ( ) - > TEST_PurgeObsoleteteWAL ( ) ;
log_files = ListLogFiles ( env_ , archive_dir ) ;
ASSERT_TRUE ( log_files . empty ( ) ) ;
} while ( ChangeCompactOptions ( ) ) ;
}
void ExpectRecords (
@ -2794,6 +2993,7 @@ void ExpectRecords(
}
TEST ( DBTest , TransactionLogIterator ) {
do {
Options options = OptionsForLogIterTest ( ) ;
DestroyAndReopen ( & options ) ;
Put ( " key1 " , DummyString ( 1024 ) ) ;
@ -2814,9 +3014,11 @@ TEST(DBTest, TransactionLogIterator) {
auto iter = OpenTransactionLogIter ( 0 ) ;
ExpectRecords ( 6 , iter ) ;
}
} while ( ChangeCompactOptions ( ) ) ;
}
TEST ( DBTest , TransactionLogIteratorMoveOverZeroFiles ) {
do {
Options options = OptionsForLogIterTest ( ) ;
DestroyAndReopen ( & options ) ;
// Do a plain Reopen.
@ -2829,9 +3031,11 @@ TEST(DBTest, TransactionLogIteratorMoveOverZeroFiles) {
auto iter = OpenTransactionLogIter ( 0 ) ;
ExpectRecords ( 2 , iter ) ;
} while ( ChangeCompactOptions ( ) ) ;
}
TEST ( DBTest , TransactionLogIteratorStallAtLastRecord ) {
do {
Options options = OptionsForLogIterTest ( ) ;
DestroyAndReopen ( & options ) ;
Put ( " key1 " , DummyString ( 1024 ) ) ;
@ -2845,17 +3049,21 @@ TEST(DBTest, TransactionLogIteratorStallAtLastRecord) {
iter - > Next ( ) ;
ASSERT_OK ( iter - > status ( ) ) ;
ASSERT_TRUE ( iter - > Valid ( ) ) ;
} while ( ChangeCompactOptions ( ) ) ;
}
// With a freshly created, never-written DB, asking for a transaction-log
// iterator must fail: there are no updates to replay.
TEST(DBTest, TransactionLogIteratorJustEmptyFile) {
  do {
    Options options = OptionsForLogIterTest();
    DestroyAndReopen(&options);
    unique_ptr<TransactionLogIterator> iter;
    // Nothing has been written, so updates-since-sequence-0 cannot succeed.
    const Status s = dbfull()->GetUpdatesSince(0, &iter);
    ASSERT_TRUE(!s.ok());
  } while (ChangeCompactOptions());
}
TEST ( DBTest , TransactionLogIteratorCheckAfterRestart ) {
do {
Options options = OptionsForLogIterTest ( ) ;
DestroyAndReopen ( & options ) ;
Put ( " key1 " , DummyString ( 1024 ) ) ;
@ -2864,9 +3072,11 @@ TEST(DBTest, TransactionLogIteratorCheckAfterRestart) {
Reopen ( & options ) ;
auto iter = OpenTransactionLogIter ( 0 ) ;
ExpectRecords ( 2 , iter ) ;
} while ( ChangeCompactOptions ( ) ) ;
}
TEST ( DBTest , TransactionLogIteratorBatchOperations ) {
do {
Options options = OptionsForLogIterTest ( ) ;
DestroyAndReopen ( & options ) ;
WriteBatch batch ;
@ -2880,6 +3090,7 @@ TEST(DBTest, TransactionLogIteratorBatchOperations) {
Put ( " key4 " , DummyString ( 1024 ) ) ;
auto iter = OpenTransactionLogIter ( 3 ) ;
ExpectRecords ( 1 , iter ) ;
} while ( ChangeCompactOptions ( ) ) ;
}
TEST ( DBTest , ReadCompaction ) {
@ -3334,6 +3545,7 @@ TEST(DBTest, Randomized) {
}
TEST ( DBTest , MultiGetSimple ) {
do {
ASSERT_OK ( db_ - > Put ( WriteOptions ( ) , " k1 " , " v1 " ) ) ;
ASSERT_OK ( db_ - > Put ( WriteOptions ( ) , " k2 " , " v2 " ) ) ;
ASSERT_OK ( db_ - > Put ( WriteOptions ( ) , " k3 " , " v3 " ) ) ;
@ -3365,9 +3577,11 @@ TEST(DBTest, MultiGetSimple) {
ASSERT_TRUE ( s [ 3 ] . IsNotFound ( ) ) ;
ASSERT_OK ( s [ 4 ] ) ;
ASSERT_TRUE ( s [ 5 ] . IsNotFound ( ) ) ;
} while ( ChangeCompactOptions ( ) ) ;
}
TEST ( DBTest , MultiGetEmpty ) {
do {
// Empty Key Set
std : : vector < Slice > keys ;
std : : vector < std : : string > values ;
@ -3386,6 +3600,7 @@ TEST(DBTest, MultiGetEmpty) {
s = db_ - > MultiGet ( ReadOptions ( ) , keys , & values ) ;
ASSERT_EQ ( ( int ) s . size ( ) , 2 ) ;
ASSERT_TRUE ( s [ 0 ] . IsNotFound ( ) & & s [ 1 ] . IsNotFound ( ) ) ;
} while ( ChangeCompactOptions ( ) ) ;
}
std : : string MakeKey ( unsigned int num ) {