@@ -2734,7 +2734,7 @@ TEST_P(MultiThreadedDBTest, MultiThreaded) {
Options options = CurrentOptions ( options_override ) ;
std : : vector < std : : string > cfs ;
for ( int i = 1 ; i < kColumnFamilies ; + + i ) {
cfs . push_back ( ToString ( i ) ) ;
cfs . push_back ( std : : to_string ( i ) ) ;
}
Reopen ( options ) ;
CreateAndReopenWithCF ( cfs , options ) ;
@@ -2786,7 +2786,7 @@ static void GCThreadBody(void* arg) {
WriteOptions wo ;
for ( int i = 0 ; i < kGCNumKeys ; + + i ) {
std : : string kv ( ToString ( i + id * kGCNumKeys ) ) ;
std : : string kv ( std : : to_string ( i + id * kGCNumKeys ) ) ;
ASSERT_OK ( db - > Put ( wo , kv , kv ) ) ;
}
t - > done = true ;
@@ -2822,7 +2822,7 @@ TEST_F(DBTest, GroupCommitTest) {
std : : vector < std : : string > expected_db ;
for ( int i = 0 ; i < kGCNumThreads * kGCNumKeys ; + + i ) {
expected_db . push_back ( ToString ( i ) ) ;
expected_db . push_back ( std : : to_string ( i ) ) ;
}
std : : sort ( expected_db . begin ( ) , expected_db . end ( ) ) ;
@@ -3591,7 +3591,7 @@ TEST_P(DBTestWithParam, FIFOCompactionTest) {
Random rnd ( 301 ) ;
for ( int i = 0 ; i < 6 ; + + i ) {
for ( int j = 0 ; j < 110 ; + + j ) {
ASSERT_OK ( Put ( ToString ( i * 100 + j ) , rnd . RandomString ( 980 ) ) ) ;
ASSERT_OK ( Put ( std : : to_string ( i * 100 + j ) , rnd . RandomString ( 980 ) ) ) ;
}
// flush should happen here
ASSERT_OK ( dbfull ( ) - > TEST_WaitForFlushMemTable ( ) ) ;
@@ -3607,7 +3607,7 @@ TEST_P(DBTestWithParam, FIFOCompactionTest) {
ASSERT_EQ ( NumTableFilesAtLevel ( 0 ) , 5 ) ;
for ( int i = 0 ; i < 50 ; + + i ) {
// these keys should be deleted in previous compaction
ASSERT_EQ ( " NOT_FOUND " , Get ( ToString ( i ) ) ) ;
ASSERT_EQ ( " NOT_FOUND " , Get ( std : : to_string ( i ) ) ) ;
}
}
}
@@ -3629,7 +3629,7 @@ TEST_F(DBTest, FIFOCompactionTestWithCompaction) {
for ( int i = 0 ; i < 60 ; i + + ) {
// Generate and flush a file about 20KB.
for ( int j = 0 ; j < 20 ; j + + ) {
ASSERT_OK ( Put ( ToString ( i * 20 + j ) , rnd . RandomString ( 980 ) ) ) ;
ASSERT_OK ( Put ( std : : to_string ( i * 20 + j ) , rnd . RandomString ( 980 ) ) ) ;
}
ASSERT_OK ( Flush ( ) ) ;
ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ;
@@ -3640,7 +3640,7 @@ TEST_F(DBTest, FIFOCompactionTestWithCompaction) {
for ( int i = 0 ; i < 60 ; i + + ) {
// Generate and flush a file about 20KB.
for ( int j = 0 ; j < 20 ; j + + ) {
ASSERT_OK ( Put ( ToString ( i * 20 + j + 2000 ) , rnd . RandomString ( 980 ) ) ) ;
ASSERT_OK ( Put ( std : : to_string ( i * 20 + j + 2000 ) , rnd . RandomString ( 980 ) ) ) ;
}
ASSERT_OK ( Flush ( ) ) ;
ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ;
@@ -3670,27 +3670,27 @@ TEST_F(DBTest, FIFOCompactionStyleWithCompactionAndDelete) {
Random rnd ( 301 ) ;
for ( int i = 0 ; i < 3 ; i + + ) {
// Each file contains a different key which will be dropped later.
ASSERT_OK ( Put ( " a " + ToString ( i ) , rnd . RandomString ( 500 ) ) ) ;
ASSERT_OK ( Put ( " key " + ToString ( i ) , " " ) ) ;
ASSERT_OK ( Put ( " z " + ToString ( i ) , rnd . RandomString ( 500 ) ) ) ;
ASSERT_OK ( Put ( " a " + std : : to_string ( i ) , rnd . RandomString ( 500 ) ) ) ;
ASSERT_OK ( Put ( " key " + std : : to_string ( i ) , " " ) ) ;
ASSERT_OK ( Put ( " z " + std : : to_string ( i ) , rnd . RandomString ( 500 ) ) ) ;
ASSERT_OK ( Flush ( ) ) ;
ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ;
}
ASSERT_EQ ( NumTableFilesAtLevel ( 0 ) , 1 ) ;
for ( int i = 0 ; i < 3 ; i + + ) {
ASSERT_EQ ( " " , Get ( " key " + ToString ( i ) ) ) ;
ASSERT_EQ ( " " , Get ( " key " + std : : to_string ( i ) ) ) ;
}
for ( int i = 0 ; i < 3 ; i + + ) {
// Each file contains a different key which will be dropped later.
ASSERT_OK ( Put ( " a " + ToString ( i ) , rnd . RandomString ( 500 ) ) ) ;
ASSERT_OK ( Delete ( " key " + ToString ( i ) ) ) ;
ASSERT_OK ( Put ( " z " + ToString ( i ) , rnd . RandomString ( 500 ) ) ) ;
ASSERT_OK ( Put ( " a " + std : : to_string ( i ) , rnd . RandomString ( 500 ) ) ) ;
ASSERT_OK ( Delete ( " key " + std : : to_string ( i ) ) ) ;
ASSERT_OK ( Put ( " z " + std : : to_string ( i ) , rnd . RandomString ( 500 ) ) ) ;
ASSERT_OK ( Flush ( ) ) ;
ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ;
}
ASSERT_EQ ( NumTableFilesAtLevel ( 0 ) , 2 ) ;
for ( int i = 0 ; i < 3 ; i + + ) {
ASSERT_EQ ( " NOT_FOUND " , Get ( " key " + ToString ( i ) ) ) ;
ASSERT_EQ ( " NOT_FOUND " , Get ( " key " + std : : to_string ( i ) ) ) ;
}
}
@@ -3759,7 +3759,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) {
for ( int i = 0 ; i < 10 ; i + + ) {
// Generate and flush a file about 10KB.
for ( int j = 0 ; j < 10 ; j + + ) {
ASSERT_OK ( Put ( ToString ( i * 20 + j ) , rnd . RandomString ( 980 ) ) ) ;
ASSERT_OK ( Put ( std : : to_string ( i * 20 + j ) , rnd . RandomString ( 980 ) ) ) ;
}
ASSERT_OK ( Flush ( ) ) ;
ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ;
@@ -3791,7 +3791,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) {
for ( int i = 0 ; i < 10 ; i + + ) {
// Generate and flush a file about 10KB.
for ( int j = 0 ; j < 10 ; j + + ) {
ASSERT_OK ( Put ( ToString ( i * 20 + j ) , rnd . RandomString ( 980 ) ) ) ;
ASSERT_OK ( Put ( std : : to_string ( i * 20 + j ) , rnd . RandomString ( 980 ) ) ) ;
}
ASSERT_OK ( Flush ( ) ) ;
ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ;
@@ -3807,7 +3807,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) {
// Create 1 more file to trigger TTL compaction. The old files are dropped.
for ( int i = 0 ; i < 1 ; i + + ) {
for ( int j = 0 ; j < 10 ; j + + ) {
ASSERT_OK ( Put ( ToString ( i * 20 + j ) , rnd . RandomString ( 980 ) ) ) ;
ASSERT_OK ( Put ( std : : to_string ( i * 20 + j ) , rnd . RandomString ( 980 ) ) ) ;
}
ASSERT_OK ( Flush ( ) ) ;
}
@@ -3833,7 +3833,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) {
for ( int i = 0 ; i < 3 ; i + + ) {
// Generate and flush a file about 10KB.
for ( int j = 0 ; j < 10 ; j + + ) {
ASSERT_OK ( Put ( ToString ( i * 20 + j ) , rnd . RandomString ( 980 ) ) ) ;
ASSERT_OK ( Put ( std : : to_string ( i * 20 + j ) , rnd . RandomString ( 980 ) ) ) ;
}
ASSERT_OK ( Flush ( ) ) ;
ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ;
@@ -3848,7 +3848,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) {
for ( int i = 0 ; i < 5 ; i + + ) {
for ( int j = 0 ; j < 140 ; j + + ) {
ASSERT_OK ( Put ( ToString ( i * 20 + j ) , rnd . RandomString ( 980 ) ) ) ;
ASSERT_OK ( Put ( std : : to_string ( i * 20 + j ) , rnd . RandomString ( 980 ) ) ) ;
}
ASSERT_OK ( Flush ( ) ) ;
ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ;
@@ -3871,7 +3871,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) {
for ( int i = 0 ; i < 10 ; i + + ) {
// Generate and flush a file about 10KB.
for ( int j = 0 ; j < 10 ; j + + ) {
ASSERT_OK ( Put ( ToString ( i * 20 + j ) , rnd . RandomString ( 980 ) ) ) ;
ASSERT_OK ( Put ( std : : to_string ( i * 20 + j ) , rnd . RandomString ( 980 ) ) ) ;
}
ASSERT_OK ( Flush ( ) ) ;
ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ;
@@ -3890,7 +3890,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) {
// Create 10 more files. The old 5 files are dropped as their ttl expired.
for ( int i = 0 ; i < 10 ; i + + ) {
for ( int j = 0 ; j < 10 ; j + + ) {
ASSERT_OK ( Put ( ToString ( i * 20 + j ) , rnd . RandomString ( 980 ) ) ) ;
ASSERT_OK ( Put ( std : : to_string ( i * 20 + j ) , rnd . RandomString ( 980 ) ) ) ;
}
ASSERT_OK ( Flush ( ) ) ;
ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ;
@@ -3915,7 +3915,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) {
for ( int i = 0 ; i < 60 ; i + + ) {
// Generate and flush a file about 20KB.
for ( int j = 0 ; j < 20 ; j + + ) {
ASSERT_OK ( Put ( ToString ( i * 20 + j ) , rnd . RandomString ( 980 ) ) ) ;
ASSERT_OK ( Put ( std : : to_string ( i * 20 + j ) , rnd . RandomString ( 980 ) ) ) ;
}
ASSERT_OK ( Flush ( ) ) ;
ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ;
@@ -3926,7 +3926,8 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) {
for ( int i = 0 ; i < 60 ; i + + ) {
// Generate and flush a file about 20KB.
for ( int j = 0 ; j < 20 ; j + + ) {
ASSERT_OK ( Put ( ToString ( i * 20 + j + 2000 ) , rnd . RandomString ( 980 ) ) ) ;
ASSERT_OK (
Put ( std : : to_string ( i * 20 + j + 2000 ) , rnd . RandomString ( 980 ) ) ) ;
}
ASSERT_OK ( Flush ( ) ) ;
ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ;
@@ -4207,7 +4208,7 @@ TEST_F(DBTest, ConcurrentFlushWAL) {
std : : vector < port : : Thread > threads ;
threads . emplace_back ( [ & ] {
for ( size_t i = 0 ; i < cnt ; i + + ) {
auto istr = ToString ( i ) ;
auto istr = std : : to_string ( i ) ;
ASSERT_OK ( db_ - > Put ( wopt , db_ - > DefaultColumnFamily ( ) , " a " + istr ,
" b " + istr ) ) ;
}
@@ -4215,7 +4216,7 @@ TEST_F(DBTest, ConcurrentFlushWAL) {
if ( two_write_queues ) {
threads . emplace_back ( [ & ] {
for ( size_t i = cnt ; i < 2 * cnt ; i + + ) {
auto istr = ToString ( i ) ;
auto istr = std : : to_string ( i ) ;
WriteBatch batch ;
ASSERT_OK ( batch . Put ( " a " + istr , " b " + istr ) ) ;
ASSERT_OK (
@@ -4236,7 +4237,7 @@ TEST_F(DBTest, ConcurrentFlushWAL) {
Reopen ( options ) ;
for ( size_t i = 0 ; i < cnt ; i + + ) {
PinnableSlice pval ;
auto istr = ToString ( i ) ;
auto istr = std : : to_string ( i ) ;
ASSERT_OK (
db_ - > Get ( ropt , db_ - > DefaultColumnFamily ( ) , " a " + istr , & pval ) ) ;
ASSERT_TRUE ( pval = = ( " b " + istr ) ) ;
@@ -4259,7 +4260,7 @@ TEST_F(DBTest, ManualFlushWalAndWriteRace) {
port : : Thread writeThread ( [ & ] ( ) {
for ( int i = 0 ; i < 100 ; i + + ) {
auto istr = ToString ( i ) ;
auto istr = std : : to_string ( i ) ;
ASSERT_OK ( dbfull ( ) - > Put ( wopts , " key_ " + istr , " value_ " + istr ) ) ;
}
} ) ;
@@ -4607,7 +4608,7 @@ TEST_P(DBTestWithParam, ThreadStatusSingleCompaction) {
// The Put Phase.
for ( int file = 0 ; file < kNumL0Files ; + + file ) {
for ( int key = 0 ; key < kEntriesPerBuffer ; + + key ) {
ASSERT_OK ( Put ( ToString ( key + file * kEntriesPerBuffer ) ,
ASSERT_OK ( Put ( std : : to_string ( key + file * kEntriesPerBuffer ) ,
rnd . RandomString ( kTestValueSize ) ) ) ;
}
ASSERT_OK ( Flush ( ) ) ;
@@ -4758,7 +4759,7 @@ TEST_P(DBTestWithParam, PreShutdownMultipleCompaction) {
int operation_count [ ThreadStatus : : NUM_OP_TYPES ] = { 0 } ;
for ( int file = 0 ; file < 16 * kNumL0Files ; + + file ) {
for ( int k = 0 ; k < kEntriesPerBuffer ; + + k ) {
ASSERT_OK ( Put ( ToString ( key + + ) , rnd . RandomString ( kTestValueSize ) ) ) ;
ASSERT_OK ( Put ( std : : to_string ( key + + ) , rnd . RandomString ( kTestValueSize ) ) ) ;
}
ASSERT_OK ( env_ - > GetThreadList ( & thread_list ) ) ;
@@ -4845,7 +4846,7 @@ TEST_P(DBTestWithParam, PreShutdownCompactionMiddle) {
int operation_count [ ThreadStatus : : NUM_OP_TYPES ] = { 0 } ;
for ( int file = 0 ; file < 16 * kNumL0Files ; + + file ) {
for ( int k = 0 ; k < kEntriesPerBuffer ; + + k ) {
ASSERT_OK ( Put ( ToString ( key + + ) , rnd . RandomString ( kTestValueSize ) ) ) ;
ASSERT_OK ( Put ( std : : to_string ( key + + ) , rnd . RandomString ( kTestValueSize ) ) ) ;
}
ASSERT_OK ( env_ - > GetThreadList ( & thread_list ) ) ;
@@ -5156,8 +5157,9 @@ TEST_F(DBTest, DynamicCompactionOptions) {
// Writing to 64KB L0 files should trigger a compaction. Since these
// 2 L0 files have the same key range, compaction merge them and should
// result in 2 32KB L1 files.
ASSERT_OK ( dbfull ( ) - > SetOptions ( { { " level0_file_num_compaction_trigger " , " 2 " } ,
{ " target_file_size_base " , ToString ( k32KB ) } } ) ) ;
ASSERT_OK (
dbfull ( ) - > SetOptions ( { { " level0_file_num_compaction_trigger " , " 2 " } ,
{ " target_file_size_base " , std : : to_string ( k32KB ) } } ) ) ;
gen_l0_kb ( 0 , 64 , 1 ) ;
ASSERT_EQ ( " 1,1 " , FilesPerLevel ( ) ) ;
@@ -5176,8 +5178,8 @@ TEST_F(DBTest, DynamicCompactionOptions) {
// Increase level base size to 256KB and write enough data that will
// fill L1 and L2. L1 size should be around 256KB while L2 size should be
// around 256KB x 4.
ASSERT_OK (
dbfull ( ) - > SetOptions ( { { " max_bytes_for_level_base " , ToString ( k1MB ) } } ) ) ;
ASSERT_OK ( dbfull ( ) - > SetOptions (
{ { " max_bytes_for_level_base " , std : : to_string ( k1MB ) } } ) ) ;
// writing 96 x 64KB => 6 * 1024KB
// (L1 + L2) = (1 + 4) * 1024KB
@@ -5196,9 +5198,9 @@ TEST_F(DBTest, DynamicCompactionOptions) {
// max_bytes_for_level_base. Now, reduce both mulitplier and level base,
// After filling enough data that can fit in L1 - L3, we should see L1 size
// reduces to 128KB from 256KB which was asserted previously. Same for L2.
ASSERT_OK (
dbfull ( ) - > SetOptions ( { { " max_bytes_for_level_multiplier " , " 2 " } ,
{ " max_bytes_for_level_base " , ToString ( k128KB ) } } ) ) ;
ASSERT_OK ( dbfull ( ) - > SetOptions (
{ { " max_bytes_for_level_multiplier " , " 2 " } ,
{ " max_bytes_for_level_base " , std : : to_string ( k128KB ) } } ) ) ;
// writing 20 x 64KB = 10 x 128KB
// (L1 + L2 + L3) = (1 + 2 + 4) * 128KB
@@ -5854,7 +5856,7 @@ TEST_P(DBTestWithParam, FilterCompactionTimeTest) {
// put some data
for ( int table = 0 ; table < 4 ; + + table ) {
for ( int i = 0 ; i < 10 + table ; + + i ) {
ASSERT_OK ( Put ( ToString ( table * 100 + i ) , " val " ) ) ;
ASSERT_OK ( Put ( std : : to_string ( table * 100 + i ) , " val " ) ) ;
+ + n ;
}
ASSERT_OK ( Flush ( ) ) ;
@@ -6238,7 +6240,7 @@ TEST_F(DBTest, LargeBatchWithColumnFamilies) {
( write_size / 1024 / 1024 ) , pass ) ;
for ( ; ; ) {
std : : string data ( 3000 , j + + % 127 + 20 ) ;
data + = ToString ( j ) ;
data + = std : : to_string ( j ) ;
ASSERT_OK ( batch . Put ( handles_ [ 0 ] , Slice ( data ) , Slice ( data ) ) ) ;
if ( batch . GetDataSize ( ) > write_size ) {
break ;