@@ -8651,16 +8651,18 @@ TEST(DBTest, DynamicMemtableOptions) {
 }
 
 TEST(DBTest, DynamicCompactionOptions) {
+  // minimum write buffer size is enforced at 64KB
+  const uint64_t k32KB = 1 << 15;
   const uint64_t k64KB = 1 << 16;
   const uint64_t k128KB = 1 << 17;
   const uint64_t k256KB = 1 << 18;
-  const uint64_t k5KB = 5 * 1024;
+  const uint64_t k4KB = 1 << 12;
   Options options;
   options.env = env_;
   options.create_if_missing = true;
   options.compression = kNoCompression;
   options.hard_rate_limit = 1.1;
-  options.write_buffer_size = k128KB;
+  options.write_buffer_size = k64KB;
   options.max_write_buffer_number = 2;
   // Compaction related options
   options.level0_file_num_compaction_trigger = 3;
@@ -8669,9 +8671,9 @@ TEST(DBTest, DynamicCompactionOptions) {
   options.max_grandparent_overlap_factor = 10;
   options.expanded_compaction_factor = 25;
   options.source_compaction_factor = 1;
-  options.target_file_size_base = k128KB;
+  options.target_file_size_base = k64KB;
   options.target_file_size_multiplier = 1;
-  options.max_bytes_for_level_base = k256KB;
+  options.max_bytes_for_level_base = k128KB;
   options.max_bytes_for_level_multiplier = 4;
 
   // Block flush thread and disable compaction thread
@@ -8689,65 +8691,71 @@ TEST(DBTest, DynamicCompactionOptions) {
   // Write 3 files that have the same key range, trigger compaction and
   // result in one L1 file
-  gen_l0_kb(0, 128, 1);
+  gen_l0_kb(0, 64);
   ASSERT_EQ(NumTableFilesAtLevel(0), 1);
-  gen_l0_kb(0, 128, 1);
+  gen_l0_kb(0, 64);
   ASSERT_EQ(NumTableFilesAtLevel(0), 2);
-  gen_l0_kb(0, 128, 1);
+  gen_l0_kb(0, 64);
   dbfull()->TEST_WaitForCompact();
   ASSERT_EQ("0,1", FilesPerLevel());
 
   std::vector<LiveFileMetaData> metadata;
   db_->GetLiveFilesMetaData(&metadata);
   ASSERT_EQ(1U, metadata.size());
-  ASSERT_LE(metadata[0].size, k128KB + k5KB);  // < 128KB + 5KB
-  ASSERT_GE(metadata[0].size, k128KB - k5KB);  // > 128KB - 5KB
+  ASSERT_LE(metadata[0].size, k64KB + k4KB);
+  ASSERT_GE(metadata[0].size, k64KB - k4KB);
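+  // (note: the compacted L1 file is expected to land within k4KB of
+  //  target_file_size_base; the exact size depends on where key/value
+  //  boundaries fall and on per-table metadata)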
-  // Make compaction trigger and file size smaller
+  // Test compaction trigger and target_file_size_base
   ASSERT_TRUE(dbfull()->SetOptions({
     {"level0_file_num_compaction_trigger", "2"},
-    {"target_file_size_base", "65536"}
+    {"target_file_size_base", std::to_string(k32KB)}
   }));
-  gen_l0_kb(0, 128, 1);
+  gen_l0_kb(0, 64);
   ASSERT_EQ("1,1", FilesPerLevel());
-  gen_l0_kb(0, 128, 1);
+  gen_l0_kb(0, 64);
   dbfull()->TEST_WaitForCompact();
   ASSERT_EQ("0,2", FilesPerLevel());
 
   metadata.clear();
   db_->GetLiveFilesMetaData(&metadata);
   ASSERT_EQ(2U, metadata.size());
-  ASSERT_LE(metadata[0].size, k64KB + k5KB);  // < 64KB + 5KB
-  ASSERT_GE(metadata[0].size, k64KB - k5KB);  // > 64KB - 5KB
+  ASSERT_LE(metadata[0].size, k32KB + k4KB);
+  ASSERT_GE(metadata[0].size, k32KB - k4KB);
 
-  // Change base level size to 1MB
-  ASSERT_TRUE(dbfull()->SetOptions({{"max_bytes_for_level_base", "1048576"}}));
+  // Test max_bytes_for_level_base
+  ASSERT_TRUE(dbfull()->SetOptions({
+    {"max_bytes_for_level_base", std::to_string(k256KB)}
+  }));
-  // writing 56 x 128KB => 7MB
-  // (L1 + L2) = (1 + 4) * 1MB = 5MB
-  for (int i = 0; i < 56; ++i) {
-    gen_l0_kb(i, 128, 56);
+  // writing 24 x 64KB => 6 * 256KB
+  // (L1 + L2) = (1 + 4) * 256KB
+  for (int i = 0; i < 24; ++i) {
+    gen_l0_kb(i, 64, 32);
   }
   dbfull()->TEST_WaitForCompact();
-  ASSERT_TRUE(SizeAtLevel(1) > 1048576 * 0.9 &&
-              SizeAtLevel(1) < 1048576 * 1.1);
-  ASSERT_TRUE(SizeAtLevel(2) > 4 * 1048576 * 0.9 &&
-              SizeAtLevel(2) < 4 * 1048576 * 1.1);
+  ASSERT_TRUE(SizeAtLevel(1) > k256KB * 0.8 &&
+              SizeAtLevel(1) < k256KB * 1.2);
+  ASSERT_TRUE(SizeAtLevel(2) > 4 * k256KB * 0.8 &&
+              SizeAtLevel(2) < 4 * k256KB * 1.2);
 
-  // Change multiplier to 2 with smaller base
+  // Test max_bytes_for_level_multiplier and
+  // max_bytes_for_level_base (reduce)
   ASSERT_TRUE(dbfull()->SetOptions({
     {"max_bytes_for_level_multiplier", "2"},
-    {"max_bytes_for_level_base", "262144"}
+    {"max_bytes_for_level_base", std::to_string(k128KB)}
   }));
-  // writing 16 x 128KB
-  // (L1 + L2 + L3) = (1 + 2 + 4) * 256KB
-  for (int i = 0; i < 16; ++i) {
-    gen_l0_kb(i, 128, 50);
+  // writing 20 x 64KB = 10 x 128KB
+  // (L1 + L2 + L3) = (1 + 2 + 4) * 128KB
+  for (int i = 0; i < 20; ++i) {
+    gen_l0_kb(i, 64, 32);
   }
   dbfull()->TEST_WaitForCompact();
-  ASSERT_TRUE(SizeAtLevel(1) < 262144 * 1.1);
-  ASSERT_TRUE(SizeAtLevel(2) < 2 * 262144 * 1.1);
-  ASSERT_TRUE(SizeAtLevel(3) < 4 * 262144 * 1.1);
+  ASSERT_TRUE(SizeAtLevel(1) > k128KB * 0.8 &&
+              SizeAtLevel(1) < k128KB * 1.2);
+  ASSERT_TRUE(SizeAtLevel(2) > 2 * k128KB * 0.8 &&
+              SizeAtLevel(2) < 2 * k128KB * 1.2);
+  ASSERT_TRUE(SizeAtLevel(3) > 4 * k128KB * 0.8 &&
+              SizeAtLevel(3) < 4 * k128KB * 1.2);
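+  // (note: with max_bytes_for_level_base = k128KB and multiplier 2, the
+  //  level targets are L1 = 128KB, L2 = 256KB, L3 = 512KB, which is the
+  //  1:2:4 ratio asserted above)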
 
   // Clean up memtable and L0
   dbfull()->CompactRange(nullptr, nullptr);
@@ -8761,16 +8769,16 @@ TEST(DBTest, DynamicCompactionOptions) {
   WriteOptions wo;
   wo.timeout_hint_us = 10000;
   while (Put(Key(count), RandomString(&rnd, 1024), wo).ok() && count < 64) {
     // Wait for compaction so that put won't timeout
     dbfull()->TEST_FlushMemTable(true);
     count++;
   }
   // Stop trigger = 8
   ASSERT_EQ(count, 8);
   // Unblock
   sleeping_task_low1.WakeUp();
   sleeping_task_low1.WaitUntilDone();
-  // Reduce stop trigger
+  // Test: stop trigger (reduce)
   ASSERT_TRUE(dbfull()->SetOptions({
     {"level0_stop_writes_trigger", "6"}
   }));
@@ -8783,7 +8791,6 @@ TEST(DBTest, DynamicCompactionOptions) {
                  Env::Priority::LOW);
   count = 0;
   while (Put(Key(count), RandomString(&rnd, 1024), wo).ok() && count < 64) {
-    // Wait for compaction so that put won't timeout
     dbfull()->TEST_FlushMemTable(true);
     count++;
   }
@@ -8820,6 +8827,56 @@ TEST(DBTest, DynamicCompactionOptions) {
   }
   dbfull()->TEST_WaitForCompact();
   ASSERT_LT(NumTableFilesAtLevel(0), 4);
+
+  // Test for hard_rate_limit: change max_bytes_for_level_base to make level
+  // sizes big
+  ASSERT_TRUE(dbfull()->SetOptions({
+    {"max_bytes_for_level_base", std::to_string(k256KB)}
+  }));
+  // writing 40 x 64KB = 10 x 256KB
+  // (L1 + L2 + L3) = (1 + 2 + 4) * 256KB
+  for (int i = 0; i < 40; ++i) {
+    gen_l0_kb(i, 64, 32);
+  }
+  dbfull()->TEST_WaitForCompact();
+  ASSERT_TRUE(SizeAtLevel(1) > k256KB * 0.8 &&
+              SizeAtLevel(1) < k256KB * 1.2);
+  ASSERT_TRUE(SizeAtLevel(2) > 2 * k256KB * 0.8 &&
+              SizeAtLevel(2) < 2 * k256KB * 1.2);
+  ASSERT_TRUE(SizeAtLevel(3) > 4 * k256KB * 0.8 &&
+              SizeAtLevel(3) < 4 * k256KB * 1.2);
+
+  // Reduce max_bytes_for_level_base and disable compaction at the same time.
+  // This should cause the compaction score to increase.
+  ASSERT_TRUE(dbfull()->SetOptions({
+    {"disable_auto_compactions", "true"},
+    {"max_bytes_for_level_base", "65536"},
+  }));
+  ASSERT_OK(Put(Key(count), RandomString(&rnd, 1024)));
+  dbfull()->TEST_FlushMemTable(true);
+
+  // Check that the score is above 2
+  ASSERT_TRUE(SizeAtLevel(1) / k64KB > 2 ||
+              SizeAtLevel(2) / k64KB > 4 ||
+              SizeAtLevel(3) / k64KB > 8);
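+  // (note: a level's compaction score is its size divided by its target
+  //  size; with max_bytes_for_level_base = 64KB and multiplier 2 the targets
+  //  for L1/L2/L3 are 64KB/128KB/256KB, so each check above is equivalent to
+  //  that level's score exceeding 2)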
+
+  // Enforce hard rate limit; note that the L0 score is not regulated by
+  // this limit
+  ASSERT_TRUE(dbfull()->SetOptions({
+    {"hard_rate_limit", "2"}
+  }));
+  ASSERT_OK(Put(Key(count), RandomString(&rnd, 1024)));
+  dbfull()->TEST_FlushMemTable(true);
+
+  // The hard rate limit slows each write down by 1000 us, so the default
+  // 10ms timeout_hint_us should be enough
+  ASSERT_TRUE(Put(Key(count), RandomString(&rnd, 1024), wo).ok());
+  wo.timeout_hint_us = 500;
+  ASSERT_TRUE(Put(Key(count), RandomString(&rnd, 1024), wo).IsTimedOut());
+
+  // Bump up the limit
+  ASSERT_TRUE(dbfull()->SetOptions({
+    {"hard_rate_limit", "100"}
+  }));
+  dbfull()->TEST_FlushMemTable(true);
+  ASSERT_TRUE(Put(Key(count), RandomString(&rnd, 1024), wo).ok());
 }
 
 }  // namespace rocksdb