@ -4058,7 +4058,7 @@ TEST_F(DBTest, DynamicLevelCompressionPerLevel) {
options . level0_file_num_compaction_trigger = 2 ;
options . level0_slowdown_writes_trigger = 2 ;
options . level0_stop_writes_trigger = 2 ;
options . target_file_size_base = 2048 ;
options . target_file_size_base = 20480 ;
options . level_compaction_dynamic_level_bytes = true ;
options . max_bytes_for_level_base = 102400 ;
options . max_bytes_for_level_multiplier = 4 ;
@ -4086,7 +4086,8 @@ TEST_F(DBTest, DynamicLevelCompressionPerLevel) {
ASSERT_EQ ( NumTableFilesAtLevel ( 1 ) , 0 ) ;
ASSERT_EQ ( NumTableFilesAtLevel ( 2 ) , 0 ) ;
ASSERT_EQ ( NumTableFilesAtLevel ( 3 ) , 0 ) ;
ASSERT_GT ( SizeAtLevel ( 0 ) + SizeAtLevel ( 4 ) , 20U * 4000U ) ;
// Assuming each file's metadata is at least 50 bytes.
ASSERT_GT ( SizeAtLevel ( 0 ) + SizeAtLevel ( 4 ) , 20U * 4000U + 50U * 4 ) ;
// Insert 400KB. Some data will be compressed
for ( int i = 21 ; i < 120 ; i + + ) {
@ -4096,7 +4097,8 @@ TEST_F(DBTest, DynamicLevelCompressionPerLevel) {
dbfull ( ) - > TEST_WaitForCompact ( ) ;
ASSERT_EQ ( NumTableFilesAtLevel ( 1 ) , 0 ) ;
ASSERT_EQ ( NumTableFilesAtLevel ( 2 ) , 0 ) ;
ASSERT_LT ( SizeAtLevel ( 0 ) + SizeAtLevel ( 3 ) + SizeAtLevel ( 4 ) , 120U * 4000U ) ;
ASSERT_LT ( SizeAtLevel ( 0 ) + SizeAtLevel ( 3 ) + SizeAtLevel ( 4 ) ,
120U * 4000U + 50U * 24 ) ;
// Make sure data in files in L3 is not compacted, by removing all files
// in L4 and calculating the number of rows
ASSERT_OK ( dbfull ( ) - > SetOptions ( {
@ -4116,7 +4118,7 @@ TEST_F(DBTest, DynamicLevelCompressionPerLevel) {
num_keys + + ;
}
ASSERT_OK ( iter - > status ( ) ) ;
ASSERT_GT ( SizeAtLevel ( 0 ) + SizeAtLevel ( 3 ) , num_keys * 4000U ) ;
ASSERT_GT ( SizeAtLevel ( 0 ) + SizeAtLevel ( 3 ) , num_keys * 4000U + num_keys * 10U ) ;
}
TEST_F ( DBTest , DynamicLevelCompressionPerLevel2 ) {