@@ -8,6 +8,7 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 #include <algorithm>
+#include <iostream>
 #include <set>
 #include <unistd.h>
 #include <unordered_set>
@@ -23,20 +24,20 @@
 #include "rocksdb/env.h"
 #include "rocksdb/filter_policy.h"
 #include "rocksdb/perf_context.h"
-#include "table/plain_table_factory.h"
 #include "rocksdb/slice.h"
 #include "rocksdb/slice_transform.h"
 #include "rocksdb/table.h"
 #include "rocksdb/table_properties.h"
 #include "table/block_based_table_factory.h"
+#include "table/plain_table_factory.h"
 #include "util/hash.h"
 #include "util/hash_linklist_rep.h"
-#include "utilities/merge_operators.h"
 #include "util/logging.h"
 #include "util/mutexlock.h"
 #include "util/statistics.h"
 #include "util/testharness.h"
 #include "util/testutil.h"
+#include "utilities/merge_operators.h"

 namespace rocksdb {
@@ -850,6 +851,28 @@ void VerifyTableProperties(DB* db, uint64_t expected_entries_size) {
   ASSERT_EQ(expected_entries_size, sum);
 }
+
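+// Helpers for inspecting memtable/arena memory usage in the tests below.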
+std::unordered_map<std::string, size_t> GetMemoryUsage(MemTable* memtable) {
+  const auto& arena = memtable->TEST_GetArena();
+  return {{"memtable.approximate.usage", memtable->ApproximateMemoryUsage()},
+          {"arena.approximate.usage", arena.ApproximateMemoryUsage()},
+          {"arena.allocated.memory", arena.MemoryAllocatedBytes()},
+          {"arena.unused.bytes", arena.AllocatedAndUnused()},
+          {"irregular.blocks", arena.IrregularBlockNum()}};
+}
+
+void PrintMemoryUsage(const std::unordered_map<std::string, size_t>& usage) {
+  for (const auto& item : usage) {
+    std::cout << "\t" << item.first << ": " << item.second << std::endl;
+  }
+}
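+
+// Values below are drawn so that some exceed arena_block_size / 4; in a
+// LevelDB-style arena such allocations should be served from their own
+// ("irregular") blocks, the statistic reported above.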
+void AddRandomKV(MemTable* memtable, Random* rnd, size_t arena_block_size) {
+  memtable->Add(0, kTypeValue, RandomString(rnd, 20) /* key */,
+                // make sure we will be able to generate some oversized entries
+                RandomString(rnd, rnd->Uniform(arena_block_size / 4) * 1.15 +
+                                      10) /* value */);
+}
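+
+// A minimal usage sketch for the helpers above (hypothetical test body; the
+// memtable setup is elided and assumed, not part of this change):
+//
+//   Options options = CurrentOptions();
+//   MemTable* memtable = ...;  // obtained from the fixture under test
+//   Random rnd(301);
+//   for (int i = 0; i < 100; i++) {
+//     AddRandomKV(memtable, &rnd, options.arena_block_size);
+//   }
+//   PrintMemoryUsage(GetMemoryUsage(memtable));
+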
 TEST(DBTest, Empty) {
   do {
     Options options = CurrentOptions();
@@ -1922,7 +1945,7 @@ TEST(DBTest, NumImmutableMemTable) {
   options.write_buffer_size = 1000000;
   Reopen(&options);

-  std::string big_value(1000000, 'x');
+  std::string big_value(1000000 * 2, 'x');
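+  // (2x the write buffer, so that a single Put should be enough to fill the
+  // memtable regardless of per-entry overhead)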
   std::string num;
   SetPerfLevel(kEnableTime);
@@ -2205,6 +2228,10 @@ TEST(DBTest, CompactionTrigger) {
   ASSERT_EQ(NumTableFilesAtLevel(1), 1);
 }

+// TODO(kailiu) The tests on UniversalCompaction have some issues:
+//  1. A lot of magic numbers ("11" or "12").
+//  2. Assumptions are made about the memtable flush conditions, which may
+//     change from time to time.
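+// One possible cleanup for item 1, sketched as a suggestion only (the constant
+// names are hypothetical, not part of this change):
+//   static const int kValueSize = 10000;   // bytes per value
+//   static const int kValuesPerFile = 11;  // ~110KB per flushed file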
 TEST(DBTest, UniversalCompactionTrigger) {
   Options options = CurrentOptions();
   options.compaction_style = kCompactionStyleUniversal;
@@ -2222,8 +2249,8 @@ TEST(DBTest, UniversalCompactionTrigger) {
   for (int num = 0;
        num < options.level0_file_num_compaction_trigger - 1;
        num++) {
-    // Write 120KB (12 values, each 10K)
-    for (int i = 0; i < 12; i++) {
+    // Write 110KB (11 values, each 10K)
+    for (int i = 0; i < 11; i++) {
       ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
       key_idx++;
     }
@@ -2233,7 +2260,7 @@ TEST(DBTest, UniversalCompactionTrigger) {

   // Generate one more file at level-0, which should trigger level-0
   // compaction.
-  for (int i = 0; i < 12; i++) {
+  for (int i = 0; i < 11; i++) {
     ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
     key_idx++;
   }
@@ -2257,8 +2284,8 @@ TEST(DBTest, UniversalCompactionTrigger) {
   for (int num = 0;
        num < options.level0_file_num_compaction_trigger - 3;
        num++) {
-    // Write 120KB (12 values, each 10K)
-    for (int i = 0; i < 12; i++) {
+    // Write 110KB (11 values, each 10K)
+    for (int i = 0; i < 11; i++) {
       ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
       key_idx++;
     }
@@ -2268,7 +2295,7 @@ TEST(DBTest, UniversalCompactionTrigger) {

   // Generate one more file at level-0, which should trigger level-0
   // compaction.
-  for (int i = 0; i < 12; i++) {
+  for (int i = 0; i < 11; i++) {
     ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
     key_idx++;
   }
@@ -2286,8 +2313,8 @@ TEST(DBTest, UniversalCompactionTrigger) {
   for (int num = 0;
        num < options.level0_file_num_compaction_trigger - 3;
        num++) {
-    // Write 120KB (12 values, each 10K)
-    for (int i = 0; i < 12; i++) {
+    // Write 110KB (11 values, each 10K)
+    for (int i = 0; i < 11; i++) {
       ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
       key_idx++;
     }
@@ -2312,7 +2339,7 @@ TEST(DBTest, UniversalCompactionTrigger) {
   // Stage 4:
   // Now we have 3 files at level 0, with size 4, 2.4, 2. Let's generate a
   // new file of size 1.
-  for (int i = 0; i < 12; i++) {
+  for (int i = 0; i < 11; i++) {
     ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
     key_idx++;
   }
@@ -2326,7 +2353,7 @@ TEST(DBTest, UniversalCompactionTrigger) {
   // Stage 5:
   // Now we have 4 files at level 0, with size 4, 2.4, 2, 1. Let's generate
   // a new file of size 1.
-  for (int i = 0; i < 12; i++) {
+  for (int i = 0; i < 11; i++) {
     ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
     key_idx++;
   }
@@ -2356,8 +2383,8 @@ TEST(DBTest, UniversalCompactionSizeAmplification) {
   for (int num = 0;
        num < options.level0_file_num_compaction_trigger - 1;
        num++) {
-    // Write 120KB (12 values, each 10K)
-    for (int i = 0; i < 12; i++) {
+    // Write 110KB (11 values, each 10K)
+    for (int i = 0; i < 11; i++) {
       ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
       key_idx++;
     }
@@ -2392,8 +2419,8 @@ TEST(DBTest, UniversalCompactionOptions) {
   for (int num = 0;
        num < options.level0_file_num_compaction_trigger;
        num++) {
-    // Write 120KB (12 values, each 10K)
-    for (int i = 0; i < 12; i++) {
+    // Write 110KB (11 values, each 10K)
+    for (int i = 0; i < 11; i++) {
       ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
       key_idx++;
     }
@@ -2431,8 +2458,8 @@ TEST(DBTest, UniversalCompactionStopStyleSimilarSize) {
   for (int num = 0;
        num < options.level0_file_num_compaction_trigger - 1;
        num++) {
-    // Write 120KB (12 values, each 10K)
-    for (int i = 0; i < 12; i++) {
+    // Write 110KB (11 values, each 10K)
+    for (int i = 0; i < 11; i++) {
       ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
       key_idx++;
     }
@@ -2442,7 +2469,7 @@ TEST(DBTest, UniversalCompactionStopStyleSimilarSize) {

   // Generate one more file at level-0, which should trigger level-0
   // compaction.
-  for (int i = 0; i < 12; i++) {
+  for (int i = 0; i < 11; i++) {
     ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
     key_idx++;
   }
@@ -2463,8 +2490,8 @@ TEST(DBTest, UniversalCompactionStopStyleSimilarSize) {
   for (int num = 0;
        num < options.level0_file_num_compaction_trigger - 3;
        num++) {
-    // Write 120KB (12 values, each 10K)
-    for (int i = 0; i < 12; i++) {
+    // Write 110KB (11 values, each 10K)
+    for (int i = 0; i < 11; i++) {
       ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
       key_idx++;
     }
@@ -2474,7 +2501,7 @@ TEST(DBTest, UniversalCompactionStopStyleSimilarSize) {

   // Generate one more file at level-0, which should trigger level-0
   // compaction.
-  for (int i = 0; i < 12; i++) {
+  for (int i = 0; i < 11; i++) {
     ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
     key_idx++;
   }
@@ -2485,7 +2512,7 @@ TEST(DBTest, UniversalCompactionStopStyleSimilarSize) {
   // Stage 3:
   // Now we have 3 files at level 0, with size 4, 0.4, 2. Generate one
   // more file at level-0, which should trigger level-0 compaction.
-  for (int i = 0; i < 12; i++) {
+  for (int i = 0; i < 11; i++) {
     ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
     key_idx++;
   }
@@ -2593,54 +2620,54 @@ TEST(DBTest, UniversalCompactionCompressRatio1) {
   // The first compaction (2) is compressed.
   for (int num = 0; num < 2; num++) {
-    // Write 120KB (12 values, each 10K)
-    for (int i = 0; i < 12; i++) {
+    // Write 110KB (11 values, each 10K)
+    for (int i = 0; i < 11; i++) {
       ASSERT_OK(Put(Key(key_idx), CompressibleString(&rnd, 10000)));
       key_idx++;
     }
     dbfull()->TEST_WaitForFlushMemTable();
     dbfull()->TEST_WaitForCompact();
   }
-  ASSERT_LT((int)dbfull()->TEST_GetLevel0TotalSize(), 120000 * 2 * 0.9);
+  ASSERT_LT((int)dbfull()->TEST_GetLevel0TotalSize(), 110000 * 2 * 0.9);
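+  // The 0.9 factor checks that compression actually shrank the data: two
+  // compacted files of compressible values must come in under 90% of their
+  // ~220KB raw size.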

   // The second compaction (4) is compressed.
   for (int num = 0; num < 2; num++) {
-    // Write 120KB (12 values, each 10K)
-    for (int i = 0; i < 12; i++) {
+    // Write 110KB (11 values, each 10K)
+    for (int i = 0; i < 11; i++) {
       ASSERT_OK(Put(Key(key_idx), CompressibleString(&rnd, 10000)));
       key_idx++;
     }
     dbfull()->TEST_WaitForFlushMemTable();
     dbfull()->TEST_WaitForCompact();
   }
-  ASSERT_LT((int)dbfull()->TEST_GetLevel0TotalSize(), 120000 * 4 * 0.9);
+  ASSERT_LT((int)dbfull()->TEST_GetLevel0TotalSize(), 110000 * 4 * 0.9);

   // The third compaction (2 4) is compressed since this time it is
   // (1 1 3.2) and 3.2/5.2 doesn't reach the ratio.
   for (int num = 0; num < 2; num++) {
-    // Write 120KB (12 values, each 10K)
-    for (int i = 0; i < 12; i++) {
+    // Write 110KB (11 values, each 10K)
+    for (int i = 0; i < 11; i++) {
      ASSERT_OK(Put(Key(key_idx), CompressibleString(&rnd, 10000)));
       key_idx++;
     }
     dbfull()->TEST_WaitForFlushMemTable();
     dbfull()->TEST_WaitForCompact();
   }
-  ASSERT_LT((int)dbfull()->TEST_GetLevel0TotalSize(), 120000 * 6 * 0.9);
+  ASSERT_LT((int)dbfull()->TEST_GetLevel0TotalSize(), 110000 * 6 * 0.9);

   // When the compaction grows up to (2 4 8), the latest
   // files are not compressed.
   for (int num = 0; num < 8; num++) {
-    // Write 120KB (12 values, each 10K)
-    for (int i = 0; i < 12; i++) {
+    // Write 110KB (11 values, each 10K)
+    for (int i = 0; i < 11; i++) {
       ASSERT_OK(Put(Key(key_idx), CompressibleString(&rnd, 10000)));
       key_idx++;
     }
     dbfull()->TEST_WaitForFlushMemTable();
     dbfull()->TEST_WaitForCompact();
   }
-  ASSERT_GT((int)dbfull()->TEST_GetLevel0TotalSize(),
-            120000 * 12 * 0.8 + 110000 * 2);
+  ASSERT_GT((int)dbfull()->TEST_GetLevel0TotalSize(),
+            110000 * 11 * 0.8 + 110000 * 2);
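+  // Lower bound: roughly eleven files compressed at no better than 0.8x, plus
+  // the latest two files stored uncompressed; exceeding it shows the last
+  // compaction skipped compression.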
 }

 TEST(DBTest, UniversalCompactionCompressRatio2) {
@@ -2666,8 +2693,8 @@ TEST(DBTest, UniversalCompactionCompressRatio2) {
     dbfull()->TEST_WaitForFlushMemTable();
     dbfull()->TEST_WaitForCompact();
   }
-  ASSERT_LT((int)dbfull()->TEST_GetLevel0TotalSize(),
-            120000 * 12 * 0.8 + 110000 * 2);
+  ASSERT_LT((int)dbfull()->TEST_GetLevel0TotalSize(),
+            120000 * 12 * 0.8 + 120000 * 2);
 }

 #endif