@@ -3,12 +3,15 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
 #include "leveldb/db.h"
+#include "leveldb/filter_policy.h"
 #include "db/db_impl.h"
 #include "db/filename.h"
 #include "db/version_set.h"
 #include "db/write_batch_internal.h"
+#include "leveldb/cache.h"
 #include "leveldb/env.h"
 #include "leveldb/table.h"
+#include "util/hash.h"
 #include "util/logging.h"
 #include "util/mutexlock.h"
 #include "util/testharness.h"
@@ -22,6 +25,28 @@ static std::string RandomString(Random* rnd, int len) {
   return r;
 }
 
+namespace {
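+// Thread-safe counter, used by SpecialEnv below to count random-access reads.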
+class AtomicCounter {
+ private:
+  port::Mutex mu_;
+  int count_;
+ public:
+  AtomicCounter() : count_(0) { }
+  void Increment() {
+    MutexLock l(&mu_);
+    count_++;
+  }
+  int Read() {
+    MutexLock l(&mu_);
+    return count_;
+  }
+  void Reset() {
+    MutexLock l(&mu_);
+    count_ = 0;
+  }
+};
+}
+
 // Special Env used to delay background operations
 class SpecialEnv : public EnvWrapper {
  public:
@@ -31,9 +56,13 @@ class SpecialEnv : public EnvWrapper {
   // Simulate no-space errors while this pointer is non-NULL.
   port::AtomicPointer no_space_;
 
+  bool count_random_reads_;
+  AtomicCounter random_read_counter_;
+
   explicit SpecialEnv(Env* base) : EnvWrapper(base) {
     delay_sstable_sync_.Release_Store(NULL);
     no_space_.Release_Store(NULL);
+    count_random_reads_ = false;
   }
 
   Status NewWritableFile(const std::string& f, WritableFile** r) {
@@ -74,9 +103,44 @@ class SpecialEnv : public EnvWrapper {
     }
     return s;
   }
+
+  Status NewRandomAccessFile(const std::string& f, RandomAccessFile** r) {
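+    // Wrapper that forwards reads to the target file and counts each Read()
+    // call in the supplied AtomicCounter.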
+    class CountingFile : public RandomAccessFile {
+     private:
+      RandomAccessFile* target_;
+      AtomicCounter* counter_;
+     public:
+      CountingFile(RandomAccessFile* target, AtomicCounter* counter)
+          : target_(target), counter_(counter) {
+      }
+      virtual ~CountingFile() { delete target_; }
+      virtual Status Read(uint64_t offset, size_t n, Slice* result,
+                          char* scratch) const {
+        counter_->Increment();
+        return target_->Read(offset, n, result, scratch);
+      }
+    };
+
+    Status s = target()->NewRandomAccessFile(f, r);
+    if (s.ok() && count_random_reads_) {
+      *r = new CountingFile(*r, &random_read_counter_);
+    }
+    return s;
+  }
 };
 
 class DBTest {
+ private:
+  const FilterPolicy* filter_policy_;
+
+  // Sequence of option configurations to try
+  enum OptionConfig {
+    kDefault,
+    kFilter,
+    kEnd
+  };
+  int option_config_;
+
  public:
   std::string dbname_;
   SpecialEnv* env_;
@@ -84,7 +148,9 @@ class DBTest {
 
   Options last_options_;
 
-  DBTest() : env_(new SpecialEnv(Env::Default())) {
+  DBTest() : option_config_(kDefault),
+             env_(new SpecialEnv(Env::Default())) {
+    filter_policy_ = NewBloomFilterPolicy(10);
     dbname_ = test::TmpDir() + "/db_test";
     DestroyDB(dbname_, Options());
     db_ = NULL;
@@ -95,6 +161,32 @@ class DBTest {
     delete db_;
     DestroyDB(dbname_, Options());
     delete env_;
+    delete filter_policy_;
   }
 
+  // Switch to a fresh database with the next option configuration to
+  // test.  Return false if there are no more configurations to test.
+  bool ChangeOptions() {
+    if (option_config_ == kEnd) {
+      return false;
+    } else {
+      option_config_++;
+      DestroyAndReopen();
+      return true;
+    }
+  }
+
+  // Return the current option configuration.
+  Options CurrentOptions() {
+    Options options;
+    switch (option_config_) {
+      case kFilter:
+        options.filter_policy = filter_policy_;
+        break;
+      default:
+        break;
+    }
+    return options;
+  }
+
   DBImpl* dbfull() {
@@ -105,6 +197,11 @@ class DBTest {
     ASSERT_OK(TryReopen(options));
   }
 
+  void Close() {
+    delete db_;
+    db_ = NULL;
+  }
+
   void DestroyAndReopen(Options* options = NULL) {
     delete db_;
     db_ = NULL;
@@ -119,6 +216,7 @@ class DBTest {
     if (options != NULL) {
       opts = *options;
     } else {
+      opts = CurrentOptions();
       opts.create_if_missing = true;
     }
     last_options_ = opts;
@@ -189,8 +287,7 @@ class DBTest {
       if (!ParseInternalKey(iter->key(), &ikey)) {
         result += "CORRUPTED";
       } else {
-        if (last_options_.comparator->Compare(
-                ikey.user_key, user_key) != 0) {
+        if (last_options_.comparator->Compare(ikey.user_key, user_key) != 0) {
           break;
         }
         if (!first) {
@@ -314,30 +411,37 @@ class DBTest {
 };
 
 TEST(DBTest, Empty) {
+  do {
     ASSERT_TRUE(db_ != NULL);
     ASSERT_EQ("NOT_FOUND", Get("foo"));
+  } while (ChangeOptions());
 }
 
 TEST(DBTest, ReadWrite) {
+  do {
     ASSERT_OK(Put("foo", "v1"));
     ASSERT_EQ("v1", Get("foo"));
     ASSERT_OK(Put("bar", "v2"));
     ASSERT_OK(Put("foo", "v3"));
     ASSERT_EQ("v3", Get("foo"));
     ASSERT_EQ("v2", Get("bar"));
+  } while (ChangeOptions());
 }
 
 TEST(DBTest, PutDeleteGet) {
+  do {
     ASSERT_OK(db_->Put(WriteOptions(), "foo", "v1"));
     ASSERT_EQ("v1", Get("foo"));
     ASSERT_OK(db_->Put(WriteOptions(), "foo", "v2"));
     ASSERT_EQ("v2", Get("foo"));
     ASSERT_OK(db_->Delete(WriteOptions(), "foo"));
     ASSERT_EQ("NOT_FOUND", Get("foo"));
+  } while (ChangeOptions());
 }
 
 TEST(DBTest, GetFromImmutableLayer) {
-  Options options;
+  do {
+    Options options = CurrentOptions();
     options.env = env_;
     options.write_buffer_size = 100000;  // Small write buffer
     Reopen(&options);
@@ -350,15 +454,19 @@ TEST(DBTest, GetFromImmutableLayer) {
     Put("k2", std::string(100000, 'y'));            // Trigger compaction
     ASSERT_EQ("v1", Get("foo"));
     env_->delay_sstable_sync_.Release_Store(NULL);  // Release sync calls
+  } while (ChangeOptions());
 }
 
 TEST(DBTest, GetFromVersions) {
+  do {
     ASSERT_OK(Put("foo", "v1"));
     dbfull()->TEST_CompactMemTable();
     ASSERT_EQ("v1", Get("foo"));
+  } while (ChangeOptions());
 }
 
 TEST(DBTest, GetSnapshot) {
+  do {
     // Try with both a short key and a long key
     for (int i = 0; i < 2; i++) {
       std::string key = (i == 0) ? std::string("foo") : std::string(200, 'x');
@@ -372,9 +480,11 @@ TEST(DBTest, GetSnapshot) {
       ASSERT_EQ("v1", Get(key, s1));
       db_->ReleaseSnapshot(s1);
     }
+  } while (ChangeOptions());
 }
 
 TEST(DBTest, GetLevel0Ordering) {
+  do {
     // Check that we process level-0 files in correct order.  The code
     // below generates two level-0 files where the earlier one comes
     // before the later one in the level-0 file list since the earlier
@@ -385,9 +495,11 @@ TEST(DBTest, GetLevel0Ordering) {
     ASSERT_OK(Put("foo", "v2"));
     dbfull()->TEST_CompactMemTable();
     ASSERT_EQ("v2", Get("foo"));
+  } while (ChangeOptions());
 }
 
 TEST(DBTest, GetOrderedByLevels) {
+  do {
     ASSERT_OK(Put("foo", "v1"));
     Compact("a", "z");
     ASSERT_EQ("v1", Get("foo"));
@@ -395,9 +507,11 @@ TEST(DBTest, GetOrderedByLevels) {
     ASSERT_EQ("v2", Get("foo"));
     dbfull()->TEST_CompactMemTable();
     ASSERT_EQ("v2", Get("foo"));
+  } while (ChangeOptions());
 }
 
 TEST(DBTest, GetPicksCorrectFile) {
+  do {
     // Arrange to have multiple files in a non-level-0 level.
     ASSERT_OK(Put("a", "va"));
     Compact("a", "b");
@@ -408,9 +522,11 @@ TEST(DBTest, GetPicksCorrectFile) {
     ASSERT_EQ("va", Get("a"));
     ASSERT_EQ("vf", Get("f"));
     ASSERT_EQ("vx", Get("x"));
+  } while (ChangeOptions());
 }
 
 TEST(DBTest, GetEncountersEmptyLevel) {
+  do {
     // Arrange for the following to happen:
     //   * sstable A in level 0
     //   * nothing in level 1
@@ -443,6 +559,7 @@ TEST(DBTest, GetEncountersEmptyLevel) {
       read_count++;
       ASSERT_EQ("NOT_FOUND", Get("missing"));
     }
+  } while (ChangeOptions());
 }
 
 TEST(DBTest, IterEmpty) {
@@ -620,6 +737,7 @@ TEST(DBTest, IterSmallAndLargeMix) {
 }
 
 TEST(DBTest, IterMultiWithDelete) {
+  do {
     ASSERT_OK(Put("a", "va"));
     ASSERT_OK(Put("b", "vb"));
     ASSERT_OK(Put("c", "vc"));
@@ -632,9 +750,11 @@ TEST(DBTest, IterMultiWithDelete) {
     iter->Prev();
     ASSERT_EQ(IterStatus(iter), "a->va");
     delete iter;
+  } while (ChangeOptions());
 }
 
 TEST(DBTest, Recover) {
+  do {
     ASSERT_OK(Put("foo", "v1"));
     ASSERT_OK(Put("baz", "v5"));
 
@@ -652,9 +772,11 @@ TEST(DBTest, Recover) {
     ASSERT_EQ("v4", Get("foo"));
     ASSERT_EQ("v2", Get("bar"));
     ASSERT_EQ("v5", Get("baz"));
+  } while (ChangeOptions());
 }
 
 TEST(DBTest, RecoveryWithEmptyLog) {
+  do {
     ASSERT_OK(Put("foo", "v1"));
     ASSERT_OK(Put("foo", "v2"));
     Reopen();
@@ -662,12 +784,14 @@ TEST(DBTest, RecoveryWithEmptyLog) {
     ASSERT_OK(Put("foo", "v3"));
     Reopen();
     ASSERT_EQ("v3", Get("foo"));
+  } while (ChangeOptions());
 }
 
 // Check that writes done during a memtable compaction are recovered
 // if the database is shutdown during the memtable compaction.
 TEST(DBTest, RecoverDuringMemtableCompaction) {
-  Options options;
+  do {
+    Options options = CurrentOptions();
     options.env = env_;
     options.write_buffer_size = 1000000;
     Reopen(&options);
@@ -683,6 +807,7 @@ TEST(DBTest, RecoverDuringMemtableCompaction) {
     ASSERT_EQ("v2", Get("bar"));
     ASSERT_EQ(std::string(10000000, 'x'), Get("big1"));
     ASSERT_EQ(std::string(1000, 'y'), Get("big2"));
+  } while (ChangeOptions());
 }
 
 static std::string Key(int i) {
@@ -692,7 +817,7 @@ static std::string Key(int i) {
 }
 
 TEST(DBTest, MinorCompactionsHappen) {
-  Options options;
+  Options options = CurrentOptions();
   options.write_buffer_size = 10000;
   Reopen(&options);
 
@@ -718,7 +843,7 @@
 
 TEST(DBTest, RecoverWithLargeLog) {
   {
-    Options options;
+    Options options = CurrentOptions();
     Reopen(&options);
     ASSERT_OK(Put("big1", std::string(200000, '1')));
     ASSERT_OK(Put("big2", std::string(200000, '2')));
@@ -729,7 +854,7 @@ TEST(DBTest, RecoverWithLargeLog) {
 
   // Make sure that if we re-open with a small write buffer size that
   // we flush table files in the middle of a large log file.
-  Options options;
+  Options options = CurrentOptions();
   options.write_buffer_size = 100000;
   Reopen(&options);
   ASSERT_EQ(NumTableFilesAtLevel(0), 3);
@@ -741,7 +866,7 @@ TEST(DBTest, RecoverWithLargeLog) {
 }
 
 TEST(DBTest, CompactionsGenerateMultipleFiles) {
-  Options options;
+  Options options = CurrentOptions();
   options.write_buffer_size = 100000000;        // Large write buffer
   Reopen(&options);
 
@@ -767,7 +892,7 @@ TEST(DBTest, CompactionsGenerateMultipleFiles) {
 }
 
 TEST(DBTest, RepeatedWritesToSameKey) {
-  Options options;
+  Options options = CurrentOptions();
   options.env = env_;
   options.write_buffer_size = 100000;  // Small write buffer
   Reopen(&options);
@@ -786,7 +911,7 @@ TEST(DBTest, RepeatedWritesToSameKey) {
 }
 
 TEST(DBTest, SparseMerge) {
-  Options options;
+  Options options = CurrentOptions();
   options.compression = kNoCompression;
   Reopen(&options);
 
@@ -837,7 +962,8 @@ static bool Between(uint64_t val, uint64_t low, uint64_t high) {
 }
 
 TEST(DBTest, ApproximateSizes) {
-  Options options;
+  do {
+    Options options = CurrentOptions();
     options.write_buffer_size = 100000000;        // Large write buffer
     options.compression = kNoCompression;
     DestroyAndReopen();
@@ -849,9 +975,11 @@ TEST(DBTest, ApproximateSizes) {
     // Write 8MB (80 values, each 100K)
     ASSERT_EQ(NumTableFilesAtLevel(0), 0);
     const int N = 80;
+    static const int S1 = 100000;
+    static const int S2 = 105000;  // Allow some expansion from metadata
     Random rnd(301);
     for (int i = 0; i < N; i++) {
-      ASSERT_OK(Put(Key(i), RandomString(&rnd, 100000)));
+      ASSERT_OK(Put(Key(i), RandomString(&rnd, S1)));
     }
 
     // 0 because GetApproximateSizes() does not account for memtable space
@@ -863,14 +991,12 @@ TEST(DBTest, ApproximateSizes) {
 
       for (int compact_start = 0; compact_start < N; compact_start += 10) {
         for (int i = 0; i < N; i += 10) {
-        ASSERT_TRUE(Between(Size("", Key(i)), 100000*i, 100000*i + 10000));
-        ASSERT_TRUE(Between(Size("", Key(i) + ".suffix"),
-                            100000 * (i+1), 100000 * (i+1) + 10000));
-        ASSERT_TRUE(Between(Size(Key(i), Key(i+10)),
-                            100000 * 10, 100000 * 10 + 10000));
+          ASSERT_TRUE(Between(Size("", Key(i)), S1*i, S2*i));
+          ASSERT_TRUE(Between(Size("", Key(i) + ".suffix"), S1*(i+1), S2*(i+1)));
+          ASSERT_TRUE(Between(Size(Key(i), Key(i+10)), S1*10, S2*10));
         }
-      ASSERT_TRUE(Between(Size("", Key(50)), 5000000, 5010000));
-      ASSERT_TRUE(Between(Size("", Key(50)+".suffix"), 5100000, 5110000));
+        ASSERT_TRUE(Between(Size("", Key(50)), S1*50, S2*50));
+        ASSERT_TRUE(Between(Size("", Key(50)+".suffix"), S1*50, S2*50));
 
         std::string cstart_str = Key(compact_start);
         std::string cend_str = Key(compact_start + 9);
@@ -882,10 +1008,12 @@ TEST(DBTest, ApproximateSizes) {
 
       ASSERT_EQ(NumTableFilesAtLevel(0), 0);
       ASSERT_GT(NumTableFilesAtLevel(1), 0);
     }
+  } while (ChangeOptions());
 }
 
 TEST(DBTest, ApproximateSizes_MixOfSmallAndLarge) {
-  Options options;
+  do {
+    Options options = CurrentOptions();
     options.compression = kNoCompression;
     Reopen();
@@ -912,12 +1040,13 @@ TEST(DBTest, ApproximateSizes_MixOfSmallAndLarge) {
       ASSERT_TRUE(Between(Size("", Key(5)), 230000, 231000));
       ASSERT_TRUE(Between(Size("", Key(6)), 240000, 241000));
       ASSERT_TRUE(Between(Size("", Key(7)), 540000, 541000));
-      ASSERT_TRUE(Between(Size("", Key(8)), 550000, 551000));
+      ASSERT_TRUE(Between(Size("", Key(8)), 550000, 560000));
 
       ASSERT_TRUE(Between(Size(Key(3), Key(5)), 110000, 111000));
 
       dbfull()->TEST_CompactRange(0, NULL, NULL);
     }
+  } while (ChangeOptions());
 }
 
 TEST(DBTest, IteratorPinsRef) {
TEST ( DBTest , IteratorPinsRef ) {
@ -943,6 +1072,7 @@ TEST(DBTest, IteratorPinsRef) {
}
}
TEST ( DBTest , Snapshot ) {
TEST ( DBTest , Snapshot ) {
do {
Put ( " foo " , " v1 " ) ;
Put ( " foo " , " v1 " ) ;
const Snapshot * s1 = db_ - > GetSnapshot ( ) ;
const Snapshot * s1 = db_ - > GetSnapshot ( ) ;
Put ( " foo " , " v2 " ) ;
Put ( " foo " , " v2 " ) ;
@@ -967,9 +1097,11 @@ TEST(DBTest, Snapshot) {
 
     db_->ReleaseSnapshot(s2);
     ASSERT_EQ("v4", Get("foo"));
+  } while (ChangeOptions());
 }
 
 TEST(DBTest, HiddenValuesAreRemoved) {
+  do {
     Random rnd(301);
     FillLevels("a", "z");
 
@@ -996,6 +1128,7 @@ TEST(DBTest, HiddenValuesAreRemoved) {
 
     ASSERT_EQ(AllEntriesFor("foo"), "[ tiny ]");
     ASSERT_TRUE(Between(Size("", "pastfoo"), 0, 1000));
+  } while (ChangeOptions());
 }
 
 TEST(DBTest, DeletionMarkers1) {
@@ -1054,6 +1187,7 @@ TEST(DBTest, DeletionMarkers2) {
 }
 
 TEST(DBTest, OverlapInLevel0) {
+  do {
     ASSERT_EQ(config::kMaxMemCompactLevel, 2) << "Fix test to match config";
 
     // Fill levels 1 and 2 to disable the pushing of new memtables to levels > 0.
@@ -1090,6 +1224,7 @@ TEST(DBTest, OverlapInLevel0) {
     dbfull()->TEST_CompactMemTable();
     ASSERT_EQ("3", FilesPerLevel());
     ASSERT_EQ("NOT_FOUND", Get("600"));
+  } while (ChangeOptions());
 }
 
 TEST(DBTest, L0_CompactionBug_Issue44_a) {
@@ -1150,7 +1285,7 @@ TEST(DBTest, ComparatorCheck) {
     }
   };
   NewComparator cmp;
-  Options new_options;
+  Options new_options = CurrentOptions();
   new_options.comparator = &cmp;
   Status s = TryReopen(&new_options);
   ASSERT_TRUE(!s.ok());
@@ -1185,9 +1320,10 @@ TEST(DBTest, CustomComparator) {
     }
   };
   NumberComparator cmp;
-  Options new_options;
+  Options new_options = CurrentOptions();
   new_options.create_if_missing = true;
   new_options.comparator = &cmp;
+  new_options.filter_policy = NULL;      // Cannot use bloom filters
   new_options.write_buffer_size = 1000;  // Compact more often
   DestroyAndReopen(&new_options);
   ASSERT_OK(Put("[10]", "ten"));
@@ -1197,6 +1333,8 @@ TEST(DBTest, CustomComparator) {
     ASSERT_EQ("ten", Get("[0xa]"));
     ASSERT_EQ("twenty", Get("[20]"));
     ASSERT_EQ("twenty", Get("[0x14]"));
+    ASSERT_EQ("NOT_FOUND", Get("[15]"));
+    ASSERT_EQ("NOT_FOUND", Get("[0xf]"));
     Compact("[0]", "[9999]");
   }
 
@@ -1285,7 +1423,7 @@ TEST(DBTest, DBOpen_Options) {
 
 // Check that number of files does not grow when we are out of space
 TEST(DBTest, NoSpace) {
-  Options options;
+  Options options = CurrentOptions();
   options.env = env_;
   Reopen(&options);
 
@@ -1314,6 +1452,53 @@ TEST(DBTest, FilesDeletedAfterCompaction) {
   ASSERT_EQ(CountFiles(), num_files);
 }
 
+TEST(DBTest, BloomFilter) {
+  env_->count_random_reads_ = true;
+  Options options = CurrentOptions();
+  options.env = env_;
+  options.block_cache = NewLRUCache(0);  // Prevent cache hits
+  options.filter_policy = NewBloomFilterPolicy(10);
+  Reopen(&options);
+
+  // Populate multiple layers
+  const int N = 10000;
+  for (int i = 0; i < N; i++) {
+    ASSERT_OK(Put(Key(i), Key(i)));
+  }
+  Compact("a", "z");
+  for (int i = 0; i < N; i += 100) {
+    ASSERT_OK(Put(Key(i), Key(i)));
+  }
+  dbfull()->TEST_CompactMemTable();
+
+  // Prevent auto compactions triggered by seeks
+  env_->delay_sstable_sync_.Release_Store(env_);
+
+  // Lookup present keys.  Should rarely read from small sstable.
+  env_->random_read_counter_.Reset();
+  for (int i = 0; i < N; i++) {
+    ASSERT_EQ(Key(i), Get(Key(i)));
+  }
+  int reads = env_->random_read_counter_.Read();
+  fprintf(stderr, "%d present => %d reads\n", N, reads);
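+  // Expect roughly one sstable read per present key; the slack allows for a
+  // few extra reads (e.g., bloom filter false positives on the small sstable).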
+  ASSERT_GE(reads, N);
+  ASSERT_LE(reads, N + 2*N/100);
+
+  // Lookup missing keys.  Should rarely read from either sstable.
+  env_->random_read_counter_.Reset();
+  for (int i = 0; i < N; i++) {
+    ASSERT_EQ("NOT_FOUND", Get(Key(i) + ".missing"));
+  }
+  reads = env_->random_read_counter_.Read();
+  fprintf(stderr, "%d missing => %d reads\n", N, reads);
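+  // With 10 bits per key the filter's false-positive rate is roughly 1%, so
+  // only a small fraction of the missing lookups should reach an sstable.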
+  ASSERT_LE(reads, 3*N/100);
+
+  env_->delay_sstable_sync_.Release_Store(NULL);
+  Close();
+  delete options.block_cache;
+  delete options.filter_policy;
+}
+
 // Multi-threaded test:
 namespace {
 
@@ -1381,6 +1566,7 @@ static void MTThreadBody(void* arg) {
 }  // namespace
 
 TEST(DBTest, MultiThreaded) {
+  do {
     // Initialize state
     MTState mt;
     mt.test = this;
@@ -1408,6 +1594,7 @@ TEST(DBTest, MultiThreaded) {
         env_->SleepForMicroseconds(100000);
       }
     }
+  } while (ChangeOptions());
 }
 
 namespace {
@@ -1573,7 +1760,8 @@ static bool CompareIterators(int step,
 
 TEST(DBTest, Randomized) {
   Random rnd(test::RandomSeed());
-  ModelDB model(last_options_);
+  do {
+    ModelDB model(CurrentOptions());
     const int N = 10000;
     const Snapshot* model_snap = NULL;
     const Snapshot* db_snap = NULL;
@@ -1582,6 +1770,7 @@ TEST(DBTest, Randomized) {
     if (step % 100 == 0) {
       fprintf(stderr, "Step %d of %d\n", step, N);
     }
+    // TODO(sanjay): Test Get() works
     int p = rnd.Uniform(100);
     if (p < 45) {                        // Put
       k = RandomKey(&rnd);
@@ -1637,6 +1826,7 @@ TEST(DBTest, Randomized) {
     }
     if (model_snap != NULL) model.ReleaseSnapshot(model_snap);
     if (db_snap != NULL) db_->ReleaseSnapshot(db_snap);
+  } while (ChangeOptions());
 }
 
 std::string MakeKey(unsigned int num) {