@@ -104,6 +104,7 @@ class TestReadOnlyWithCompressedCache
 TEST_P(TestReadOnlyWithCompressedCache, ReadOnlyWithCompressedCache) {
   if (use_mmap_ && !IsMemoryMappedAccessSupported()) {
+    ROCKSDB_GTEST_SKIP("Test requires MMAP support");
     return;
   }
   ASSERT_OK(Put("foo", "bar"));
@@ -291,7 +292,7 @@ TEST_F(DBTest2, CacheIndexAndFilterWithDBRestart) {
   BlockBasedTableOptions table_options;
   table_options.cache_index_and_filter_blocks = true;
   table_options.filter_policy.reset(NewBloomFilterPolicy(20));
-  options.table_factory.reset(new BlockBasedTableFactory(table_options));
+  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
   CreateAndReopenWithCF({"pikachu"}, options);
   Put(1, "a", "begin");
@@ -1344,7 +1345,7 @@ TEST_F(DBTest2, PresetCompressionDictLocality) {
   options.target_file_size_base = kNumEntriesPerFile * kNumBytesPerEntry;
   BlockBasedTableOptions table_options;
   table_options.cache_index_and_filter_blocks = true;
-  options.table_factory.reset(new BlockBasedTableFactory(table_options));
+  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
   Reopen(options);
   Random rnd(301);
@@ -1470,7 +1471,7 @@ TEST_P(CompressionFailuresTest, CompressionFailures) {
   BlockBasedTableOptions table_options;
   table_options.block_size = 512;
   table_options.verify_compression = true;
-  options.table_factory.reset(new BlockBasedTableFactory(table_options));
+  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
   options.compression = compression_type_;
   options.compression_opts.parallel_threads = compression_parallel_threads_;
@@ -1808,7 +1809,7 @@ class PinL0IndexAndFilterBlocksTest
     table_options.cache_index_and_filter_blocks = true;
     table_options.pin_l0_filter_and_index_blocks_in_cache = true;
     table_options.filter_policy.reset(NewBloomFilterPolicy(20));
-    options->table_factory.reset(new BlockBasedTableFactory(table_options));
+    options->table_factory.reset(NewBlockBasedTableFactory(table_options));
     CreateAndReopenWithCF({"pikachu"}, *options);
     Put(1, "a", "begin");
@@ -1848,7 +1849,7 @@ TEST_P(PinL0IndexAndFilterBlocksTest,
   table_options.cache_index_and_filter_blocks = true;
   table_options.pin_l0_filter_and_index_blocks_in_cache = true;
   table_options.filter_policy.reset(NewBloomFilterPolicy(20));
-  options.table_factory.reset(new BlockBasedTableFactory(table_options));
+  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
   CreateAndReopenWithCF({"pikachu"}, options);
   ASSERT_OK(Put(1, "key", "val"));
@@ -2485,26 +2486,30 @@ TEST_F(DBTest2, ReadAmpBitmapLiveInCacheAfterDBClose) {
   {
     const int kIdBufLen = 100;
     char id_buf[kIdBufLen];
+    Status s = Status::NotSupported();
 #ifndef OS_WIN
     // You can't open a directory on windows using random access file
     std::unique_ptr<RandomAccessFile> file;
-    ASSERT_OK(env_->NewRandomAccessFile(dbname_, &file, EnvOptions()));
-    if (file->GetUniqueId(id_buf, kIdBufLen) == 0) {
-      // fs holding db directory doesn't support getting a unique file id,
-      // this means that running this test will fail because lru_cache will load
-      // the blocks again regardless of them being already in the cache
-      return;
-    }
-#else
-    std::unique_ptr<Directory> dir;
-    ASSERT_OK(env_->NewDirectory(dbname_, &dir));
-    if (dir->GetUniqueId(id_buf, kIdBufLen) == 0) {
-      // fs holding db directory doesn't support getting a unique file id,
-      // this means that running this test will fail because lru_cache will load
-      // the blocks again regardless of them being already in the cache
-      return;
-    }
+    s = env_->NewRandomAccessFile(dbname_, &file, EnvOptions());
+    if (s.ok()) {
+      if (file->GetUniqueId(id_buf, kIdBufLen) == 0) {
+        // fs holding db directory doesn't support getting a unique file id,
+        // this means that running this test will fail because lru_cache will
+        // load the blocks again regardless of them being already in the cache
+        return;
+      }
+    }
 #endif
+    if (!s.ok()) {
+      std::unique_ptr<Directory> dir;
+      ASSERT_OK(env_->NewDirectory(dbname_, &dir));
+      if (dir->GetUniqueId(id_buf, kIdBufLen) == 0) {
+        // fs holding db directory doesn't support getting a unique file id,
+        // this means that running this test will fail because lru_cache will
+        // load the blocks again regardless of them being already in the cache
+        return;
+      }
+    }
   }
   uint32_t bytes_per_bit[2] = {1, 16};
   for (size_t k = 0; k < 2; k++) {
@@ -3297,7 +3302,7 @@ TEST_F(DBTest2, RateLimitedCompactionReads) {
   BlockBasedTableOptions bbto;
   bbto.block_size = 16384;
   bbto.no_block_cache = true;
-  options.table_factory.reset(new BlockBasedTableFactory(bbto));
+  options.table_factory.reset(NewBlockBasedTableFactory(bbto));
   DestroyAndReopen(options);
   for (int i = 0; i < kNumL0Files; ++i) {
@@ -3342,6 +3347,7 @@ TEST_F(DBTest2, RateLimitedCompactionReads) {
 // is on levels higher than the new num_levels.
 TEST_F(DBTest2, ReduceLevel) {
   Options options;
+  options.env = env_;
   options.disable_auto_compactions = true;
   options.num_levels = 7;
   Reopen(options);
@@ -3370,6 +3376,7 @@ TEST_F(DBTest2, ReadCallbackTest) {
   Options options;
   options.disable_auto_compactions = true;
   options.num_levels = 7;
+  options.env = env_;
   Reopen(options);
   std::vector<const Snapshot*> snapshots;
   // Try to create a db with multiple layers and a memtable
@@ -3629,7 +3636,9 @@ TEST_F(DBTest2, TraceAndReplay) {
   column_families.push_back(
       ColumnFamilyDescriptor("pikachu", ColumnFamilyOptions()));
   std::vector<ColumnFamilyHandle*> handles;
-  ASSERT_OK(DB::Open(DBOptions(), dbname2, column_families, &handles, &db2));
+  DBOptions db_opts;
+  db_opts.env = env_;
+  ASSERT_OK(DB::Open(db_opts, dbname2, column_families, &handles, &db2));
   env_->SleepForMicroseconds(100);
   // Verify that the keys don't already exist
@@ -3704,7 +3713,9 @@ TEST_F(DBTest2, TraceWithLimit) {
   column_families.push_back(
       ColumnFamilyDescriptor("pikachu", ColumnFamilyOptions()));
   std::vector<ColumnFamilyHandle*> handles;
-  ASSERT_OK(DB::Open(DBOptions(), dbname2, column_families, &handles, &db2));
+  DBOptions db_opts;
+  db_opts.env = env_;
+  ASSERT_OK(DB::Open(db_opts, dbname2, column_families, &handles, &db2));
   env_->SleepForMicroseconds(100);
   // Verify that the keys don't already exist
@@ -3772,7 +3783,9 @@ TEST_F(DBTest2, TraceWithSampling) {
   column_families.push_back(
       ColumnFamilyDescriptor("pikachu", ColumnFamilyOptions()));
   std::vector<ColumnFamilyHandle*> handles;
-  ASSERT_OK(DB::Open(DBOptions(), dbname2, column_families, &handles, &db2));
+  DBOptions db_opts;
+  db_opts.env = env_;
+  ASSERT_OK(DB::Open(db_opts, dbname2, column_families, &handles, &db2));
   env_->SleepForMicroseconds(100);
   ASSERT_TRUE(db2->Get(ro, handles[0], "a", &value).IsNotFound());
@@ -3872,7 +3885,9 @@ TEST_F(DBTest2, TraceWithFilter) {
   column_families.push_back(
       ColumnFamilyDescriptor("pikachu", ColumnFamilyOptions()));
   std::vector<ColumnFamilyHandle*> handles;
-  ASSERT_OK(DB::Open(DBOptions(), dbname2, column_families, &handles, &db2));
+  DBOptions db_opts;
+  db_opts.env = env_;
+  ASSERT_OK(DB::Open(db_opts, dbname2, column_families, &handles, &db2));
   env_->SleepForMicroseconds(100);
   // Verify that the keys don't already exist
@@ -3918,7 +3933,7 @@ TEST_F(DBTest2, TraceWithFilter) {
   handles.clear();
   DB* db3 = nullptr;
-  ASSERT_OK(DB::Open(DBOptions(), dbname3, column_families, &handles, &db3));
+  ASSERT_OK(DB::Open(db_opts, dbname3, column_families, &handles, &db3));
   env_->SleepForMicroseconds(100);
   // Verify that the keys don't already exist
@@ -3974,6 +3989,11 @@ TEST_F(DBTest2, TraceWithFilter) {
 TEST_F(DBTest2, PinnableSliceAndMmapReads) {
   Options options = CurrentOptions();
+  options.env = env_;
+  if (options.env != Env::Default()) {
+    ROCKSDB_GTEST_SKIP("Test requires default environment");
+    return;
+  }
   options.allow_mmap_reads = true;
   options.max_open_files = 100;
   options.compression = kNoCompression;
@@ -4026,7 +4046,7 @@ TEST_F(DBTest2, DISABLED_IteratorPinnedMemory) {
   bbto.cache_index_and_filter_blocks = false;
   bbto.block_cache = NewLRUCache(100000);
   bbto.block_size = 400;  // small block size
-  options.table_factory.reset(new BlockBasedTableFactory(bbto));
+  options.table_factory.reset(NewBlockBasedTableFactory(bbto));
   Reopen(options);
   Random rnd(301);
@@ -4252,6 +4272,7 @@ TEST_F(DBTest2, TestCompactFiles) {
   SyncPoint::GetInstance()->EnableProcessing();
   Options options;
+  options.env = env_;
   options.num_levels = 2;
   options.disable_auto_compactions = true;
   Reopen(options);
@@ -4807,6 +4828,7 @@ TEST_F(DBTest2, BlockBasedTablePrefixIndexSeekForPrev) {
 TEST_F(DBTest2, PartitionedIndexPrefetchFailure) {
   Options options = last_options_;
+  options.env = env_;
   options.max_open_files = 20;
   BlockBasedTableOptions bbto;
   bbto.index_type = BlockBasedTableOptions::IndexType::kTwoLevelIndexSearch;