diff --git a/HISTORY.md b/HISTORY.md
index d3660ee64..79feac37c 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -1,5 +1,9 @@
 # Rocksdb Change Log
 ## Unreleased
+### Default Option Change
+* LRUCacheOptions.high_pri_pool_ratio is set to 0.5 (previously 0.0) by default, which means that midpoint insertion is enabled by default. The same change is made for the default value of the high_pri_pool_ratio argument in NewLRUCache(). When the block cache is not explicitly created, the small block cache created by BlockBasedTable will still have this option set to 0.0.
+* Change BlockBasedTableOptions.cache_index_and_filter_blocks_with_high_priority's default value from false to true.
+
 ### Public API Change
 * Now DB::Close() will return Aborted() error when there is unreleased snapshot. Users can retry after all snapshots are released.
 * Partitions of partitioned indexes no longer affect the read amplification statistics.
diff --git a/cache/cache_test.cc b/cache/cache_test.cc
index d7b191bb3..46ce78db6 100644
--- a/cache/cache_test.cc
+++ b/cache/cache_test.cc
@@ -90,7 +90,7 @@ class CacheTest : public testing::TestWithParam<std::string> {
                                    bool strict_capacity_limit) {
     auto type = GetParam();
     if (type == kLRU) {
-      return NewLRUCache(capacity, num_shard_bits, strict_capacity_limit);
+      return NewLRUCache(capacity, num_shard_bits, strict_capacity_limit, 0.0);
     }
     if (type == kClock) {
       return NewClockCache(capacity, num_shard_bits, strict_capacity_limit);
diff --git a/include/rocksdb/cache.h b/include/rocksdb/cache.h
index 8fb691559..410c2cf82 100644
--- a/include/rocksdb/cache.h
+++ b/include/rocksdb/cache.h
@@ -59,7 +59,7 @@ struct LRUCacheOptions {
   //
   // See also
   // BlockBasedTableOptions::cache_index_and_filter_blocks_with_high_priority.
-  double high_pri_pool_ratio = 0.0;
+  double high_pri_pool_ratio = 0.5;
 
   // If non-nullptr will use this allocator instead of system allocator when
   // allocating memory for cache blocks. Call this method before you start using
@@ -99,7 +99,7 @@
 // will be at least 512KB and number of shard bits will not exceed 6.
 extern std::shared_ptr<Cache> NewLRUCache(
     size_t capacity, int num_shard_bits = -1,
-    bool strict_capacity_limit = false, double high_pri_pool_ratio = 0.0,
+    bool strict_capacity_limit = false, double high_pri_pool_ratio = 0.5,
     std::shared_ptr<MemoryAllocator> memory_allocator = nullptr,
     bool use_adaptive_mutex = kDefaultToAdaptiveMutex);
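To make the behavior change concrete, here is a minimal sketch (not part of the patch; the capacities and variable names are illustrative) of how callers see the changed defaults and how to opt back out:

```cpp
#include <memory>

#include "rocksdb/cache.h"

int main() {
  // Midpoint insertion is now on by default: this call gets
  // high_pri_pool_ratio = 0.5 unless it is overridden.
  std::shared_ptr<rocksdb::Cache> cache = rocksdb::NewLRUCache(64 << 20);

  // Callers that depend on the old behavior can pass 0.0 explicitly,
  // as cache_test.cc now does:
  std::shared_ptr<rocksdb::Cache> flat_lru = rocksdb::NewLRUCache(
      64 << 20, /*num_shard_bits=*/-1,
      /*strict_capacity_limit=*/false, /*high_pri_pool_ratio=*/0.0);

  // Equivalent opt-out via LRUCacheOptions:
  rocksdb::LRUCacheOptions opts;
  opts.capacity = 64 << 20;
  opts.high_pri_pool_ratio = 0.0;
  std::shared_ptr<rocksdb::Cache> from_opts = rocksdb::NewLRUCache(opts);
  return 0;
}
```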
diff --git a/include/rocksdb/table.h b/include/rocksdb/table.h
index 929239100..712c604ad 100644
--- a/include/rocksdb/table.h
+++ b/include/rocksdb/table.h
@@ -74,7 +74,7 @@ struct BlockBasedTableOptions {
   // blocks with high priority. If set to true, depending on implementation of
   // block cache, index and filter blocks may be less likely to be evicted
   // than data blocks.
-  bool cache_index_and_filter_blocks_with_high_priority = false;
+  bool cache_index_and_filter_blocks_with_high_priority = true;
 
   // if cache_index_and_filter_blocks is true and the below is true, then
   // filter and index blocks are stored in the cache, but a reference is
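A sketch of the table-level flag flipped above (not part of the patch; `MakeOptions` and the capacities are illustrative). Note the flag only matters when `cache_index_and_filter_blocks` is also set:

```cpp
#include "rocksdb/cache.h"
#include "rocksdb/options.h"
#include "rocksdb/table.h"

rocksdb::Options MakeOptions() {
  rocksdb::BlockBasedTableOptions table_opts;
  // The high-priority flag only takes effect when index and filter blocks
  // are placed in the block cache at all:
  table_opts.cache_index_and_filter_blocks = true;
  // Now defaults to true; set it back to false to restore the old
  // eviction behavior.
  table_opts.cache_index_and_filter_blocks_with_high_priority = true;
  // Explicitly created caches now default to high_pri_pool_ratio = 0.5,
  // so index and filter blocks land in the high-priority pool.
  table_opts.block_cache = rocksdb::NewLRUCache(128 << 20);

  rocksdb::Options options;
  options.table_factory.reset(rocksdb::NewBlockBasedTableFactory(table_opts));
  return options;
}
```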
diff --git a/options/options_test.cc b/options/options_test.cc
index 24aeec99e..823a9c1e0 100644
--- a/options/options_test.cc
+++ b/options/options_test.cc
@@ -617,8 +617,9 @@ TEST_F(OptionsTest, GetBlockBasedTableOptionsFromString) {
                 new_opt.block_cache)->GetNumShardBits(),
             GetDefaultCacheShardBits(new_opt.block_cache->GetCapacity()));
   ASSERT_EQ(new_opt.block_cache->HasStrictCapacityLimit(), false);
-  ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(
-                new_opt.block_cache)->GetHighPriPoolRatio(), 0.0);
+  ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(new_opt.block_cache)
+                ->GetHighPriPoolRatio(),
+            0.5);
   ASSERT_TRUE(new_opt.block_cache_compressed != nullptr);
   ASSERT_EQ(new_opt.block_cache_compressed->GetCapacity(), 2*1024UL*1024UL);
   // Default values
@@ -627,16 +628,17 @@ TEST_F(OptionsTest, GetBlockBasedTableOptionsFromString) {
             GetDefaultCacheShardBits(
                 new_opt.block_cache_compressed->GetCapacity()));
   ASSERT_EQ(new_opt.block_cache_compressed->HasStrictCapacityLimit(), false);
-  ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(
-                new_opt.block_cache_compressed)->GetHighPriPoolRatio(),
-            0.0);
+  ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(new_opt.block_cache_compressed)
+                ->GetHighPriPoolRatio(),
+            0.5);
   // Set couple of block cache options.
-  ASSERT_OK(GetBlockBasedTableOptionsFromString(table_opt,
-            "block_cache={num_shard_bits=5;high_pri_pool_ratio=0.5;};"
-            "block_cache_compressed={num_shard_bits=5;"
-            "high_pri_pool_ratio=0.5;}",
-            &new_opt));
+  ASSERT_OK(GetBlockBasedTableOptionsFromString(
+      table_opt,
+      "block_cache={num_shard_bits=5;high_pri_pool_ratio=0.5;};"
+      "block_cache_compressed={num_shard_bits=5;"
+      "high_pri_pool_ratio=0.0;}",
+      &new_opt));
   ASSERT_EQ(new_opt.block_cache->GetCapacity(), 0);
   ASSERT_EQ(std::dynamic_pointer_cast<ShardedCache>(
                 new_opt.block_cache)->GetNumShardBits(), 5);
@@ -648,9 +650,9 @@ TEST_F(OptionsTest, GetBlockBasedTableOptionsFromString) {
   ASSERT_EQ(std::dynamic_pointer_cast<ShardedCache>(
                 new_opt.block_cache_compressed)->GetNumShardBits(), 5);
   ASSERT_EQ(new_opt.block_cache_compressed->HasStrictCapacityLimit(), false);
-  ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(
-                new_opt.block_cache_compressed)->GetHighPriPoolRatio(),
-            0.5);
+  ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(new_opt.block_cache_compressed)
+                ->GetHighPriPoolRatio(),
+            0.0);
   // Set couple of block cache options.
   ASSERT_OK(GetBlockBasedTableOptionsFromString(table_opt,
@@ -664,16 +666,17 @@ TEST_F(OptionsTest, GetBlockBasedTableOptionsFromString) {
   ASSERT_EQ(std::dynamic_pointer_cast<ShardedCache>(
                 new_opt.block_cache)->GetNumShardBits(), 4);
   ASSERT_EQ(new_opt.block_cache->HasStrictCapacityLimit(), true);
-  ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(
-                new_opt.block_cache)->GetHighPriPoolRatio(), 0.0);
+  ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(new_opt.block_cache)
+                ->GetHighPriPoolRatio(),
+            0.5);
   ASSERT_TRUE(new_opt.block_cache_compressed != nullptr);
   ASSERT_EQ(new_opt.block_cache_compressed->GetCapacity(), 1024UL*1024UL);
   ASSERT_EQ(std::dynamic_pointer_cast<ShardedCache>(
                 new_opt.block_cache_compressed)->GetNumShardBits(), 4);
   ASSERT_EQ(new_opt.block_cache_compressed->HasStrictCapacityLimit(), true);
-  ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(
-                new_opt.block_cache_compressed)->GetHighPriPoolRatio(),
-            0.0);
+  ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(new_opt.block_cache_compressed)
+                ->GetHighPriPoolRatio(),
+            0.5);
 }
 #endif  // !ROCKSDB_LITE
diff --git a/table/block_based/block_based_table_factory.cc b/table/block_based/block_based_table_factory.cc
index 96812e233..9dca2a6f0 100644
--- a/table/block_based/block_based_table_factory.cc
+++ b/table/block_based/block_based_table_factory.cc
@@ -167,7 +167,12 @@ BlockBasedTableFactory::BlockBasedTableFactory(
   if (table_options_.no_block_cache) {
     table_options_.block_cache.reset();
  } else if (table_options_.block_cache == nullptr) {
-    table_options_.block_cache = NewLRUCache(8 << 20);
+    LRUCacheOptions co;
+    co.capacity = 8 << 20;
+    // It makes little sense to pay the overhead of mid-point insertion while
+    // the block cache is only 8MB.
+    co.high_pri_pool_ratio = 0.0;
+    table_options_.block_cache = NewLRUCache(co);
   }
   if (table_options_.block_size_deviation < 0 ||
       table_options_.block_size_deviation > 100) {
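As the updated test asserts, a ratio given in an option string still overrides the new default. A minimal sketch along the same lines (not part of the patch; `ParseTableOptions` is an illustrative name, and the string mirrors the test above):

```cpp
#include <cassert>

#include "rocksdb/convenience.h"
#include "rocksdb/table.h"

void ParseTableOptions() {
  rocksdb::BlockBasedTableOptions base, parsed;
  // A high_pri_pool_ratio spelled out in the option string wins over the
  // new 0.5 default.
  rocksdb::Status s = rocksdb::GetBlockBasedTableOptionsFromString(
      base,
      "block_cache={capacity=1M;num_shard_bits=4;high_pri_pool_ratio=0.0;}",
      &parsed);
  assert(s.ok());
}
```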
diff --git a/table/block_based/block_based_table_reader.cc b/table/block_based/block_based_table_reader.cc
index e73b0c08c..017d6126c 100644
--- a/table/block_based/block_based_table_reader.cc
+++ b/table/block_based/block_based_table_reader.cc
@@ -2626,12 +2626,11 @@ void BlockBasedTableIterator<TBlockIter, TValue>::SeekImpl(
   CheckOutOfBound();
 
   if (target) {
-    assert(
-        !Valid() ||
-        ((block_type_ == BlockType::kIndex &&
-          !table_->get_rep()->index_key_includes_seq)
-             ? (user_comparator_.Compare(ExtractUserKey(*target), key()) <= 0)
-             : (icomp_.Compare(*target, key()) <= 0)));
+    assert(!Valid() || ((block_type_ == BlockType::kIndex &&
+                         !table_->get_rep()->index_key_includes_seq)
+                            ? (user_comparator_.Compare(ExtractUserKey(*target),
+                                                        key()) <= 0)
+                            : (icomp_.Compare(*target, key()) <= 0)));
   }
 }
 
@@ -2954,8 +2953,8 @@ InternalIterator* BlockBasedTable::NewIterator(
             /*input_iter=*/nullptr, /*get_context=*/nullptr, &lookup_context),
         !skip_filters && !read_options.total_order_seek &&
             prefix_extractor != nullptr,
-        need_upper_bound_check, prefix_extractor, BlockType::kData,
-        caller, compaction_readahead_size);
+        need_upper_bound_check, prefix_extractor, BlockType::kData, caller,
+        compaction_readahead_size);
   } else {
     auto* mem =
         arena->AllocateAligned(sizeof(BlockBasedTableIterator<DataBlockIter>));
@@ -2966,8 +2965,8 @@ InternalIterator* BlockBasedTable::NewIterator(
             &lookup_context),
         !skip_filters && !read_options.total_order_seek &&
             prefix_extractor != nullptr,
-        need_upper_bound_check, prefix_extractor, BlockType::kData,
-        caller, compaction_readahead_size);
+        need_upper_bound_check, prefix_extractor, BlockType::kData, caller,
+        compaction_readahead_size);
   }
 }
 
@@ -3125,8 +3124,8 @@ Status BlockBasedTable::Get(const ReadOptions& read_options, const Slice& key,
       DataBlockIter biter;
       uint64_t referenced_data_size = 0;
       NewDataBlockIterator<DataBlockIter>(
-          read_options, v.handle, &biter, BlockType::kData,
-          get_context, &lookup_data_block_context,
+          read_options, v.handle, &biter, BlockType::kData, get_context,
+          &lookup_data_block_context,
           /*s=*/Status(), /*prefetch_buffer*/ nullptr);
 
       if (no_io && biter.status().IsIncomplete()) {
@@ -3278,8 +3277,8 @@ void BlockBasedTable::MultiGet(const ReadOptions& read_options,
           offset = iiter->value().handle.offset();
           biter.Invalidate(Status::OK());
           NewDataBlockIterator<DataBlockIter>(
-              read_options, v.handle, &biter, BlockType::kData,
-              get_context, &lookup_data_block_context, Status(), nullptr);
+              read_options, v.handle, &biter, BlockType::kData, get_context,
+              &lookup_data_block_context, Status(), nullptr);
           reusing_block = false;
         }