diff --git a/HISTORY.md b/HISTORY.md
index 439223c2a..f2fb97f6d 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -8,11 +8,15 @@
 * Fix memory corruption error in scans if async_io is enabled. Memory corruption happened if there is IOError while reading the data leading to empty buffer and other buffer already in progress of async read goes again for reading.
 * Fix failed memtable flush retry bug that could cause wrongly ordered updates, which would surface to writers as `Status::Corruption` in case of `force_consistency_checks=true` (default). It affects use cases that enable both parallel flush (`max_background_flushes > 1` or `max_background_jobs >= 8`) and non-default memtable count (`max_write_buffer_number > 2`).
 * Fixed an issue where the `READ_NUM_MERGE_OPERANDS` ticker was not updated when the base key-value or tombstone was read from an SST file.
+* Fixed a memory safety bug when using a SecondaryCache with `block_cache_compressed`. `block_cache_compressed` no longer attempts to use SecondaryCache features.
 * Fixed a regression in scan for async_io. During seek, valid buffers were getting cleared causing a regression.
 
 ### New Features
 * Add basic support for user-defined timestamp to Merge (#10819).
 
+### Public API Changes
+* Marked `block_cache_compressed` as a deprecated feature. Use SecondaryCache instead.
+
 ## 7.8.0 (10/22/2022)
 ### New Features
 * `DeleteRange()` now supports user-defined timestamp.
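For readers migrating off `block_cache_compressed`, here is a rough sketch of the configuration the deprecation note points to. It is not part of this patch: `NewCompressedSecondaryCache` and the capacities are illustrative assumptions, while `LRUCacheOptions::secondary_cache`, `NewLRUCache`, and `NewBlockBasedTableFactory` are taken from the code changed below.

```cpp
#include <rocksdb/cache.h>
#include <rocksdb/db.h>
#include <rocksdb/options.h>
#include <rocksdb/table.h>

using namespace ROCKSDB_NAMESPACE;

int main() {
  // Deprecated setup: a dedicated cache for compressed blocks.
  //   BlockBasedTableOptions old_opts;
  //   old_opts.block_cache_compressed = NewLRUCache(64 << 20);

  // Sketch of the suggested replacement: one uncompressed block cache with a
  // compressed secondary cache attached behind it. NewCompressedSecondaryCache
  // and all capacities here are illustrative assumptions, not from this patch.
  LRUCacheOptions cache_opts;
  cache_opts.capacity = 64 << 20;  // uncompressed block cache
  cache_opts.secondary_cache = NewCompressedSecondaryCache(128 << 20);

  BlockBasedTableOptions table_opts;
  table_opts.block_cache = NewLRUCache(cache_opts);

  Options options;
  options.create_if_missing = true;
  options.table_factory.reset(NewBlockBasedTableFactory(table_opts));

  DB* db = nullptr;
  Status s = DB::Open(options, "/tmp/secondary_cache_demo", &db);
  delete db;
  return s.ok() ? 0 : 1;
}
```

The new test below wires up the same `secondary_cache` field with a test-only `TestSecondaryCache` to verify that `block_cache_compressed` no longer reaches into it.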
diff --git a/cache/lru_cache_test.cc b/cache/lru_cache_test.cc
index e5039394b..b7e436c10 100644
--- a/cache/lru_cache_test.cc
+++ b/cache/lru_cache_test.cc
@@ -1888,6 +1888,43 @@ TEST_F(DBSecondaryCacheTest, SecondaryCacheFailureTest) {
   Destroy(options);
 }
 
+TEST_F(DBSecondaryCacheTest, TestSecondaryWithCompressedCache) {
+  if (!Snappy_Supported()) {
+    ROCKSDB_GTEST_SKIP("Compressed cache test requires snappy support");
+    return;
+  }
+  LRUCacheOptions opts(2000 /* capacity */, 0 /* num_shard_bits */,
+                       false /* strict_capacity_limit */,
+                       0.5 /* high_pri_pool_ratio */,
+                       nullptr /* memory_allocator */, kDefaultToAdaptiveMutex,
+                       kDontChargeCacheMetadata);
+  std::shared_ptr<TestSecondaryCache> secondary_cache(
+      new TestSecondaryCache(2048 * 1024));
+  opts.secondary_cache = secondary_cache;
+  std::shared_ptr<Cache> cache = NewLRUCache(opts);
+  BlockBasedTableOptions table_options;
+  table_options.block_cache_compressed = cache;
+  table_options.no_block_cache = true;
+  table_options.block_size = 1234;
+  Options options = GetDefaultOptions();
+  options.compression = kSnappyCompression;
+  options.create_if_missing = true;
+  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
+  DestroyAndReopen(options);
+  Random rnd(301);
+  const int N = 6;
+  for (int i = 0; i < N; i++) {
+    // Partly compressible
+    std::string p_v = rnd.RandomString(507) + std::string(500, ' ');
+    ASSERT_OK(Put(Key(i), p_v));
+  }
+  ASSERT_OK(Flush());
+  for (int i = 0; i < 2 * N; i++) {
+    std::string v = Get(Key(i % N));
+    ASSERT_EQ(1007, v.size());
+  }
+}
+
 TEST_F(LRUCacheSecondaryCacheTest, BasicWaitAllTest) {
   LRUCacheOptions opts(1024 /* capacity */, 2 /* num_shard_bits */,
                        false /* strict_capacity_limit */,
diff --git a/include/rocksdb/table.h b/include/rocksdb/table.h
index c5a545eef..3a2bf2629 100644
--- a/include/rocksdb/table.h
+++ b/include/rocksdb/table.h
@@ -266,6 +266,9 @@ struct BlockBasedTableOptions {
   // IF NULL, no page cache is used
   std::shared_ptr<PersistentCache> persistent_cache = nullptr;
 
+  // DEPRECATED: This feature is planned for removal in a future release.
+  // Use SecondaryCache instead.
+  //
   // If non-NULL use the specified cache for compressed blocks.
   // If NULL, rocksdb will not use a compressed block cache.
   // Note: though it looks similar to `block_cache`, RocksDB doesn't put the
diff --git a/table/block_based/block_based_table_builder.cc b/table/block_based/block_based_table_builder.cc
index c79a44830..fed69af07 100644
--- a/table/block_based/block_based_table_builder.cc
+++ b/table/block_based/block_based_table_builder.cc
@@ -22,6 +22,7 @@
 #include
 #include "cache/cache_entry_roles.h"
+#include "cache/cache_helpers.h"
 #include "cache/cache_key.h"
 #include "cache/cache_reservation_manager.h"
 #include "db/dbformat.h"
@@ -1417,15 +1418,6 @@ IOStatus BlockBasedTableBuilder::io_status() const {
   return rep_->GetIOStatus();
 }
 
-namespace {
-// Delete the entry resided in the cache.
-template <class Entry>
-void DeleteEntryCached(const Slice& /*key*/, void* value) {
-  auto entry = reinterpret_cast<Entry*>(value);
-  delete entry;
-}
-}  // namespace
-
 //
 // Make a copy of the block contents and insert into compressed block cache
 //
@@ -1454,7 +1446,7 @@ Status BlockBasedTableBuilder::InsertBlockInCompressedCache(
     s = block_cache_compressed->Insert(
         key.AsSlice(), block_contents_to_cache,
         block_contents_to_cache->ApproximateMemoryUsage(),
-        &DeleteEntryCached<BlockContents>);
+        &DeleteCacheEntry<BlockContents>);
     if (s.ok()) {
       RecordTick(rep_->ioptions.stats, BLOCK_CACHE_COMPRESSED_ADD);
     } else {
diff --git a/table/block_based/block_based_table_reader.cc b/table/block_based/block_based_table_reader.cc
index 5ea98634d..43962ba1d 100644
--- a/table/block_based/block_based_table_reader.cc
+++ b/table/block_based/block_based_table_reader.cc
@@ -1331,18 +1331,8 @@ Status BlockBasedTable::GetDataBlockFromCache(
   assert(!cache_key.empty());
   BlockContents contents;
-  if (rep_->ioptions.lowest_used_cache_tier ==
-      CacheTier::kNonVolatileBlockTier) {
-    Cache::CreateCallback create_cb_special = GetCreateCallback<BlockContents>(
-        read_amp_bytes_per_bit, statistics, using_zstd, filter_policy);
-    block_cache_compressed_handle = block_cache_compressed->Lookup(
-        cache_key,
-        BlocklikeTraits<BlockContents>::GetCacheItemHelper(block_type),
-        create_cb_special, priority, true);
-  } else {
-    block_cache_compressed_handle =
-        block_cache_compressed->Lookup(cache_key, statistics);
-  }
+  block_cache_compressed_handle =
+      block_cache_compressed->Lookup(cache_key, statistics);
 
   // if we found in the compressed cache, then uncompress and insert into
   // uncompressed cache
@@ -1466,15 +1456,15 @@ Status BlockBasedTable::PutDataBlockToCache(
     auto block_cont_for_comp_cache =
         std::make_unique<BlockContents>(std::move(block_contents));
     size_t charge = block_cont_for_comp_cache->ApproximateMemoryUsage();
-    s = InsertEntryToCache(
-        rep_->ioptions.lowest_used_cache_tier, block_cache_compressed,
-        cache_key,
-        BlocklikeTraits<BlockContents>::GetCacheItemHelper(block_type),
-        std::move(block_cont_for_comp_cache), charge, nullptr,
+
+    s = block_cache_compressed->Insert(
+        cache_key, block_cont_for_comp_cache.get(), charge,
+        &DeleteCacheEntry<BlockContents>, nullptr /*handle*/,
         Cache::Priority::LOW);
     if (s.ok()) {
-      // Avoid the following code to delete this cached block.
+      // Cache took ownership
+      block_cont_for_comp_cache.release();
       RecordTick(statistics, BLOCK_CACHE_COMPRESSED_ADD);
     } else {
       RecordTick(statistics, BLOCK_CACHE_COMPRESSED_ADD_FAILURES);
@@ -1843,16 +1833,8 @@ Status BlockBasedTable::RetrieveBlock(
   return s;
 }
 
-// Explicitly instantiate templates for both "blocklike" types we use.
+// Explicitly instantiate templates for each "blocklike" type we use.
 // This makes it possible to keep the template definitions in the .cc file.
-template Status BlockBasedTable::RetrieveBlock<BlockContents>(
-    FilePrefetchBuffer* prefetch_buffer, const ReadOptions& ro,
-    const BlockHandle& handle, const UncompressionDict& uncompression_dict,
-    CachableEntry<BlockContents>* out_parsed_block, BlockType block_type,
-    GetContext* get_context, BlockCacheLookupContext* lookup_context,
-    bool for_compaction, bool use_cache, bool wait_for_cache,
-    bool async_read) const;
-
 template Status BlockBasedTable::RetrieveBlock<ParsedFullFilterBlock>(
     FilePrefetchBuffer* prefetch_buffer, const ReadOptions& ro,
     const BlockHandle& handle, const UncompressionDict& uncompression_dict,
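Both call sites above now go through the plain `Cache::Insert(key, value, charge, deleter)` path and hand ownership to the cache only when the insert succeeds. A minimal, self-contained sketch of that handoff follows; `ToyCache`, `DeleteCachedValue`, and `BlockContentsLike` are made-up stand-ins for illustration, not RocksDB types.

```cpp
#include <iostream>
#include <map>
#include <memory>
#include <string>

// Toy stand-in for a cache that owns void* values through a type-erased
// deleter, similar in shape to the Cache::Insert calls in the patch above.
class ToyCache {
 public:
  using Deleter = void (*)(const std::string& key, void* value);

  bool Insert(const std::string& key, void* value, Deleter deleter) {
    // On success the cache owns `value` and will invoke `deleter` later.
    return map_.emplace(key, Entry{value, deleter}).second;
  }

  ~ToyCache() {
    for (auto& kv : map_) {
      kv.second.deleter(kv.first, kv.second.value);
    }
  }

 private:
  struct Entry {
    void* value;
    Deleter deleter;
  };
  std::map<std::string, Entry> map_;
};

// Typed deleter with the same shape as DeleteEntryCached/DeleteCacheEntry.
template <typename T>
void DeleteCachedValue(const std::string& /*key*/, void* value) {
  delete static_cast<T*>(value);
}

struct BlockContentsLike {
  std::string data;
};

int main() {
  ToyCache cache;
  auto block = std::make_unique<BlockContentsLike>();
  block->data = "compressed bytes";
  // Pass the raw pointer; release ownership only if the insert succeeded,
  // mirroring the "Cache took ownership" branch in PutDataBlockToCache.
  if (cache.Insert("key1", block.get(),
                   &DeleteCachedValue<BlockContentsLike>)) {
    block.release();
  }
  std::cout << (block ? "insert failed, still owned locally\n"
                      : "cache took ownership\n");
  return 0;
}
```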
diff --git a/table/block_based/block_like_traits.h b/table/block_based/block_like_traits.h
index aeb255114..d406dbb5d 100644
--- a/table/block_based/block_like_traits.h
+++ b/table/block_based/block_like_traits.h
@@ -40,45 +40,6 @@ Cache::CreateCallback GetCreateCallback(size_t read_amp_bytes_per_bit,
   };
 }
 
-template <>
-class BlocklikeTraits<BlockContents> {
- public:
-  static BlockContents* Create(BlockContents&& contents,
-                               size_t /* read_amp_bytes_per_bit */,
-                               Statistics* /* statistics */,
-                               bool /* using_zstd */,
-                               const FilterPolicy* /* filter_policy */) {
-    return new BlockContents(std::move(contents));
-  }
-
-  static uint32_t GetNumRestarts(const BlockContents& /* contents */) {
-    return 0;
-  }
-
-  static size_t SizeCallback(void* obj) {
-    assert(obj != nullptr);
-    BlockContents* ptr = static_cast<BlockContents*>(obj);
-    return ptr->data.size();
-  }
-
-  static Status SaveToCallback(void* from_obj, size_t from_offset,
-                               size_t length, void* out) {
-    assert(from_obj != nullptr);
-    BlockContents* ptr = static_cast<BlockContents*>(from_obj);
-    const char* buf = ptr->data.data();
-    assert(length == ptr->data.size());
-    (void)from_offset;
-    memcpy(out, buf, length);
-    return Status::OK();
-  }
-
-  static Cache::CacheItemHelper* GetCacheItemHelper(BlockType /*block_type*/) {
-    // E.g. compressed cache
-    return GetCacheItemHelperForRole<BlockContents,
-                                     CacheEntryRole::kOtherBlock>();
-  }
-};
-
 template <>
 class BlocklikeTraits<ParsedFullFilterBlock> {
  public:
diff --git a/table/block_based/filter_block_reader_common.cc b/table/block_based/filter_block_reader_common.cc
index fc00afb5f..7dc49e83e 100644
--- a/table/block_based/filter_block_reader_common.cc
+++ b/table/block_based/filter_block_reader_common.cc
@@ -158,7 +158,6 @@ bool FilterBlockReaderCommon<TBlocklike>::IsFilterCompatible(
 
 // Explicitly instantiate templates for both "blocklike" types we use.
 // This makes it possible to keep the template definitions in the .cc file.
-template class FilterBlockReaderCommon<BlockContents>;
 template class FilterBlockReaderCommon<Block>;
 template class FilterBlockReaderCommon<ParsedFullFilterBlock>;
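The instantiation lists trimmed above rely on explicit template instantiation, which is what the "keep the template definitions in the .cc file" comments refer to. A minimal sketch of the general technique, with made-up names (`WidgetReader` is not a RocksDB type):

```cpp
// widget_reader.h: declaration only; member definitions live in the .cc file.
#include <cstddef>

template <typename TBlocklike>
class WidgetReader {
 public:
  explicit WidgetReader(TBlocklike block);
  std::size_t Size() const;

 private:
  TBlocklike block_;
};

// widget_reader.cc: definitions plus explicit instantiations. Only the types
// listed at the bottom are usable from other translation units; deleting one
// of these lines (as the patch does for BlockContents) drops that type.
#include <string>
#include <utility>
#include <vector>

template <typename TBlocklike>
WidgetReader<TBlocklike>::WidgetReader(TBlocklike block)
    : block_(std::move(block)) {}

template <typename TBlocklike>
std::size_t WidgetReader<TBlocklike>::Size() const {
  return block_.size();
}

template class WidgetReader<std::string>;
template class WidgetReader<std::vector<char>>;
```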