diff --git a/HISTORY.md b/HISTORY.md
index 2d053289e..e5a2e32bc 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -12,6 +12,7 @@
 * `rocksdb_level_metadata_t` and its get functions & destroy function.
 * `rocksdb_file_metadata_t` and its get functions & destroy functions.
 * Add suggest_compact_range() and suggest_compact_range_cf() to C API.
+* When using block cache strict capacity limit (`LRUCache` with `strict_capacity_limit=true`), DB operations now fail with Status code `kAborted` subcode `kMemoryLimit` (`IsMemoryLimit()`) instead of `kIncomplete` (`IsIncomplete()`) when the capacity limit is reached, because Incomplete can mean other specific things for some operations. In more detail, `Cache::Insert()` now returns the updated Status code and this usually propagates through RocksDB to the user on failure.
 
 ### Bug Fixes
 * Fix a bug in which backup/checkpoint can include a WAL deleted by RocksDB.
diff --git a/cache/cache_reservation_manager_test.cc b/cache/cache_reservation_manager_test.cc
index 87af653bc..e39b974b8 100644
--- a/cache/cache_reservation_manager_test.cc
+++ b/cache/cache_reservation_manager_test.cc
@@ -147,7 +147,7 @@ TEST(CacheReservationManagerIncreaseReservcationOnFullCacheTest,
 
   std::size_t new_mem_used = kSmallCacheCapacity + 1;
   Status s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
-  EXPECT_EQ(s, Status::Incomplete())
+  EXPECT_EQ(s, Status::MemoryLimit())
       << "Failed to return status to indicate failure of dummy entry insertion "
          "during cache reservation on full cache";
   EXPECT_GE(test_cache_rev_mng->GetTotalReservedCacheSize(),
@@ -192,7 +192,7 @@ TEST(CacheReservationManagerIncreaseReservcationOnFullCacheTest,
   // Create cache full again for subsequent tests
   new_mem_used = kSmallCacheCapacity + 1;
   s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
-  EXPECT_EQ(s, Status::Incomplete())
+  EXPECT_EQ(s, Status::MemoryLimit())
       << "Failed to return status to indicate failure of dummy entry insertion "
          "during cache reservation on full cache";
   EXPECT_GE(test_cache_rev_mng->GetTotalReservedCacheSize(),
diff --git a/cache/cache_test.cc b/cache/cache_test.cc
index da357637b..f9acb88f0 100644
--- a/cache/cache_test.cc
+++ b/cache/cache_test.cc
@@ -741,7 +741,7 @@ TEST_P(LRUCacheTest, SetStrictCapacityLimit) {
   cache->SetStrictCapacityLimit(true);
   Cache::Handle* handle;
   s = cache->Insert(extra_key, extra_value, 1, &deleter, &handle);
-  ASSERT_TRUE(s.IsIncomplete());
+  ASSERT_TRUE(s.IsMemoryLimit());
   ASSERT_EQ(nullptr, handle);
   ASSERT_EQ(10, cache->GetUsage());
 
@@ -758,7 +758,7 @@ TEST_P(LRUCacheTest, SetStrictCapacityLimit) {
     ASSERT_NE(nullptr, handles[i]);
   }
   s = cache2->Insert(extra_key, extra_value, 1, &deleter, &handle);
-  ASSERT_TRUE(s.IsIncomplete());
+  ASSERT_TRUE(s.IsMemoryLimit());
   ASSERT_EQ(nullptr, handle);
   // test insert without handle
   s = cache2->Insert(extra_key, extra_value, 1, &deleter);
diff --git a/cache/clock_cache.cc b/cache/clock_cache.cc
index 3d34740a0..bfc49cd0d 100644
--- a/cache/clock_cache.cc
+++ b/cache/clock_cache.cc
@@ -351,11 +351,12 @@ Status ClockCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
       last_reference_list.push_back(tmp);
     } else {
       if (table_.GetOccupancy() == table_.GetOccupancyLimit()) {
-        s = Status::Incomplete(
+        // TODO: Consider using a distinct status for this case, but usually
+        // it will be handled the same way as reaching charge capacity limit
+        s = Status::MemoryLimit(
             "Insert failed because all slots in the hash table are full.");
-        // TODO(Guido) Use the correct statuses.
       } else {
-        s = Status::Incomplete(
+        s = Status::MemoryLimit(
             "Insert failed because the total charge has exceeded the "
             "capacity.");
       }
diff --git a/cache/fast_lru_cache.cc b/cache/fast_lru_cache.cc
index 8ef74f56f..0152b6fbe 100644
--- a/cache/fast_lru_cache.cc
+++ b/cache/fast_lru_cache.cc
@@ -368,11 +368,12 @@ Status LRUCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
       last_reference_list.push_back(tmp);
     } else {
       if (table_.GetOccupancy() == table_.GetOccupancyLimit()) {
-        s = Status::Incomplete(
+        // TODO: Consider using a distinct status for this case, but usually
+        // it will be handled the same way as reaching charge capacity limit
+        s = Status::MemoryLimit(
            "Insert failed because all slots in the hash table are full.");
-        // TODO(Guido) Use the correct statuses.
       } else {
-        s = Status::Incomplete(
+        s = Status::MemoryLimit(
            "Insert failed because the total charge has exceeded the "
            "capacity.");
       }
diff --git a/cache/lru_cache.cc b/cache/lru_cache.cc
index c3f50f7b9..147ae5bcb 100644
--- a/cache/lru_cache.cc
+++ b/cache/lru_cache.cc
@@ -332,7 +332,7 @@ Status LRUCacheShard::InsertItem(LRUHandle* e, Cache::Handle** handle,
         delete[] reinterpret_cast<char*>(e);
         *handle = nullptr;
       }
-      s = Status::Incomplete("Insert failed due to LRU cache being full.");
+      s = Status::MemoryLimit("Insert failed due to LRU cache being full.");
     }
   } else {
     // Insert into the cache. Note that the cache might get larger than its
diff --git a/db/blob/blob_file_cache_test.cc b/db/blob/blob_file_cache_test.cc
index b0ec22a67..b19d78b66 100644
--- a/db/blob/blob_file_cache_test.cc
+++ b/db/blob/blob_file_cache_test.cc
@@ -254,7 +254,7 @@ TEST_F(BlobFileCacheTest, GetBlobFileReader_CacheFull) {
   CacheHandleGuard<BlobFileReader> reader;
 
   ASSERT_TRUE(blob_file_cache.GetBlobFileReader(blob_file_number, &reader)
-                  .IsIncomplete());
+                  .IsMemoryLimit());
   ASSERT_EQ(reader.GetValue(), nullptr);
   ASSERT_EQ(options.statistics->getTickerCount(NO_FILE_OPENS), 1);
   ASSERT_EQ(options.statistics->getTickerCount(NO_FILE_ERRORS), 1);
diff --git a/db/db_block_cache_test.cc b/db/db_block_cache_test.cc
index dc7fedfd4..d4ee1d8b2 100644
--- a/db/db_block_cache_test.cc
+++ b/db/db_block_cache_test.cc
@@ -250,7 +250,7 @@ TEST_F(DBBlockCacheTest, TestWithoutCompressedBlockCache) {
   cache->SetStrictCapacityLimit(true);
   iter = db_->NewIterator(read_options);
   iter->Seek(std::to_string(kNumBlocks - 1));
-  ASSERT_TRUE(iter->status().IsIncomplete());
+  ASSERT_TRUE(iter->status().IsMemoryLimit());
   CheckCacheCounters(options, 1, 0, 0, 1);
   delete iter;
   iter = nullptr;
@@ -333,8 +333,10 @@ TEST_F(DBBlockCacheTest, TestWithCompressedBlockCache) {
   ASSERT_EQ(usage, cache->GetPinnedUsage());
 
   // Load last key block.
-  ASSERT_EQ("Result incomplete: Insert failed due to LRU cache being full.",
-            Get(std::to_string(kNumBlocks - 1)));
+  ASSERT_EQ(
+      "Operation aborted: Memory limit reached: Insert failed due to LRU cache "
+      "being full.",
+      Get(std::to_string(kNumBlocks - 1)));
   // Failure will also record the miss counter.
   CheckCacheCounters(options, 1, 0, 0, 1);
   CheckCompressedCacheCounters(options, 1, 0, 1, 0);
diff --git a/include/rocksdb/advanced_options.h b/include/rocksdb/advanced_options.h
index 843ea5e74..07a5591a5 100644
--- a/include/rocksdb/advanced_options.h
+++ b/include/rocksdb/advanced_options.h
@@ -117,7 +117,7 @@ struct CompressionOptions {
   //
   // The amount of data buffered can be limited by `max_dict_buffer_bytes`. This
   // buffered memory is charged to the block cache when there is a block cache.
-  // If block cache insertion fails with `Status::Incomplete` (i.e., it is
+  // If block cache insertion fails with `Status::MemoryLimit` (i.e., it is
   // full), we finalize the dictionary with whatever data we have and then stop
   // buffering.
   //
diff --git a/include/rocksdb/cache.h b/include/rocksdb/cache.h
index 03f8ac51f..6194b0c66 100644
--- a/include/rocksdb/cache.h
+++ b/include/rocksdb/cache.h
@@ -288,7 +288,7 @@ class Cache {
   // Insert a mapping from key->value into the volatile cache only
   // and assign it with the specified charge against the total cache capacity.
   // If strict_capacity_limit is true and cache reaches its full capacity,
-  // return Status::Incomplete.
+  // return Status::MemoryLimit.
   //
   // If handle is not nullptr, returns a handle that corresponds to the
   // mapping. The caller must call this->Release(handle) when the returned
@@ -446,7 +446,7 @@ class Cache {
   // Insert a mapping from key->value into the cache and assign it
   // the specified charge against the total cache capacity.
   // If strict_capacity_limit is true and cache reaches its full capacity,
-  // return Status::Incomplete.
+  // return Status::MemoryLimit.
   //
   // The helper argument is saved by the cache and will be used when the
   // inserted object is evicted or promoted to the secondary cache. It,
diff --git a/table/block_based/block_based_table_builder.cc b/table/block_based/block_based_table_builder.cc
index 80e3aefb2..a5f58271c 100644
--- a/table/block_based/block_based_table_builder.cc
+++ b/table/block_based/block_based_table_builder.cc
@@ -944,7 +944,7 @@ void BlockBasedTableBuilder::Add(const Slice& key, const Slice& value) {
       Status s =
           r->compression_dict_buffer_cache_res_mgr->UpdateCacheReservation(
              r->data_begin_offset);
-      exceeds_global_block_cache_limit = s.IsIncomplete();
+      exceeds_global_block_cache_limit = s.IsMemoryLimit();
     }
 
     if (exceeds_buffer_limit || exceeds_global_block_cache_limit) {
diff --git a/table/block_based/block_based_table_reader.cc b/table/block_based/block_based_table_reader.cc
index 3eb1505e0..a67ca5906 100644
--- a/table/block_based/block_based_table_reader.cc
+++ b/table/block_based/block_based_table_reader.cc
@@ -743,7 +743,7 @@ Status BlockBasedTable::Open(
     std::size_t mem_usage = new_table->ApproximateMemoryUsage();
     s = table_reader_cache_res_mgr->MakeCacheReservation(
         mem_usage, &(rep->table_reader_cache_res_handle));
-    if (s.IsIncomplete()) {
+    if (s.IsMemoryLimit()) {
       s = Status::MemoryLimit(
           "Can't allocate " +
           kCacheEntryRoleToCamelString[static_cast<std::uint32_t>(
diff --git a/table/block_based/filter_policy.cc b/table/block_based/filter_policy.cc
index 020ce87d2..2c220da98 100644
--- a/table/block_based/filter_policy.cc
+++ b/table/block_based/filter_policy.cc
@@ -662,7 +662,7 @@ class Standard128RibbonBitsBuilder : public XXPH3FilterBitsBuilder {
           bytes_banding, &banding_res_handle);
     }
 
-    if (status_banding_cache_res.IsIncomplete()) {
+    if (status_banding_cache_res.IsMemoryLimit()) {
      ROCKS_LOG_WARN(info_log_,
                     "Cache charging for Ribbon filter banding failed due "
                     "to cache full");
diff --git a/table/get_context.cc b/table/get_context.cc
index b0847ea5d..d2c6ab83a 100644
--- a/table/get_context.cc
+++ b/table/get_context.cc
@@ -415,6 +415,7 @@ bool GetContext::GetBlobValue(const Slice& blob_index,
       user_key_, blob_index, prefetch_buffer, blob_value, bytes_read);
   if (!status.ok()) {
     if (status.IsIncomplete()) {
+      // FIXME: this code is not covered by unit tests
      MarkKeyMayExist();
      return false;
    }
diff --git a/utilities/simulator_cache/sim_cache_test.cc b/utilities/simulator_cache/sim_cache_test.cc
index ceb91c154..2e37cd347 100644
--- a/utilities/simulator_cache/sim_cache_test.cc
+++ b/utilities/simulator_cache/sim_cache_test.cc
@@ -116,7 +116,7 @@ TEST_F(SimCacheTest, SimCache) {
   simCache->SetStrictCapacityLimit(true);
   iter = db_->NewIterator(read_options);
   iter->Seek(std::to_string(kNumBlocks * 2 - 1));
-  ASSERT_TRUE(iter->status().IsIncomplete());
+  ASSERT_TRUE(iter->status().IsMemoryLimit());
   CheckCacheCounters(options, 1, 0, 0, 1);
   delete iter;
   iter = nullptr;
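
For reference, and not part of the patch above: a minimal caller-side sketch of how application code might observe the new behavior described in the HISTORY.md entry. The cache size, DB path, and key below are illustrative assumptions, and whether a particular read actually hits the limit depends on the workload; the point is only that a strict-capacity-limit failure now surfaces as IsMemoryLimit() rather than IsIncomplete().

// Illustrative sketch (not from the RocksDB source tree); sizes, path, and key
// are arbitrary placeholders.
#include <cassert>
#include <memory>
#include <string>

#include "rocksdb/cache.h"
#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "rocksdb/table.h"

int main() {
  using namespace ROCKSDB_NAMESPACE;

  // A deliberately tiny block cache with a strict capacity limit, so block
  // insertions on read can be rejected once the cache is full.
  LRUCacheOptions cache_opts;
  cache_opts.capacity = 4 * 1024;
  cache_opts.strict_capacity_limit = true;
  std::shared_ptr<Cache> cache = NewLRUCache(cache_opts);

  BlockBasedTableOptions table_opts;
  table_opts.block_cache = cache;

  Options options;
  options.create_if_missing = true;
  options.table_factory.reset(NewBlockBasedTableFactory(table_opts));

  DB* db = nullptr;
  Status s = DB::Open(options, "/tmp/strict_cache_demo", &db);
  assert(s.ok());

  std::string value;
  s = db->Get(ReadOptions(), "some_key", &value);
  if (s.IsMemoryLimit()) {
    // After this change, a block cache insertion rejected by the strict
    // capacity limit is reported here as kAborted/kMemoryLimit. Before the
    // change, the same condition reported s.IsIncomplete().
  }

  delete db;
  return 0;
}

Callers that previously branched on IsIncomplete() for this condition would need to check IsMemoryLimit() instead after upgrading.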