Have Cache use Status::MemoryLimit (#10262)

Summary:
I noticed it would clean up some things to have Cache::Insert()
return our MemoryLimit Status instead of Incomplete for the case in
which the capacity limit is reached. I suspect this fixes some existing but
unknown bugs where this Incomplete could be confused with other uses
of Incomplete, especially no_io cases. This is the most suspicious case I
noticed, but was not able to reproduce a bug, in part because the existing
code is not covered by unit tests (FIXME added): 57adbf0e91/table/get_context.cc (L397)

I audited all the existing uses of IsIncomplete and updated those that
seemed relevant.

HISTORY updated with a clear warning that users of strict_capacity_limit=true
must update any uses of `IsIncomplete()` accordingly

Pull Request resolved: https://github.com/facebook/rocksdb/pull/10262

Test Plan: updated unit tests

Reviewed By: hx235

Differential Revision: D37473155

Pulled By: pdillinger

fbshipit-source-id: 4bd9d9353ccddfe286b03ebd0652df8ce20f99cb
main
Peter Dillinger 2 years ago committed by Facebook GitHub Bot
parent 071fe39c05
commit e6c5e0ab9a
  1. 1
      HISTORY.md
  2. 4
      cache/cache_reservation_manager_test.cc
  3. 4
      cache/cache_test.cc
  4. 7
      cache/clock_cache.cc
  5. 7
      cache/fast_lru_cache.cc
  6. 2
      cache/lru_cache.cc
  7. 2
      db/blob/blob_file_cache_test.cc
  8. 6
      db/db_block_cache_test.cc
  9. 2
      include/rocksdb/advanced_options.h
  10. 4
      include/rocksdb/cache.h
  11. 2
      table/block_based/block_based_table_builder.cc
  12. 2
      table/block_based/block_based_table_reader.cc
  13. 2
      table/block_based/filter_policy.cc
  14. 1
      table/get_context.cc
  15. 2
      utilities/simulator_cache/sim_cache_test.cc

@ -12,6 +12,7 @@
* `rocksdb_level_metadata_t` and its get functions & destroy function. * `rocksdb_level_metadata_t` and its get functions & destroy function.
* `rocksdb_file_metadata_t` and its get functions & destroy functions. * `rocksdb_file_metadata_t` and its get functions & destroy functions.
* Add suggest_compact_range() and suggest_compact_range_cf() to C API. * Add suggest_compact_range() and suggest_compact_range_cf() to C API.
* When using block cache strict capacity limit (`LRUCache` with `strict_capacity_limit=true`), DB operations now fail with Status code `kAborted` subcode `kMemoryLimit` (`IsMemoryLimit()`) instead of `kIncomplete` (`IsIncomplete()`) when the capacity limit is reached, because Incomplete can mean other specific things for some operations. In more detail, `Cache::Insert()` now returns the updated Status code and this usually propagates through RocksDB to the user on failure.
### Bug Fixes ### Bug Fixes
* Fix a bug in which backup/checkpoint can include a WAL deleted by RocksDB. * Fix a bug in which backup/checkpoint can include a WAL deleted by RocksDB.

@ -147,7 +147,7 @@ TEST(CacheReservationManagerIncreaseReservcationOnFullCacheTest,
std::size_t new_mem_used = kSmallCacheCapacity + 1; std::size_t new_mem_used = kSmallCacheCapacity + 1;
Status s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used); Status s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
EXPECT_EQ(s, Status::Incomplete()) EXPECT_EQ(s, Status::MemoryLimit())
<< "Failed to return status to indicate failure of dummy entry insertion " << "Failed to return status to indicate failure of dummy entry insertion "
"during cache reservation on full cache"; "during cache reservation on full cache";
EXPECT_GE(test_cache_rev_mng->GetTotalReservedCacheSize(), EXPECT_GE(test_cache_rev_mng->GetTotalReservedCacheSize(),
@ -192,7 +192,7 @@ TEST(CacheReservationManagerIncreaseReservcationOnFullCacheTest,
// Create cache full again for subsequent tests // Create cache full again for subsequent tests
new_mem_used = kSmallCacheCapacity + 1; new_mem_used = kSmallCacheCapacity + 1;
s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used); s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
EXPECT_EQ(s, Status::Incomplete()) EXPECT_EQ(s, Status::MemoryLimit())
<< "Failed to return status to indicate failure of dummy entry insertion " << "Failed to return status to indicate failure of dummy entry insertion "
"during cache reservation on full cache"; "during cache reservation on full cache";
EXPECT_GE(test_cache_rev_mng->GetTotalReservedCacheSize(), EXPECT_GE(test_cache_rev_mng->GetTotalReservedCacheSize(),

@ -741,7 +741,7 @@ TEST_P(LRUCacheTest, SetStrictCapacityLimit) {
cache->SetStrictCapacityLimit(true); cache->SetStrictCapacityLimit(true);
Cache::Handle* handle; Cache::Handle* handle;
s = cache->Insert(extra_key, extra_value, 1, &deleter, &handle); s = cache->Insert(extra_key, extra_value, 1, &deleter, &handle);
ASSERT_TRUE(s.IsIncomplete()); ASSERT_TRUE(s.IsMemoryLimit());
ASSERT_EQ(nullptr, handle); ASSERT_EQ(nullptr, handle);
ASSERT_EQ(10, cache->GetUsage()); ASSERT_EQ(10, cache->GetUsage());
@ -758,7 +758,7 @@ TEST_P(LRUCacheTest, SetStrictCapacityLimit) {
ASSERT_NE(nullptr, handles[i]); ASSERT_NE(nullptr, handles[i]);
} }
s = cache2->Insert(extra_key, extra_value, 1, &deleter, &handle); s = cache2->Insert(extra_key, extra_value, 1, &deleter, &handle);
ASSERT_TRUE(s.IsIncomplete()); ASSERT_TRUE(s.IsMemoryLimit());
ASSERT_EQ(nullptr, handle); ASSERT_EQ(nullptr, handle);
// test insert without handle // test insert without handle
s = cache2->Insert(extra_key, extra_value, 1, &deleter); s = cache2->Insert(extra_key, extra_value, 1, &deleter);

@ -351,11 +351,12 @@ Status ClockCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
last_reference_list.push_back(tmp); last_reference_list.push_back(tmp);
} else { } else {
if (table_.GetOccupancy() == table_.GetOccupancyLimit()) { if (table_.GetOccupancy() == table_.GetOccupancyLimit()) {
s = Status::Incomplete( // TODO: Consider using a distinct status for this case, but usually
// it will be handled the same way as reaching charge capacity limit
s = Status::MemoryLimit(
"Insert failed because all slots in the hash table are full."); "Insert failed because all slots in the hash table are full.");
// TODO(Guido) Use the correct statuses.
} else { } else {
s = Status::Incomplete( s = Status::MemoryLimit(
"Insert failed because the total charge has exceeded the " "Insert failed because the total charge has exceeded the "
"capacity."); "capacity.");
} }

@ -368,11 +368,12 @@ Status LRUCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
last_reference_list.push_back(tmp); last_reference_list.push_back(tmp);
} else { } else {
if (table_.GetOccupancy() == table_.GetOccupancyLimit()) { if (table_.GetOccupancy() == table_.GetOccupancyLimit()) {
s = Status::Incomplete( // TODO: Consider using a distinct status for this case, but usually
// it will be handled the same way as reaching charge capacity limit
s = Status::MemoryLimit(
"Insert failed because all slots in the hash table are full."); "Insert failed because all slots in the hash table are full.");
// TODO(Guido) Use the correct statuses.
} else { } else {
s = Status::Incomplete( s = Status::MemoryLimit(
"Insert failed because the total charge has exceeded the " "Insert failed because the total charge has exceeded the "
"capacity."); "capacity.");
} }

@ -332,7 +332,7 @@ Status LRUCacheShard::InsertItem(LRUHandle* e, Cache::Handle** handle,
delete[] reinterpret_cast<char*>(e); delete[] reinterpret_cast<char*>(e);
*handle = nullptr; *handle = nullptr;
} }
s = Status::Incomplete("Insert failed due to LRU cache being full."); s = Status::MemoryLimit("Insert failed due to LRU cache being full.");
} }
} else { } else {
// Insert into the cache. Note that the cache might get larger than its // Insert into the cache. Note that the cache might get larger than its

@ -254,7 +254,7 @@ TEST_F(BlobFileCacheTest, GetBlobFileReader_CacheFull) {
CacheHandleGuard<BlobFileReader> reader; CacheHandleGuard<BlobFileReader> reader;
ASSERT_TRUE(blob_file_cache.GetBlobFileReader(blob_file_number, &reader) ASSERT_TRUE(blob_file_cache.GetBlobFileReader(blob_file_number, &reader)
.IsIncomplete()); .IsMemoryLimit());
ASSERT_EQ(reader.GetValue(), nullptr); ASSERT_EQ(reader.GetValue(), nullptr);
ASSERT_EQ(options.statistics->getTickerCount(NO_FILE_OPENS), 1); ASSERT_EQ(options.statistics->getTickerCount(NO_FILE_OPENS), 1);
ASSERT_EQ(options.statistics->getTickerCount(NO_FILE_ERRORS), 1); ASSERT_EQ(options.statistics->getTickerCount(NO_FILE_ERRORS), 1);

@ -250,7 +250,7 @@ TEST_F(DBBlockCacheTest, TestWithoutCompressedBlockCache) {
cache->SetStrictCapacityLimit(true); cache->SetStrictCapacityLimit(true);
iter = db_->NewIterator(read_options); iter = db_->NewIterator(read_options);
iter->Seek(std::to_string(kNumBlocks - 1)); iter->Seek(std::to_string(kNumBlocks - 1));
ASSERT_TRUE(iter->status().IsIncomplete()); ASSERT_TRUE(iter->status().IsMemoryLimit());
CheckCacheCounters(options, 1, 0, 0, 1); CheckCacheCounters(options, 1, 0, 0, 1);
delete iter; delete iter;
iter = nullptr; iter = nullptr;
@ -333,7 +333,9 @@ TEST_F(DBBlockCacheTest, TestWithCompressedBlockCache) {
ASSERT_EQ(usage, cache->GetPinnedUsage()); ASSERT_EQ(usage, cache->GetPinnedUsage());
// Load last key block. // Load last key block.
ASSERT_EQ("Result incomplete: Insert failed due to LRU cache being full.", ASSERT_EQ(
"Operation aborted: Memory limit reached: Insert failed due to LRU cache "
"being full.",
Get(std::to_string(kNumBlocks - 1))); Get(std::to_string(kNumBlocks - 1)));
// Failure will also record the miss counter. // Failure will also record the miss counter.
CheckCacheCounters(options, 1, 0, 0, 1); CheckCacheCounters(options, 1, 0, 0, 1);

@ -117,7 +117,7 @@ struct CompressionOptions {
// //
// The amount of data buffered can be limited by `max_dict_buffer_bytes`. This // The amount of data buffered can be limited by `max_dict_buffer_bytes`. This
// buffered memory is charged to the block cache when there is a block cache. // buffered memory is charged to the block cache when there is a block cache.
// If block cache insertion fails with `Status::Incomplete` (i.e., it is // If block cache insertion fails with `Status::MemoryLimit` (i.e., it is
// full), we finalize the dictionary with whatever data we have and then stop // full), we finalize the dictionary with whatever data we have and then stop
// buffering. // buffering.
// //

@ -288,7 +288,7 @@ class Cache {
// Insert a mapping from key->value into the volatile cache only // Insert a mapping from key->value into the volatile cache only
// and assign it with the specified charge against the total cache capacity. // and assign it with the specified charge against the total cache capacity.
// If strict_capacity_limit is true and cache reaches its full capacity, // If strict_capacity_limit is true and cache reaches its full capacity,
// return Status::Incomplete. // return Status::MemoryLimit.
// //
// If handle is not nullptr, returns a handle that corresponds to the // If handle is not nullptr, returns a handle that corresponds to the
// mapping. The caller must call this->Release(handle) when the returned // mapping. The caller must call this->Release(handle) when the returned
@ -446,7 +446,7 @@ class Cache {
// Insert a mapping from key->value into the cache and assign it // Insert a mapping from key->value into the cache and assign it
// the specified charge against the total cache capacity. // the specified charge against the total cache capacity.
// If strict_capacity_limit is true and cache reaches its full capacity, // If strict_capacity_limit is true and cache reaches its full capacity,
// return Status::Incomplete. // return Status::MemoryLimit.
// //
// The helper argument is saved by the cache and will be used when the // The helper argument is saved by the cache and will be used when the
// inserted object is evicted or promoted to the secondary cache. It, // inserted object is evicted or promoted to the secondary cache. It,

@ -944,7 +944,7 @@ void BlockBasedTableBuilder::Add(const Slice& key, const Slice& value) {
Status s = Status s =
r->compression_dict_buffer_cache_res_mgr->UpdateCacheReservation( r->compression_dict_buffer_cache_res_mgr->UpdateCacheReservation(
r->data_begin_offset); r->data_begin_offset);
exceeds_global_block_cache_limit = s.IsIncomplete(); exceeds_global_block_cache_limit = s.IsMemoryLimit();
} }
if (exceeds_buffer_limit || exceeds_global_block_cache_limit) { if (exceeds_buffer_limit || exceeds_global_block_cache_limit) {

@ -743,7 +743,7 @@ Status BlockBasedTable::Open(
std::size_t mem_usage = new_table->ApproximateMemoryUsage(); std::size_t mem_usage = new_table->ApproximateMemoryUsage();
s = table_reader_cache_res_mgr->MakeCacheReservation( s = table_reader_cache_res_mgr->MakeCacheReservation(
mem_usage, &(rep->table_reader_cache_res_handle)); mem_usage, &(rep->table_reader_cache_res_handle));
if (s.IsIncomplete()) { if (s.IsMemoryLimit()) {
s = Status::MemoryLimit( s = Status::MemoryLimit(
"Can't allocate " + "Can't allocate " +
kCacheEntryRoleToCamelString[static_cast<std::uint32_t>( kCacheEntryRoleToCamelString[static_cast<std::uint32_t>(

@ -662,7 +662,7 @@ class Standard128RibbonBitsBuilder : public XXPH3FilterBitsBuilder {
bytes_banding, &banding_res_handle); bytes_banding, &banding_res_handle);
} }
if (status_banding_cache_res.IsIncomplete()) { if (status_banding_cache_res.IsMemoryLimit()) {
ROCKS_LOG_WARN(info_log_, ROCKS_LOG_WARN(info_log_,
"Cache charging for Ribbon filter banding failed due " "Cache charging for Ribbon filter banding failed due "
"to cache full"); "to cache full");

@ -415,6 +415,7 @@ bool GetContext::GetBlobValue(const Slice& blob_index,
user_key_, blob_index, prefetch_buffer, blob_value, bytes_read); user_key_, blob_index, prefetch_buffer, blob_value, bytes_read);
if (!status.ok()) { if (!status.ok()) {
if (status.IsIncomplete()) { if (status.IsIncomplete()) {
// FIXME: this code is not covered by unit tests
MarkKeyMayExist(); MarkKeyMayExist();
return false; return false;
} }

@ -116,7 +116,7 @@ TEST_F(SimCacheTest, SimCache) {
simCache->SetStrictCapacityLimit(true); simCache->SetStrictCapacityLimit(true);
iter = db_->NewIterator(read_options); iter = db_->NewIterator(read_options);
iter->Seek(std::to_string(kNumBlocks * 2 - 1)); iter->Seek(std::to_string(kNumBlocks * 2 - 1));
ASSERT_TRUE(iter->status().IsIncomplete()); ASSERT_TRUE(iter->status().IsMemoryLimit());
CheckCacheCounters(options, 1, 0, 0, 1); CheckCacheCounters(options, 1, 0, 0, 1);
delete iter; delete iter;
iter = nullptr; iter = nullptr;

Loading…
Cancel
Save