From c78a87cd71687351e1df39bf6898005671f6bd51 Mon Sep 17 00:00:00 2001
From: sdong
Date: Tue, 24 May 2022 13:31:16 -0700
Subject: [PATCH] Avoid malloc_usable_size() call inside LRU Cache mutex
 (#10026)

Summary:
Inside the LRU Cache mutex, we sometimes call malloc_usable_size() to
calculate the memory used by a handle's metadata. We avoid this by storing
the charge plus the metadata size, rather than the charge alone, in the
handle itself. Within the mutex, usually only the total charge is needed,
so we don't have to recompute it there.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/10026

Test Plan: Run existing tests.

Reviewed By: pdillinger

Differential Revision: D36556253

fbshipit-source-id: f60c96d13cde3af77732e5548e4eac4182fa9801
---
 HISTORY.md              |  2 ++
 cache/fast_lru_cache.cc | 57 +++++++++++++++-----------------
 cache/fast_lru_cache.h  | 29 +++++++++++-----
 cache/lru_cache.cc      | 73 ++++++++++++++++++++---------------------
 cache/lru_cache.h       | 31 ++++++++++++-----
 5 files changed, 107 insertions(+), 85 deletions(-)

diff --git a/HISTORY.md b/HISTORY.md
index 66964df4c..5eab805ff 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -44,6 +44,8 @@
 * ldb `--try_load_options` default to true if `--db` is specified and not creating a new DB, the user can still explicitly disable that by `--try_load_options=false` (or explicitly enable that by `--try_load_options`).
 * During Flush write or Compaction write/read, the WriteController is used to determine whether DB writes are stalled or slowed down. The priority (Env::IOPriority) can then be determined accordingly and be passed in IOOptions to the file system.
 
+### Performance Improvements
+* Avoid calling malloc_usable_size() in LRU Cache's mutex.
 
 ## 7.2.0 (04/15/2022)
 ### Bug Fixes
diff --git a/cache/fast_lru_cache.cc b/cache/fast_lru_cache.cc
index 10ae7367f..3176cbf06 100644
--- a/cache/fast_lru_cache.cc
+++ b/cache/fast_lru_cache.cc
@@ -137,9 +136,8 @@ void LRUCacheShard::EraseUnRefEntries() {
       LRU_Remove(old);
       table_.Remove(old->key(), old->hash);
       old->SetInCache(false);
-      size_t total_charge = old->CalcTotalCharge(metadata_charge_policy_);
-      assert(usage_ >= total_charge);
-      usage_ -= total_charge;
+      assert(usage_ >= old->total_charge);
+      usage_ -= old->total_charge;
       last_reference_list.push_back(old);
     }
   }
@@ -177,8 +176,10 @@ void LRUCacheShard::ApplyToSomeEntries(
   }
 
   table_.ApplyToEntriesRange(
-      [callback](LRUHandle* h) {
-        callback(h->key(), h->value, h->charge, h->deleter);
+      [callback,
+       metadata_charge_policy = metadata_charge_policy_](LRUHandle* h) {
+        callback(h->key(), h->value, h->GetCharge(metadata_charge_policy),
+                 h->deleter);
       },
       index_begin, index_end);
 }
@@ -189,21 +190,19 @@ void LRUCacheShard::LRU_Remove(LRUHandle* e) {
   e->next->prev = e->prev;
   e->prev->next = e->next;
   e->prev = e->next = nullptr;
-  size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_);
-  assert(lru_usage_ >= total_charge);
-  lru_usage_ -= total_charge;
+  assert(lru_usage_ >= e->total_charge);
+  lru_usage_ -= e->total_charge;
 }
 
 void LRUCacheShard::LRU_Insert(LRUHandle* e) {
   assert(e->next == nullptr);
   assert(e->prev == nullptr);
-  size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_);
   // Insert "e" to head of LRU list.
   e->next = &lru_;
   e->prev = lru_.prev;
   e->prev->next = e;
   e->next->prev = e;
-  lru_usage_ += total_charge;
+  lru_usage_ += e->total_charge;
 }
 
 void LRUCacheShard::EvictFromLRU(size_t charge,
@@ -215,9 +214,8 @@ void LRUCacheShard::EvictFromLRU(size_t charge,
     LRU_Remove(old);
     table_.Remove(old->key(), old->hash);
     old->SetInCache(false);
-    size_t old_total_charge = old->CalcTotalCharge(metadata_charge_policy_);
-    assert(usage_ >= old_total_charge);
-    usage_ -= old_total_charge;
+    assert(usage_ >= old->total_charge);
+    usage_ -= old->total_charge;
     deleted->push_back(old);
   }
 }
@@ -245,16 +243,14 @@ Status LRUCacheShard::InsertItem(LRUHandle* e, Cache::Handle** handle,
                                  bool free_handle_on_fail) {
   Status s = Status::OK();
   autovector<LRUHandle*> last_reference_list;
-  size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_);
-
   {
     MutexLock l(&mutex_);
 
     // Free the space following strict LRU policy until enough space
     // is freed or the lru list is empty.
-    EvictFromLRU(total_charge, &last_reference_list);
+    EvictFromLRU(e->total_charge, &last_reference_list);
 
-    if ((usage_ + total_charge) > capacity_ &&
+    if ((usage_ + e->total_charge) > capacity_ &&
         (strict_capacity_limit_ || handle == nullptr)) {
       e->SetInCache(false);
       if (handle == nullptr) {
@@ -272,7 +268,7 @@ Status LRUCacheShard::InsertItem(LRUHandle* e, Cache::Handle** handle,
     // Insert into the cache. Note that the cache might get larger than its
     // capacity if not enough space was freed up.
     LRUHandle* old = table_.Insert(e);
-    usage_ += total_charge;
+    usage_ += e->total_charge;
     if (old != nullptr) {
       s = Status::OkOverwritten();
       assert(old->InCache());
@@ -280,10 +276,8 @@ Status LRUCacheShard::InsertItem(LRUHandle* e, Cache::Handle** handle,
       if (!old->HasRefs()) {
         // old is on LRU because it's in cache and its reference count is 0.
         LRU_Remove(old);
-        size_t old_total_charge =
-            old->CalcTotalCharge(metadata_charge_policy_);
-        assert(usage_ >= old_total_charge);
-        usage_ -= old_total_charge;
+        assert(usage_ >= old->total_charge);
+        usage_ -= old->total_charge;
         last_reference_list.push_back(old);
       }
     }
@@ -358,9 +352,8 @@ bool LRUCacheShard::Release(Cache::Handle* handle, bool erase_if_last_ref) {
     }
     // If it was the last reference, then decrement the cache usage.
     if (last_reference) {
-      size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_);
-      assert(usage_ >= total_charge);
-      usage_ -= total_charge;
+      assert(usage_ >= e->total_charge);
+      usage_ -= e->total_charge;
     }
   }
 
@@ -384,12 +377,12 @@ Status LRUCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
   e->value = value;
   e->flags = 0;
   e->deleter = deleter;
-  e->charge = charge;
   e->key_length = key.size();
   e->hash = hash;
   e->refs = 0;
   e->next = e->prev = nullptr;
   e->SetInCache(true);
+  e->CalcTotalCharge(charge, metadata_charge_policy_);
   memcpy(e->key_data, key.data(), key.size());
 
   return InsertItem(e, handle, /* free_handle_on_fail */ true);
@@ -407,9 +400,8 @@ void LRUCacheShard::Erase(const Slice& key, uint32_t hash) {
       if (!e->HasRefs()) {
         // The entry is in LRU since it's in hash and has no external references
         LRU_Remove(e);
-        size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_);
-        assert(usage_ >= total_charge);
-        usage_ -= total_charge;
+        assert(usage_ >= e->total_charge);
+        usage_ -= e->total_charge;
         last_reference = true;
       }
     }
@@ -473,7 +465,12 @@ void* LRUCache::Value(Handle* handle) {
 }
 
 size_t LRUCache::GetCharge(Handle* handle) const {
-  return reinterpret_cast<const LRUHandle*>(handle)->charge;
+  CacheMetadataChargePolicy metadata_charge_policy = kDontChargeCacheMetadata;
+  if (num_shards_ > 0) {
+    metadata_charge_policy = shards_[0].metadata_charge_policy_;
+  }
+  return reinterpret_cast<const LRUHandle*>(handle)->GetCharge(
+      metadata_charge_policy);
 }
 
 Cache::DeleterFn LRUCache::GetDeleter(Handle* handle) const {
diff --git a/cache/fast_lru_cache.h b/cache/fast_lru_cache.h
index a672afaf7..9d3989ac7 100644
--- a/cache/fast_lru_cache.h
+++ b/cache/fast_lru_cache.h
@@ -29,7 +29,7 @@ struct LRUHandle {
   LRUHandle* next_hash;
   LRUHandle* next;
   LRUHandle* prev;
-  size_t charge;  // TODO(opt): Only allow uint32_t?
+  size_t total_charge;  // TODO(opt): Only allow uint32_t?
   size_t key_length;
   // The hash of key(). Used for fast sharding and comparisons.
   uint32_t hash;
@@ -79,18 +79,31 @@ struct LRUHandle {
   }
 
   // Calculate the memory usage by metadata.
-  inline size_t CalcTotalCharge(
-      CacheMetadataChargePolicy metadata_charge_policy) {
-    size_t meta_charge = 0;
-    if (metadata_charge_policy == kFullChargeCacheMetadata) {
+  inline size_t CalcMetaCharge(
+      CacheMetadataChargePolicy metadata_charge_policy) const {
+    if (metadata_charge_policy != kFullChargeCacheMetadata) {
+      return 0;
+    } else {
 #ifdef ROCKSDB_MALLOC_USABLE_SIZE
-      meta_charge += malloc_usable_size(static_cast<void*>(this));
+      return malloc_usable_size(
+          const_cast<void*>(static_cast<const void*>(this)));
 #else
       // This is the size that is used when a new handle is created.
-      meta_charge += sizeof(LRUHandle) - 1 + key_length;
+      return sizeof(LRUHandle) - 1 + key_length;
 #endif
     }
-    return charge + meta_charge;
+  }
+
+  inline void CalcTotalCharge(
+      size_t charge, CacheMetadataChargePolicy metadata_charge_policy) {
+    total_charge = charge + CalcMetaCharge(metadata_charge_policy);
+  }
+
+  inline size_t GetCharge(
+      CacheMetadataChargePolicy metadata_charge_policy) const {
+    size_t meta_charge = CalcMetaCharge(metadata_charge_policy);
+    assert(total_charge >= meta_charge);
+    return total_charge - meta_charge;
   }
 };
diff --git a/cache/lru_cache.cc b/cache/lru_cache.cc
index a28c2b515..eb978f4f0 100644
--- a/cache/lru_cache.cc
+++ b/cache/lru_cache.cc
@@ -143,9 +143,8 @@ void LRUCacheShard::EraseUnRefEntries() {
       LRU_Remove(old);
       table_.Remove(old->key(), old->hash);
       old->SetInCache(false);
-      size_t total_charge = old->CalcTotalCharge(metadata_charge_policy_);
-      assert(usage_ >= total_charge);
-      usage_ -= total_charge;
+      assert(usage_ >= old->total_charge);
+      usage_ -= old->total_charge;
       last_reference_list.push_back(old);
     }
   }
@@ -182,11 +181,13 @@ void LRUCacheShard::ApplyToSomeEntries(
   }
 
   table_.ApplyToEntriesRange(
-      [callback](LRUHandle* h) {
+      [callback,
+       metadata_charge_policy = metadata_charge_policy_](LRUHandle* h) {
         DeleterFn deleter = h->IsSecondaryCacheCompatible()
                                 ? h->info_.helper->del_cb
                                 : h->info_.deleter;
-        callback(h->key(), h->value, h->charge, deleter);
+        callback(h->key(), h->value, h->GetCharge(metadata_charge_policy),
+                 deleter);
       },
       index_begin, index_end);
 }
@@ -222,19 +223,17 @@ void LRUCacheShard::LRU_Remove(LRUHandle* e) {
   e->next->prev = e->prev;
   e->prev->next = e->next;
   e->prev = e->next = nullptr;
-  size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_);
-  assert(lru_usage_ >= total_charge);
-  lru_usage_ -= total_charge;
+  assert(lru_usage_ >= e->total_charge);
+  lru_usage_ -= e->total_charge;
   if (e->InHighPriPool()) {
-    assert(high_pri_pool_usage_ >= total_charge);
-    high_pri_pool_usage_ -= total_charge;
+    assert(high_pri_pool_usage_ >= e->total_charge);
+    high_pri_pool_usage_ -= e->total_charge;
   }
 }
 
 void LRUCacheShard::LRU_Insert(LRUHandle* e) {
   assert(e->next == nullptr);
   assert(e->prev == nullptr);
-  size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_);
   if (high_pri_pool_ratio_ > 0 && (e->IsHighPri() || e->HasHit())) {
     // Insert "e" to head of LRU list.
     e->next = &lru_;
@@ -242,7 +241,7 @@ void LRUCacheShard::LRU_Insert(LRUHandle* e) {
     e->prev->next = e;
     e->next->prev = e;
     e->SetInHighPriPool(true);
-    high_pri_pool_usage_ += total_charge;
+    high_pri_pool_usage_ += e->total_charge;
     MaintainPoolSize();
   } else {
     // Insert "e" to the head of low-pri pool. Note that when
@@ -254,7 +253,7 @@ void LRUCacheShard::LRU_Insert(LRUHandle* e) {
     e->SetInHighPriPool(false);
     lru_low_pri_ = e;
   }
-  lru_usage_ += total_charge;
+  lru_usage_ += e->total_charge;
 }
 
 void LRUCacheShard::MaintainPoolSize() {
@@ -263,10 +262,8 @@ void LRUCacheShard::MaintainPoolSize() {
     lru_low_pri_ = lru_low_pri_->next;
     assert(lru_low_pri_ != &lru_);
     lru_low_pri_->SetInHighPriPool(false);
-    size_t total_charge =
-        lru_low_pri_->CalcTotalCharge(metadata_charge_policy_);
-    assert(high_pri_pool_usage_ >= total_charge);
-    high_pri_pool_usage_ -= total_charge;
+    assert(high_pri_pool_usage_ >= lru_low_pri_->total_charge);
+    high_pri_pool_usage_ -= lru_low_pri_->total_charge;
   }
 }
 
@@ -279,9 +276,8 @@ void LRUCacheShard::EvictFromLRU(size_t charge,
     LRU_Remove(old);
     table_.Remove(old->key(), old->hash);
     old->SetInCache(false);
-    size_t old_total_charge = old->CalcTotalCharge(metadata_charge_policy_);
-    assert(usage_ >= old_total_charge);
-    usage_ -= old_total_charge;
+    assert(usage_ >= old->total_charge);
+    usage_ -= old->total_charge;
     deleted->push_back(old);
   }
 }
@@ -316,16 +312,15 @@ Status LRUCacheShard::InsertItem(LRUHandle* e, Cache::Handle** handle,
                                  bool free_handle_on_fail) {
   Status s = Status::OK();
   autovector<LRUHandle*> last_reference_list;
-  size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_);
 
   {
     MutexLock l(&mutex_);
 
     // Free the space following strict LRU policy until enough space
     // is freed or the lru list is empty.
-    EvictFromLRU(total_charge, &last_reference_list);
+    EvictFromLRU(e->total_charge, &last_reference_list);
 
-    if ((usage_ + total_charge) > capacity_ &&
+    if ((usage_ + e->total_charge) > capacity_ &&
         (strict_capacity_limit_ || handle == nullptr)) {
       e->SetInCache(false);
       if (handle == nullptr) {
@@ -343,7 +338,7 @@ Status LRUCacheShard::InsertItem(LRUHandle* e, Cache::Handle** handle,
     // Insert into the cache. Note that the cache might get larger than its
     // capacity if not enough space was freed up.
     LRUHandle* old = table_.Insert(e);
-    usage_ += total_charge;
+    usage_ += e->total_charge;
     if (old != nullptr) {
       s = Status::OkOverwritten();
       assert(old->InCache());
@@ -351,10 +346,8 @@ Status LRUCacheShard::InsertItem(LRUHandle* e, Cache::Handle** handle,
       if (!old->HasRefs()) {
         // old is on LRU because it's in cache and its reference count is 0.
         LRU_Remove(old);
-        size_t old_total_charge =
-            old->CalcTotalCharge(metadata_charge_policy_);
-        assert(usage_ >= old_total_charge);
-        usage_ -= old_total_charge;
+        assert(usage_ >= old->total_charge);
+        usage_ -= old->total_charge;
         last_reference_list.push_back(old);
       }
     }
@@ -391,7 +384,7 @@ void LRUCacheShard::Promote(LRUHandle* e) {
     e->SetIncomplete(false);
     e->SetInCache(true);
     e->value = secondary_handle->Value();
-    e->charge = secondary_handle->Size();
+    e->CalcTotalCharge(secondary_handle->Size(), metadata_charge_policy_);
     delete secondary_handle;
 
     // This call could fail if the cache is over capacity and
@@ -410,7 +403,8 @@ void LRUCacheShard::Promote(LRUHandle* e) {
     // Since the secondary cache lookup failed, mark the item as not in cache
    // Don't charge the cache as it's only metadata that'll shortly be released
     MutexLock l(&mutex_);
-    e->charge = 0;
+    // TODO
+    e->CalcTotalCharge(0, metadata_charge_policy_);
     e->SetInCache(false);
   }
 }
@@ -538,9 +532,8 @@ bool LRUCacheShard::Release(Cache::Handle* handle, bool erase_if_last_ref) {
     // usage. If value is null in the latter case, that means the lookup
    // failed and we didn't charge the cache.
     if (last_reference && (!e->IsSecondaryCacheCompatible() || e->value)) {
-      size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_);
-      assert(usage_ >= total_charge);
-      usage_ -= total_charge;
+      assert(usage_ >= e->total_charge);
+      usage_ -= e->total_charge;
     }
   }
 
@@ -573,7 +566,6 @@ Status LRUCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
 #endif  // __SANITIZE_THREAD__
     e->info_.deleter = deleter;
   }
-  e->charge = charge;
   e->key_length = key.size();
   e->hash = hash;
   e->refs = 0;
@@ -581,6 +573,7 @@ Status LRUCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
   e->SetInCache(true);
   e->SetPriority(priority);
   memcpy(e->key_data, key.data(), key.size());
+  e->CalcTotalCharge(charge, metadata_charge_policy_);
 
   return InsertItem(e, handle, /* free_handle_on_fail */ true);
 }
@@ -597,9 +590,8 @@ void LRUCacheShard::Erase(const Slice& key, uint32_t hash) {
       if (!e->HasRefs()) {
         // The entry is in LRU since it's in hash and has no external references
         LRU_Remove(e);
-        size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_);
-        assert(usage_ >= total_charge);
-        usage_ -= total_charge;
+        assert(usage_ >= e->total_charge);
+        usage_ -= e->total_charge;
         last_reference = true;
       }
     }
@@ -690,7 +682,12 @@ void* LRUCache::Value(Handle* handle) {
 }
 
 size_t LRUCache::GetCharge(Handle* handle) const {
-  return reinterpret_cast<const LRUHandle*>(handle)->charge;
+  CacheMetadataChargePolicy metadata_charge_policy = kDontChargeCacheMetadata;
+  if (num_shards_ > 0) {
+    metadata_charge_policy = shards_[0].metadata_charge_policy_;
+  }
+  return reinterpret_cast<const LRUHandle*>(handle)->GetCharge(
+      metadata_charge_policy);
 }
 
 Cache::DeleterFn LRUCache::GetDeleter(Handle* handle) const {
diff --git a/cache/lru_cache.h b/cache/lru_cache.h
index 2da78eb67..c718add77 100644
--- a/cache/lru_cache.h
+++ b/cache/lru_cache.h
@@ -66,7 +66,7 @@ struct LRUHandle {
   };
   LRUHandle* next;
   LRUHandle* prev;
-  size_t charge;  // TODO(opt): Only allow uint32_t?
+  size_t total_charge;  // TODO(opt): Only allow uint32_t?
   size_t key_length;
   // The hash of key(). Used for fast sharding and comparisons.
   uint32_t hash;
@@ -209,19 +209,32 @@ struct LRUHandle {
     delete[] reinterpret_cast<char*>(this);
   }
 
-  // Calculate the memory usage by metadata.
-  inline size_t CalcTotalCharge(
-      CacheMetadataChargePolicy metadata_charge_policy) {
-    size_t meta_charge = 0;
-    if (metadata_charge_policy == kFullChargeCacheMetadata) {
+  inline size_t CalcuMetaCharge(
+      CacheMetadataChargePolicy metadata_charge_policy) const {
+    if (metadata_charge_policy != kFullChargeCacheMetadata) {
+      return 0;
+    } else {
 #ifdef ROCKSDB_MALLOC_USABLE_SIZE
-      meta_charge += malloc_usable_size(static_cast<void*>(this));
+      return malloc_usable_size(
+          const_cast<void*>(static_cast<const void*>(this)));
 #else
       // This is the size that is used when a new handle is created.
-      meta_charge += sizeof(LRUHandle) - 1 + key_length;
+      return sizeof(LRUHandle) - 1 + key_length;
 #endif
     }
-    return charge + meta_charge;
+  }
+
+  // Calculate the total charge, including the memory used by the metadata.
+  inline void CalcTotalCharge(
+      size_t charge, CacheMetadataChargePolicy metadata_charge_policy) {
+    total_charge = charge + CalcuMetaCharge(metadata_charge_policy);
+  }
+
+  inline size_t GetCharge(
+      CacheMetadataChargePolicy metadata_charge_policy) const {
+    size_t meta_charge = CalcuMetaCharge(metadata_charge_policy);
+    assert(total_charge >= meta_charge);
+    return total_charge - meta_charge;
   }
 };
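For readers who want the core idea without the full diff, below is a minimal
standalone sketch of the pattern this patch applies. It is not RocksDB code:
the Handle and Shard types, and the Add/Remove functions, are hypothetical
names for illustration only. The point it demonstrates is the one from the
summary: call malloc_usable_size() once, outside the mutex, store the
combined total charge in the handle, and do only plain arithmetic under the
lock.

// Minimal sketch of "precompute total charge outside the mutex".
// Hypothetical types; not part of the RocksDB API.
#include <cassert>
#include <cstddef>
#include <mutex>

#ifdef ROCKSDB_MALLOC_USABLE_SIZE
#include <malloc.h>  // malloc_usable_size()
#endif

struct Handle {
  std::size_t total_charge = 0;  // user charge + metadata, set at insert time

  // Metadata overhead of this heap-allocated handle. May call
  // malloc_usable_size(), so it must stay out of any hot mutex.
  std::size_t CalcMetaCharge() const {
#ifdef ROCKSDB_MALLOC_USABLE_SIZE
    return malloc_usable_size(
        const_cast<void*>(static_cast<const void*>(this)));
#else
    return sizeof(Handle);
#endif
  }

  void CalcTotalCharge(std::size_t charge) {
    total_charge = charge + CalcMetaCharge();
  }
};

struct Shard {
  std::mutex mu;
  std::size_t usage = 0;

  void Add(Handle* h, std::size_t charge) {
    h->CalcTotalCharge(charge);  // malloc_usable_size() runs lock-free here
    std::lock_guard<std::mutex> l(mu);
    usage += h->total_charge;  // under the mutex: plain arithmetic only
  }

  void Remove(Handle* h) {
    std::lock_guard<std::mutex> l(mu);
    assert(usage >= h->total_charge);
    usage -= h->total_charge;  // no recomputation needed under the lock
  }
};

The trade-off, as in the patch, is one extra size_t per handle in exchange
for removing a potentially slow allocator call from every mutex-protected
bookkeeping operation; when the plain (metadata-exclusive) charge is needed,
it is recovered by subtracting the recomputed metadata charge outside the
lock, as the GetCharge() methods in the diff do.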