Avoid malloc_usable_size() call inside LRU Cache mutex (#10026)

Summary:
Inside the LRU Cache mutex, we sometimes call malloc_usable_size() to calculate the memory used by a handle's metadata. We avoid this by storing charge + metadata size (the total charge), rather than the charge alone, inside the handle itself. Within the mutex, usually only the total charge is needed, so we don't need to recompute the metadata size.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/10026

Test Plan: Run existing tests.

Reviewed By: pdillinger

Differential Revision: D36556253

fbshipit-source-id: f60c96d13cde3af77732e5548e4eac4182fa9801
main
sdong 3 years ago committed by Facebook GitHub Bot
parent d4081bf0be
commit c78a87cd71
  1. 2
      HISTORY.md
  2. 57
      cache/fast_lru_cache.cc
  3. 29
      cache/fast_lru_cache.h
  4. 73
      cache/lru_cache.cc
  5. 31
      cache/lru_cache.h

@ -44,6 +44,8 @@
* ldb `--try_load_options` default to true if `--db` is specified and not creating a new DB, the user can still explicitly disable that by `--try_load_options=false` (or explicitly enable that by `--try_load_options`). * ldb `--try_load_options` default to true if `--db` is specified and not creating a new DB, the user can still explicitly disable that by `--try_load_options=false` (or explicitly enable that by `--try_load_options`).
* During Flush write or Compaction write/read, the WriteController is used to determine whether DB writes are stalled or slowed down. The priority (Env::IOPriority) can then be determined accordingly and be passed in IOOptions to the file system. * During Flush write or Compaction write/read, the WriteController is used to determine whether DB writes are stalled or slowed down. The priority (Env::IOPriority) can then be determined accordingly and be passed in IOOptions to the file system.
### Performance Improvements
* Avoid calling malloc_usable_size() in LRU Cache's mutex.
## 7.2.0 (04/15/2022) ## 7.2.0 (04/15/2022)
### Bug Fixes ### Bug Fixes

@ -137,9 +137,8 @@ void LRUCacheShard::EraseUnRefEntries() {
LRU_Remove(old); LRU_Remove(old);
table_.Remove(old->key(), old->hash); table_.Remove(old->key(), old->hash);
old->SetInCache(false); old->SetInCache(false);
size_t total_charge = old->CalcTotalCharge(metadata_charge_policy_); assert(usage_ >= old->total_charge);
assert(usage_ >= total_charge); usage_ -= old->total_charge;
usage_ -= total_charge;
last_reference_list.push_back(old); last_reference_list.push_back(old);
} }
} }
@ -177,8 +176,10 @@ void LRUCacheShard::ApplyToSomeEntries(
} }
table_.ApplyToEntriesRange( table_.ApplyToEntriesRange(
[callback](LRUHandle* h) { [callback,
callback(h->key(), h->value, h->charge, h->deleter); metadata_charge_policy = metadata_charge_policy_](LRUHandle* h) {
callback(h->key(), h->value, h->GetCharge(metadata_charge_policy),
h->deleter);
}, },
index_begin, index_end); index_begin, index_end);
} }
@ -189,21 +190,19 @@ void LRUCacheShard::LRU_Remove(LRUHandle* e) {
e->next->prev = e->prev; e->next->prev = e->prev;
e->prev->next = e->next; e->prev->next = e->next;
e->prev = e->next = nullptr; e->prev = e->next = nullptr;
size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_); assert(lru_usage_ >= e->total_charge);
assert(lru_usage_ >= total_charge); lru_usage_ -= e->total_charge;
lru_usage_ -= total_charge;
} }
void LRUCacheShard::LRU_Insert(LRUHandle* e) { void LRUCacheShard::LRU_Insert(LRUHandle* e) {
assert(e->next == nullptr); assert(e->next == nullptr);
assert(e->prev == nullptr); assert(e->prev == nullptr);
size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_);
// Inset "e" to head of LRU list. // Inset "e" to head of LRU list.
e->next = &lru_; e->next = &lru_;
e->prev = lru_.prev; e->prev = lru_.prev;
e->prev->next = e; e->prev->next = e;
e->next->prev = e; e->next->prev = e;
lru_usage_ += total_charge; lru_usage_ += e->total_charge;
} }
void LRUCacheShard::EvictFromLRU(size_t charge, void LRUCacheShard::EvictFromLRU(size_t charge,
@ -215,9 +214,8 @@ void LRUCacheShard::EvictFromLRU(size_t charge,
LRU_Remove(old); LRU_Remove(old);
table_.Remove(old->key(), old->hash); table_.Remove(old->key(), old->hash);
old->SetInCache(false); old->SetInCache(false);
size_t old_total_charge = old->CalcTotalCharge(metadata_charge_policy_); assert(usage_ >= old->total_charge);
assert(usage_ >= old_total_charge); usage_ -= old->total_charge;
usage_ -= old_total_charge;
deleted->push_back(old); deleted->push_back(old);
} }
} }
@ -245,16 +243,14 @@ Status LRUCacheShard::InsertItem(LRUHandle* e, Cache::Handle** handle,
bool free_handle_on_fail) { bool free_handle_on_fail) {
Status s = Status::OK(); Status s = Status::OK();
autovector<LRUHandle*> last_reference_list; autovector<LRUHandle*> last_reference_list;
size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_);
{ {
MutexLock l(&mutex_); MutexLock l(&mutex_);
// Free the space following strict LRU policy until enough space // Free the space following strict LRU policy until enough space
// is freed or the lru list is empty. // is freed or the lru list is empty.
EvictFromLRU(total_charge, &last_reference_list); EvictFromLRU(e->total_charge, &last_reference_list);
if ((usage_ + total_charge) > capacity_ && if ((usage_ + e->total_charge) > capacity_ &&
(strict_capacity_limit_ || handle == nullptr)) { (strict_capacity_limit_ || handle == nullptr)) {
e->SetInCache(false); e->SetInCache(false);
if (handle == nullptr) { if (handle == nullptr) {
@ -272,7 +268,7 @@ Status LRUCacheShard::InsertItem(LRUHandle* e, Cache::Handle** handle,
// Insert into the cache. Note that the cache might get larger than its // Insert into the cache. Note that the cache might get larger than its
// capacity if not enough space was freed up. // capacity if not enough space was freed up.
LRUHandle* old = table_.Insert(e); LRUHandle* old = table_.Insert(e);
usage_ += total_charge; usage_ += e->total_charge;
if (old != nullptr) { if (old != nullptr) {
s = Status::OkOverwritten(); s = Status::OkOverwritten();
assert(old->InCache()); assert(old->InCache());
@ -280,10 +276,8 @@ Status LRUCacheShard::InsertItem(LRUHandle* e, Cache::Handle** handle,
if (!old->HasRefs()) { if (!old->HasRefs()) {
// old is on LRU because it's in cache and its reference count is 0. // old is on LRU because it's in cache and its reference count is 0.
LRU_Remove(old); LRU_Remove(old);
size_t old_total_charge = assert(usage_ >= old->total_charge);
old->CalcTotalCharge(metadata_charge_policy_); usage_ -= old->total_charge;
assert(usage_ >= old_total_charge);
usage_ -= old_total_charge;
last_reference_list.push_back(old); last_reference_list.push_back(old);
} }
} }
@ -358,9 +352,8 @@ bool LRUCacheShard::Release(Cache::Handle* handle, bool erase_if_last_ref) {
} }
// If it was the last reference, then decrement the cache usage. // If it was the last reference, then decrement the cache usage.
if (last_reference) { if (last_reference) {
size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_); assert(usage_ >= e->total_charge);
assert(usage_ >= total_charge); usage_ -= e->total_charge;
usage_ -= total_charge;
} }
} }
@ -384,12 +377,12 @@ Status LRUCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
e->value = value; e->value = value;
e->flags = 0; e->flags = 0;
e->deleter = deleter; e->deleter = deleter;
e->charge = charge;
e->key_length = key.size(); e->key_length = key.size();
e->hash = hash; e->hash = hash;
e->refs = 0; e->refs = 0;
e->next = e->prev = nullptr; e->next = e->prev = nullptr;
e->SetInCache(true); e->SetInCache(true);
e->CalcTotalCharge(charge, metadata_charge_policy_);
memcpy(e->key_data, key.data(), key.size()); memcpy(e->key_data, key.data(), key.size());
return InsertItem(e, handle, /* free_handle_on_fail */ true); return InsertItem(e, handle, /* free_handle_on_fail */ true);
@ -407,9 +400,8 @@ void LRUCacheShard::Erase(const Slice& key, uint32_t hash) {
if (!e->HasRefs()) { if (!e->HasRefs()) {
// The entry is in LRU since it's in hash and has no external references // The entry is in LRU since it's in hash and has no external references
LRU_Remove(e); LRU_Remove(e);
size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_); assert(usage_ >= e->total_charge);
assert(usage_ >= total_charge); usage_ -= e->total_charge;
usage_ -= total_charge;
last_reference = true; last_reference = true;
} }
} }
@ -473,7 +465,12 @@ void* LRUCache::Value(Handle* handle) {
} }
size_t LRUCache::GetCharge(Handle* handle) const { size_t LRUCache::GetCharge(Handle* handle) const {
return reinterpret_cast<const LRUHandle*>(handle)->charge; CacheMetadataChargePolicy metadata_charge_policy = kDontChargeCacheMetadata;
if (num_shards_ > 0) {
metadata_charge_policy = shards_[0].metadata_charge_policy_;
}
return reinterpret_cast<const LRUHandle*>(handle)->GetCharge(
metadata_charge_policy);
} }
Cache::DeleterFn LRUCache::GetDeleter(Handle* handle) const { Cache::DeleterFn LRUCache::GetDeleter(Handle* handle) const {

@ -29,7 +29,7 @@ struct LRUHandle {
LRUHandle* next_hash; LRUHandle* next_hash;
LRUHandle* next; LRUHandle* next;
LRUHandle* prev; LRUHandle* prev;
size_t charge; // TODO(opt): Only allow uint32_t? size_t total_charge; // TODO(opt): Only allow uint32_t?
size_t key_length; size_t key_length;
// The hash of key(). Used for fast sharding and comparisons. // The hash of key(). Used for fast sharding and comparisons.
uint32_t hash; uint32_t hash;
@ -79,18 +79,31 @@ struct LRUHandle {
} }
// Calculate the memory usage by metadata. // Calculate the memory usage by metadata.
inline size_t CalcTotalCharge( inline size_t CalcMetaCharge(
CacheMetadataChargePolicy metadata_charge_policy) { CacheMetadataChargePolicy metadata_charge_policy) const {
size_t meta_charge = 0; if (metadata_charge_policy != kFullChargeCacheMetadata) {
if (metadata_charge_policy == kFullChargeCacheMetadata) { return 0;
} else {
#ifdef ROCKSDB_MALLOC_USABLE_SIZE #ifdef ROCKSDB_MALLOC_USABLE_SIZE
meta_charge += malloc_usable_size(static_cast<void*>(this)); return malloc_usable_size(
const_cast<void*>(static_cast<const void*>(this)));
#else #else
// This is the size that is used when a new handle is created. // This is the size that is used when a new handle is created.
meta_charge += sizeof(LRUHandle) - 1 + key_length; return sizeof(LRUHandle) - 1 + key_length;
#endif #endif
} }
return charge + meta_charge; }
inline void CalcTotalCharge(
size_t charge, CacheMetadataChargePolicy metadata_charge_policy) {
total_charge = charge + CalcMetaCharge(metadata_charge_policy);
}
inline size_t GetCharge(
CacheMetadataChargePolicy metadata_charge_policy) const {
size_t meta_charge = CalcMetaCharge(metadata_charge_policy);
assert(total_charge >= meta_charge);
return total_charge - meta_charge;
} }
}; };

73
cache/lru_cache.cc vendored

@ -143,9 +143,8 @@ void LRUCacheShard::EraseUnRefEntries() {
LRU_Remove(old); LRU_Remove(old);
table_.Remove(old->key(), old->hash); table_.Remove(old->key(), old->hash);
old->SetInCache(false); old->SetInCache(false);
size_t total_charge = old->CalcTotalCharge(metadata_charge_policy_); assert(usage_ >= old->total_charge);
assert(usage_ >= total_charge); usage_ -= old->total_charge;
usage_ -= total_charge;
last_reference_list.push_back(old); last_reference_list.push_back(old);
} }
} }
@ -182,11 +181,13 @@ void LRUCacheShard::ApplyToSomeEntries(
} }
table_.ApplyToEntriesRange( table_.ApplyToEntriesRange(
[callback](LRUHandle* h) { [callback,
metadata_charge_policy = metadata_charge_policy_](LRUHandle* h) {
DeleterFn deleter = h->IsSecondaryCacheCompatible() DeleterFn deleter = h->IsSecondaryCacheCompatible()
? h->info_.helper->del_cb ? h->info_.helper->del_cb
: h->info_.deleter; : h->info_.deleter;
callback(h->key(), h->value, h->charge, deleter); callback(h->key(), h->value, h->GetCharge(metadata_charge_policy),
deleter);
}, },
index_begin, index_end); index_begin, index_end);
} }
@ -222,19 +223,17 @@ void LRUCacheShard::LRU_Remove(LRUHandle* e) {
e->next->prev = e->prev; e->next->prev = e->prev;
e->prev->next = e->next; e->prev->next = e->next;
e->prev = e->next = nullptr; e->prev = e->next = nullptr;
size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_); assert(lru_usage_ >= e->total_charge);
assert(lru_usage_ >= total_charge); lru_usage_ -= e->total_charge;
lru_usage_ -= total_charge;
if (e->InHighPriPool()) { if (e->InHighPriPool()) {
assert(high_pri_pool_usage_ >= total_charge); assert(high_pri_pool_usage_ >= e->total_charge);
high_pri_pool_usage_ -= total_charge; high_pri_pool_usage_ -= e->total_charge;
} }
} }
void LRUCacheShard::LRU_Insert(LRUHandle* e) { void LRUCacheShard::LRU_Insert(LRUHandle* e) {
assert(e->next == nullptr); assert(e->next == nullptr);
assert(e->prev == nullptr); assert(e->prev == nullptr);
size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_);
if (high_pri_pool_ratio_ > 0 && (e->IsHighPri() || e->HasHit())) { if (high_pri_pool_ratio_ > 0 && (e->IsHighPri() || e->HasHit())) {
// Inset "e" to head of LRU list. // Inset "e" to head of LRU list.
e->next = &lru_; e->next = &lru_;
@ -242,7 +241,7 @@ void LRUCacheShard::LRU_Insert(LRUHandle* e) {
e->prev->next = e; e->prev->next = e;
e->next->prev = e; e->next->prev = e;
e->SetInHighPriPool(true); e->SetInHighPriPool(true);
high_pri_pool_usage_ += total_charge; high_pri_pool_usage_ += e->total_charge;
MaintainPoolSize(); MaintainPoolSize();
} else { } else {
// Insert "e" to the head of low-pri pool. Note that when // Insert "e" to the head of low-pri pool. Note that when
@ -254,7 +253,7 @@ void LRUCacheShard::LRU_Insert(LRUHandle* e) {
e->SetInHighPriPool(false); e->SetInHighPriPool(false);
lru_low_pri_ = e; lru_low_pri_ = e;
} }
lru_usage_ += total_charge; lru_usage_ += e->total_charge;
} }
void LRUCacheShard::MaintainPoolSize() { void LRUCacheShard::MaintainPoolSize() {
@ -263,10 +262,8 @@ void LRUCacheShard::MaintainPoolSize() {
lru_low_pri_ = lru_low_pri_->next; lru_low_pri_ = lru_low_pri_->next;
assert(lru_low_pri_ != &lru_); assert(lru_low_pri_ != &lru_);
lru_low_pri_->SetInHighPriPool(false); lru_low_pri_->SetInHighPriPool(false);
size_t total_charge = assert(high_pri_pool_usage_ >= lru_low_pri_->total_charge);
lru_low_pri_->CalcTotalCharge(metadata_charge_policy_); high_pri_pool_usage_ -= lru_low_pri_->total_charge;
assert(high_pri_pool_usage_ >= total_charge);
high_pri_pool_usage_ -= total_charge;
} }
} }
@ -279,9 +276,8 @@ void LRUCacheShard::EvictFromLRU(size_t charge,
LRU_Remove(old); LRU_Remove(old);
table_.Remove(old->key(), old->hash); table_.Remove(old->key(), old->hash);
old->SetInCache(false); old->SetInCache(false);
size_t old_total_charge = old->CalcTotalCharge(metadata_charge_policy_); assert(usage_ >= old->total_charge);
assert(usage_ >= old_total_charge); usage_ -= old->total_charge;
usage_ -= old_total_charge;
deleted->push_back(old); deleted->push_back(old);
} }
} }
@ -316,16 +312,15 @@ Status LRUCacheShard::InsertItem(LRUHandle* e, Cache::Handle** handle,
bool free_handle_on_fail) { bool free_handle_on_fail) {
Status s = Status::OK(); Status s = Status::OK();
autovector<LRUHandle*> last_reference_list; autovector<LRUHandle*> last_reference_list;
size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_);
{ {
MutexLock l(&mutex_); MutexLock l(&mutex_);
// Free the space following strict LRU policy until enough space // Free the space following strict LRU policy until enough space
// is freed or the lru list is empty. // is freed or the lru list is empty.
EvictFromLRU(total_charge, &last_reference_list); EvictFromLRU(e->total_charge, &last_reference_list);
if ((usage_ + total_charge) > capacity_ && if ((usage_ + e->total_charge) > capacity_ &&
(strict_capacity_limit_ || handle == nullptr)) { (strict_capacity_limit_ || handle == nullptr)) {
e->SetInCache(false); e->SetInCache(false);
if (handle == nullptr) { if (handle == nullptr) {
@ -343,7 +338,7 @@ Status LRUCacheShard::InsertItem(LRUHandle* e, Cache::Handle** handle,
// Insert into the cache. Note that the cache might get larger than its // Insert into the cache. Note that the cache might get larger than its
// capacity if not enough space was freed up. // capacity if not enough space was freed up.
LRUHandle* old = table_.Insert(e); LRUHandle* old = table_.Insert(e);
usage_ += total_charge; usage_ += e->total_charge;
if (old != nullptr) { if (old != nullptr) {
s = Status::OkOverwritten(); s = Status::OkOverwritten();
assert(old->InCache()); assert(old->InCache());
@ -351,10 +346,8 @@ Status LRUCacheShard::InsertItem(LRUHandle* e, Cache::Handle** handle,
if (!old->HasRefs()) { if (!old->HasRefs()) {
// old is on LRU because it's in cache and its reference count is 0. // old is on LRU because it's in cache and its reference count is 0.
LRU_Remove(old); LRU_Remove(old);
size_t old_total_charge = assert(usage_ >= old->total_charge);
old->CalcTotalCharge(metadata_charge_policy_); usage_ -= old->total_charge;
assert(usage_ >= old_total_charge);
usage_ -= old_total_charge;
last_reference_list.push_back(old); last_reference_list.push_back(old);
} }
} }
@ -391,7 +384,7 @@ void LRUCacheShard::Promote(LRUHandle* e) {
e->SetIncomplete(false); e->SetIncomplete(false);
e->SetInCache(true); e->SetInCache(true);
e->value = secondary_handle->Value(); e->value = secondary_handle->Value();
e->charge = secondary_handle->Size(); e->CalcTotalCharge(secondary_handle->Size(), metadata_charge_policy_);
delete secondary_handle; delete secondary_handle;
// This call could fail if the cache is over capacity and // This call could fail if the cache is over capacity and
@ -410,7 +403,8 @@ void LRUCacheShard::Promote(LRUHandle* e) {
// Since the secondary cache lookup failed, mark the item as not in cache // Since the secondary cache lookup failed, mark the item as not in cache
// Don't charge the cache as its only metadata that'll shortly be released // Don't charge the cache as its only metadata that'll shortly be released
MutexLock l(&mutex_); MutexLock l(&mutex_);
e->charge = 0; // TODO
e->CalcTotalCharge(0, metadata_charge_policy_);
e->SetInCache(false); e->SetInCache(false);
} }
} }
@ -538,9 +532,8 @@ bool LRUCacheShard::Release(Cache::Handle* handle, bool erase_if_last_ref) {
// usage. If value is null in the latter case, taht means the lookup // usage. If value is null in the latter case, taht means the lookup
// failed and we didn't charge the cache. // failed and we didn't charge the cache.
if (last_reference && (!e->IsSecondaryCacheCompatible() || e->value)) { if (last_reference && (!e->IsSecondaryCacheCompatible() || e->value)) {
size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_); assert(usage_ >= e->total_charge);
assert(usage_ >= total_charge); usage_ -= e->total_charge;
usage_ -= total_charge;
} }
} }
@ -573,7 +566,6 @@ Status LRUCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
#endif // __SANITIZE_THREAD__ #endif // __SANITIZE_THREAD__
e->info_.deleter = deleter; e->info_.deleter = deleter;
} }
e->charge = charge;
e->key_length = key.size(); e->key_length = key.size();
e->hash = hash; e->hash = hash;
e->refs = 0; e->refs = 0;
@ -581,6 +573,7 @@ Status LRUCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
e->SetInCache(true); e->SetInCache(true);
e->SetPriority(priority); e->SetPriority(priority);
memcpy(e->key_data, key.data(), key.size()); memcpy(e->key_data, key.data(), key.size());
e->CalcTotalCharge(charge, metadata_charge_policy_);
return InsertItem(e, handle, /* free_handle_on_fail */ true); return InsertItem(e, handle, /* free_handle_on_fail */ true);
} }
@ -597,9 +590,8 @@ void LRUCacheShard::Erase(const Slice& key, uint32_t hash) {
if (!e->HasRefs()) { if (!e->HasRefs()) {
// The entry is in LRU since it's in hash and has no external references // The entry is in LRU since it's in hash and has no external references
LRU_Remove(e); LRU_Remove(e);
size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_); assert(usage_ >= e->total_charge);
assert(usage_ >= total_charge); usage_ -= e->total_charge;
usage_ -= total_charge;
last_reference = true; last_reference = true;
} }
} }
@ -690,7 +682,12 @@ void* LRUCache::Value(Handle* handle) {
} }
size_t LRUCache::GetCharge(Handle* handle) const { size_t LRUCache::GetCharge(Handle* handle) const {
return reinterpret_cast<const LRUHandle*>(handle)->charge; CacheMetadataChargePolicy metadata_charge_policy = kDontChargeCacheMetadata;
if (num_shards_ > 0) {
metadata_charge_policy = shards_[0].metadata_charge_policy_;
}
return reinterpret_cast<const LRUHandle*>(handle)->GetCharge(
metadata_charge_policy);
} }
Cache::DeleterFn LRUCache::GetDeleter(Handle* handle) const { Cache::DeleterFn LRUCache::GetDeleter(Handle* handle) const {

31
cache/lru_cache.h vendored

@ -66,7 +66,7 @@ struct LRUHandle {
}; };
LRUHandle* next; LRUHandle* next;
LRUHandle* prev; LRUHandle* prev;
size_t charge; // TODO(opt): Only allow uint32_t? size_t total_charge; // TODO(opt): Only allow uint32_t?
size_t key_length; size_t key_length;
// The hash of key(). Used for fast sharding and comparisons. // The hash of key(). Used for fast sharding and comparisons.
uint32_t hash; uint32_t hash;
@ -209,19 +209,32 @@ struct LRUHandle {
delete[] reinterpret_cast<char*>(this); delete[] reinterpret_cast<char*>(this);
} }
// Calculate the memory usage by metadata. inline size_t CalcuMetaCharge(
inline size_t CalcTotalCharge( CacheMetadataChargePolicy metadata_charge_policy) const {
CacheMetadataChargePolicy metadata_charge_policy) { if (metadata_charge_policy != kFullChargeCacheMetadata) {
size_t meta_charge = 0; return 0;
if (metadata_charge_policy == kFullChargeCacheMetadata) { } else {
#ifdef ROCKSDB_MALLOC_USABLE_SIZE #ifdef ROCKSDB_MALLOC_USABLE_SIZE
meta_charge += malloc_usable_size(static_cast<void*>(this)); return malloc_usable_size(
const_cast<void*>(static_cast<const void*>(this)));
#else #else
// This is the size that is used when a new handle is created. // This is the size that is used when a new handle is created.
meta_charge += sizeof(LRUHandle) - 1 + key_length; return sizeof(LRUHandle) - 1 + key_length;
#endif #endif
} }
return charge + meta_charge; }
// Calculate the memory usage by metadata.
inline void CalcTotalCharge(
size_t charge, CacheMetadataChargePolicy metadata_charge_policy) {
total_charge = charge + CalcuMetaCharge(metadata_charge_policy);
}
inline size_t GetCharge(
CacheMetadataChargePolicy metadata_charge_policy) const {
size_t meta_charge = CalcuMetaCharge(metadata_charge_policy);
assert(total_charge >= meta_charge);
return total_charge - meta_charge;
} }
}; };

Loading…
Cancel
Save