@@ -9,77 +9,191 @@
 #include "cache/fast_lru_cache.h"
 
+#include <math.h>
+
 #include <cassert>
 #include <cstdint>
 #include <cstdio>
+#include <functional>
 
 #include "monitoring/perf_context_imp.h"
 #include "monitoring/statistics.h"
 #include "port/lang.h"
 #include "util/distributed_mutex.h"
-
-#define KEY_LENGTH \
-  16  // TODO(guido) Make use of this symbol in other parts of the source code
-      // (e.g., cache_key.h, cache_test.cc, etc.)
+#include "util/hash.h"
+#include "util/random.h"
 
 namespace ROCKSDB_NAMESPACE {
 
 namespace fast_lru_cache {
 
-LRUHandleTable::LRUHandleTable(int hash_bits)
+namespace {
+// Returns x % 2^{bits}.
+inline uint32_t BinaryMod(uint32_t x, uint8_t bits) {
+  assert(bits <= 32);
+  return (x << (32 - bits)) >> (32 - bits);
+}
+}  // anonymous namespace
+
+LRUHandleTable::LRUHandleTable(uint8_t hash_bits)
     : length_bits_(hash_bits),
-      list_(new LRUHandle* [size_t{1} << length_bits_] {}) {}
+      occupancy_(0),
+      array_(new LRUHandle[size_t{1} << length_bits_]) {
+  assert(hash_bits <= 32);
+}
 LRUHandleTable::~LRUHandleTable() {
+  // TODO(Guido) If users still hold references to handles,
+  // those will become invalidated. And if we choose not to
+  // delete the data, it will become leaked.
   ApplyToEntriesRange(
       [](LRUHandle* h) {
+        // TODO(Guido) Remove the HasRefs() check?
         if (!h->HasRefs()) {
-          h->Free();
+          h->FreeData();
         }
       },
       0, uint32_t{1} << length_bits_);
 }
 LRUHandle* LRUHandleTable::Lookup(const Slice& key, uint32_t hash) {
-  return *FindPointer(key, hash);
-}
-
-inline LRUHandle** LRUHandleTable::Head(uint32_t hash) {
-  return &list_[hash >> (32 - length_bits_)];
-}
-
-LRUHandle* LRUHandleTable::Insert(LRUHandle* h) {
-  LRUHandle** ptr = FindPointer(h->key(), h->hash);
-  LRUHandle* old = *ptr;
-  h->next_hash = (old == nullptr ? nullptr : old->next_hash);
-  *ptr = h;
-  return old;
-}
-
-LRUHandle* LRUHandleTable::Remove(const Slice& key, uint32_t hash) {
-  LRUHandle** ptr = FindPointer(key, hash);
-  LRUHandle* result = *ptr;
-  if (result != nullptr) {
-    *ptr = result->next_hash;
-  }
-  return result;
-}
+  int probe = 0;
+  int slot = FindVisibleElement(key, hash, probe, 0);
+  return (slot == -1) ? nullptr : &array_[slot];
+}
+
+LRUHandle* LRUHandleTable::Insert(LRUHandle* h, LRUHandle** old) {
+  int probe = 0;
+  int slot = FindVisibleElementOrAvailableSlot(h->key(), h->hash, probe,
+                                               1 /*displacement*/);
+  *old = nullptr;
+  if (slot == -1) {
+    return nullptr;
+  }
+
+  if (array_[slot].IsEmpty() || array_[slot].IsTombstone()) {
+    bool empty = array_[slot].IsEmpty();
+    Assign(slot, h);
+    LRUHandle* new_entry = &array_[slot];
+    if (empty) {
+      // This used to be an empty slot.
+      return new_entry;
+    }
+    // It used to be a tombstone, so there may already be a copy of the
+    // key in the table.
+    slot = FindVisibleElement(h->key(), h->hash, probe, 0 /*displacement*/);
+    if (slot == -1) {
+      // No existing copy of the key.
+      return new_entry;
+    }
+    *old = &array_[slot];
+    return new_entry;
+  } else {
+    // There is an existing copy of the key.
+    *old = &array_[slot];
+    // Find an available slot for the new element.
+    array_[slot].displacements++;
+    slot = FindAvailableSlot(h->key(), probe, 1 /*displacement*/);
+    if (slot == -1) {
+      // No available slots. Roll back displacements.
+      probe = 0;
+      slot = FindVisibleElement(h->key(), h->hash, probe, -1);
+      array_[slot].displacements--;
+      FindAvailableSlot(h->key(), probe, -1);
+      return nullptr;
+    }
+    Assign(slot, h);
+    return &array_[slot];
+  }
+}
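// A rough sketch of the displacement bookkeeping above (illustrative
// scenario, with made-up slot numbers): suppose a key's probe sequence
// visits slots 3 and 7, which already hold other elements, before the key
// is stored in slot 12. The insertion walked past slots 3 and 7 with
// displacement == 1, so both get displacements++. Removing that key later
// repeats the walk with displacement == -1 to undo the increments; slot 12
// itself stops being an element, but if other keys were displaced past it,
// its own displacements count presumably stays positive and the slot acts
// as a tombstone rather than an empty slot. That is why probing only
// terminates on IsEmpty() slots and keeps going through tombstones.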
-LRUHandle** LRUHandleTable::FindPointer(const Slice& key, uint32_t hash) {
-  LRUHandle** ptr = &list_[hash >> (32 - length_bits_)];
-  while (*ptr != nullptr && ((*ptr)->hash != hash || key != (*ptr)->key())) {
-    ptr = &(*ptr)->next_hash;
-  }
-  return ptr;
-}
+void LRUHandleTable::Remove(LRUHandle* h) {
+  assert(h->next == nullptr &&
+         h->prev == nullptr);  // Already off the LRU list.
+  int probe = 0;
+  FindSlot(
+      h->key(), [&h](LRUHandle* e) { return e == h; }, probe,
+      -1 /*displacement*/);
+  h->SetIsVisible(false);
+  h->SetIsElement(false);
+  occupancy_--;
+}
+
+void LRUHandleTable::Assign(int slot, LRUHandle* h) {
+  LRUHandle* dst = &array_[slot];
+  uint32_t disp = dst->displacements;
+  *dst = *h;
+  dst->displacements = disp;
+  dst->SetIsVisible(true);
+  dst->SetIsElement(true);
+  occupancy_++;
+}
+
+void LRUHandleTable::Exclude(LRUHandle* h) { h->SetIsVisible(false); }
+
+int LRUHandleTable::FindVisibleElement(const Slice& key, uint32_t hash,
+                                       int& probe, int displacement) {
+  return FindSlot(
+      key,
+      [&](LRUHandle* h) { return h->Matches(key, hash) && h->IsVisible(); },
+      probe, displacement);
+}
+
+int LRUHandleTable::FindAvailableSlot(const Slice& key, int& probe,
+                                      int displacement) {
+  return FindSlot(
+      key, [](LRUHandle* h) { return h->IsEmpty() || h->IsTombstone(); }, probe,
+      displacement);
+}
+
+int LRUHandleTable::FindVisibleElementOrAvailableSlot(const Slice& key,
+                                                      uint32_t hash, int& probe,
+                                                      int displacement) {
+  return FindSlot(
+      key,
+      [&](LRUHandle* h) {
+        return h->IsEmpty() || h->IsTombstone() ||
+               (h->Matches(key, hash) && h->IsVisible());
+      },
+      probe, displacement);
+}
+
+inline int LRUHandleTable::FindSlot(const Slice& key,
+                                    std::function<bool(LRUHandle*)> cond,
+                                    int& probe, int displacement) {
+  uint32_t base =
+      BinaryMod(Hash(key.data(), key.size(), kProbingSeed1), length_bits_);
+  uint32_t increment = BinaryMod(
+      (Hash(key.data(), key.size(), kProbingSeed2) << 1) | 1, length_bits_);
+  uint32_t current = BinaryMod(base + probe * increment, length_bits_);
+  while (true) {
+    LRUHandle* h = &array_[current];
+    probe++;
+    if (current == base && probe > 1) {
+      // We looped back.
+      return -1;
+    }
+    if (cond(h)) {
+      return current;
+    }
+    if (h->IsEmpty()) {
+      // We check emptyness after the condition, because
+      // the condition may be emptyness.
+      return -1;
+    }
+    h->displacements += displacement;
+    current = BinaryMod(current + increment, length_bits_);
+  }
+}
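// Quick worked example of the double hashing above (made-up hash values):
// with length_bits_ == 4 (a 16-slot table), suppose
// Hash(key.data(), key.size(), kProbingSeed1) == 37 and
// Hash(key.data(), key.size(), kProbingSeed2) == 6. Then
// base == BinaryMod(37, 4) == 5 and
// increment == BinaryMod((6 << 1) | 1, 4) == 13. The probe sequence is
// 5, 2, 15, 12, 9, 6, 3, 0, 13, 10, 7, 4, 1, 14, 11, 8 and then wraps back
// to 5, where FindSlot() detects the loop and returns -1. Forcing the
// increment to be odd makes it coprime with the power-of-two table size,
// so every slot is visited exactly once before the wrap.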
 LRUCacheShard::LRUCacheShard(size_t capacity, size_t estimated_value_size,
                              bool strict_capacity_limit,
                              CacheMetadataChargePolicy metadata_charge_policy)
-    : capacity_(0),
+    : capacity_(capacity),
       strict_capacity_limit_(strict_capacity_limit),
       table_(
-          GetHashBits(capacity, estimated_value_size, metadata_charge_policy)),
+          CalcHashBits(capacity, estimated_value_size, metadata_charge_policy) +
+          static_cast<uint8_t>(ceil(log2(1.0 / kLoadFactor)))),
       usage_(0),
       lru_usage_(0) {
   set_metadata_charge_policy(metadata_charge_policy);
@@ -87,29 +201,27 @@ LRUCacheShard::LRUCacheShard(size_t capacity, size_t estimated_value_size,
   lru_.next = &lru_;
   lru_.prev = &lru_;
   lru_low_pri_ = &lru_;
-  SetCapacity(capacity);
 }
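// Back-of-the-envelope sizing example for the constructor above
// (hypothetical numbers; see kLoadFactor in fast_lru_cache.h for the real
// constant): if capacity / total_charge works out to roughly 1000 entries,
// CalcHashBits() returns 9, since its loop computes floor(log2(1000)). If
// kLoadFactor were 0.35, ceil(log2(1.0 / 0.35)) == 2, so length_bits_ would
// be 11 and the table would get 2^11 == 2048 slots, putting expected
// occupancy near 1000 / 2048, i.e., about 49%; the floor in the bit count
// means the result only approximates the target load factor.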
 void LRUCacheShard::EraseUnRefEntries() {
-  autovector<LRUHandle*> last_reference_list;
+  autovector<LRUHandle> last_reference_list;
   {
     DMutexLock l(mutex_);
     while (lru_.next != &lru_) {
       LRUHandle* old = lru_.next;
       // LRU list contains only elements which can be evicted.
-      assert(old->InCache() && !old->HasRefs());
+      assert(old->IsVisible() && !old->HasRefs());
       LRU_Remove(old);
-      table_.Remove(old->key(), old->hash);
-      old->SetInCache(false);
+      table_.Remove(old);
       assert(usage_ >= old->total_charge);
       usage_ -= old->total_charge;
-      last_reference_list.push_back(old);
+      last_reference_list.push_back(*old);
     }
   }
 
   // Free the entries here outside of mutex for performance reasons.
-  for (auto entry : last_reference_list) {
-    entry->Free();
+  for (auto& h : last_reference_list) {
+    h.FreeData();
   }
 }
@@ -148,57 +260,48 @@ void LRUCacheShard::ApplyToSomeEntries(
       index_begin, index_end);
 }
 
-void LRUCacheShard::LRU_Remove(LRUHandle* e) {
-  assert(e->next != nullptr);
-  assert(e->prev != nullptr);
-  e->next->prev = e->prev;
-  e->prev->next = e->next;
-  e->prev = e->next = nullptr;
-  assert(lru_usage_ >= e->total_charge);
-  lru_usage_ -= e->total_charge;
-}
-
-void LRUCacheShard::LRU_Insert(LRUHandle* e) {
-  assert(e->next == nullptr);
-  assert(e->prev == nullptr);
-  // Inset "e" to head of LRU list.
-  e->next = &lru_;
-  e->prev = lru_.prev;
-  e->prev->next = e;
-  e->next->prev = e;
-  lru_usage_ += e->total_charge;
-}
+void LRUCacheShard::LRU_Remove(LRUHandle* h) {
+  assert(h->next != nullptr);
+  assert(h->prev != nullptr);
+  h->next->prev = h->prev;
+  h->prev->next = h->next;
+  h->prev = h->next = nullptr;
+  assert(lru_usage_ >= h->total_charge);
+  lru_usage_ -= h->total_charge;
+}
+
+void LRUCacheShard::LRU_Insert(LRUHandle* h) {
+  assert(h->next == nullptr);
+  assert(h->prev == nullptr);
+  // Insert h to head of LRU list.
+  h->next = &lru_;
+  h->prev = lru_.prev;
+  h->prev->next = h;
+  h->next->prev = h;
+  lru_usage_ += h->total_charge;
+}
 void LRUCacheShard::EvictFromLRU(size_t charge,
-                                 autovector<LRUHandle*>* deleted) {
+                                 autovector<LRUHandle>* deleted) {
   while ((usage_ + charge) > capacity_ && lru_.next != &lru_) {
     LRUHandle* old = lru_.next;
     // LRU list contains only elements which can be evicted.
-    assert(old->InCache() && !old->HasRefs());
+    assert(old->IsVisible() && !old->HasRefs());
     LRU_Remove(old);
-    table_.Remove(old->key(), old->hash);
-    old->SetInCache(false);
+    table_.Remove(old);
     assert(usage_ >= old->total_charge);
     usage_ -= old->total_charge;
-    deleted->push_back(old);
+    deleted->push_back(*old);
   }
 }
-int LRUCacheShard::GetHashBits(
+uint8_t LRUCacheShard::CalcHashBits(
     size_t capacity, size_t estimated_value_size,
     CacheMetadataChargePolicy metadata_charge_policy) {
-  LRUHandle* e = reinterpret_cast<LRUHandle*>(
-      new char[sizeof(LRUHandle) - 1 + KEY_LENGTH]);
-  e->key_length = KEY_LENGTH;
-  e->deleter = nullptr;
-  e->refs = 0;
-  e->flags = 0;
-  e->refs = 0;
-  e->CalcTotalCharge(estimated_value_size, metadata_charge_policy);
-  size_t num_entries = capacity / e->total_charge;
-  e->Free();
-  int num_hash_bits = 0;
+  LRUHandle h;
+  h.CalcTotalCharge(estimated_value_size, metadata_charge_policy);
+  size_t num_entries = capacity / h.total_charge;
+  uint8_t num_hash_bits = 0;
   while (num_entries >>= 1) {
     ++num_hash_bits;
   }
@@ -206,7 +309,8 @@ int LRUCacheShard::GetHashBits(
 }
 
 void LRUCacheShard::SetCapacity(size_t capacity) {
-  autovector<LRUHandle*> last_reference_list;
+  assert(false);  // Not supported. TODO(Guido) Support it?
+  autovector<LRUHandle> last_reference_list;
   {
     DMutexLock l(mutex_);
     capacity_ = capacity;
@@ -214,8 +318,8 @@ void LRUCacheShard::SetCapacity(size_t capacity) {
   }
 
   // Free the entries here outside of mutex for performance reasons.
-  for (auto entry : last_reference_list) {
-    entry->Free();
+  for (auto& h : last_reference_list) {
+    h.FreeData();
   }
 }
@@ -224,83 +328,104 @@ void LRUCacheShard::SetStrictCapacityLimit(bool strict_capacity_limit) {
   strict_capacity_limit_ = strict_capacity_limit;
 }
 
-Status LRUCacheShard::InsertItem(LRUHandle* e, Cache::Handle** handle,
-                                 bool free_handle_on_fail) {
+Status LRUCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
+                             size_t charge, Cache::DeleterFn deleter,
+                             Cache::Handle** handle,
+                             Cache::Priority /*priority*/) {
+  if (key.size() != kCacheKeySize) {
+    return Status::NotSupported("FastLRUCache only supports key size " +
+                                std::to_string(kCacheKeySize) + "B");
+  }
+
+  LRUHandle tmp;
+  tmp.value = value;
+  tmp.deleter = deleter;
+  tmp.hash = hash;
+  tmp.CalcTotalCharge(charge, metadata_charge_policy_);
+  for (int i = 0; i < kCacheKeySize; i++) {
+    tmp.key_data[i] = key.data()[i];
+  }
+
   Status s = Status::OK();
-  autovector<LRUHandle*> last_reference_list;
+  autovector<LRUHandle> last_reference_list;
   {
     DMutexLock l(mutex_);
 
     // Free the space following strict LRU policy until enough space
     // is freed or the lru list is empty.
-    EvictFromLRU(e->total_charge, &last_reference_list);
+    EvictFromLRU(tmp.total_charge, &last_reference_list);
 
-    if ((usage_ + e->total_charge) > capacity_ &&
-        (strict_capacity_limit_ || handle == nullptr)) {
-      e->SetInCache(false);
+    if ((usage_ + tmp.total_charge > capacity_ &&
+         (strict_capacity_limit_ || handle == nullptr)) ||
+        table_.GetOccupancy() == size_t{1} << table_.GetLengthBits()) {
+      // Originally, when strict_capacity_limit_ == false and handle != nullptr
+      // (i.e., the user wants to immediately get a reference to the new
+      // handle), the insertion would proceed even if the total charge already
+      // exceeds capacity. We can't do this now, because we can't physically
+      // insert a new handle when the table is at maximum occupancy.
+      // TODO(Guido) Some tests (at least two from cache_test, as well as the
+      // stress tests) currently assume the old behavior.
       if (handle == nullptr) {
         // Don't insert the entry but still return ok, as if the entry inserted
         // into cache and get evicted immediately.
-        last_reference_list.push_back(e);
+        last_reference_list.push_back(tmp);
       } else {
-        if (free_handle_on_fail) {
-          delete[] reinterpret_cast<char*>(e);
-          *handle = nullptr;
-        }
         s = Status::Incomplete("Insert failed due to LRU cache being full.");
       }
     } else {
       // Insert into the cache. Note that the cache might get larger than its
      // capacity if not enough space was freed up.
-      LRUHandle* old = table_.Insert(e);
-      usage_ += e->total_charge;
+      LRUHandle* old;
+      LRUHandle* h = table_.Insert(&tmp, &old);
+      assert(h != nullptr);  // Insertions should never fail.
+      usage_ += h->total_charge;
       if (old != nullptr) {
         s = Status::OkOverwritten();
-        assert(old->InCache());
-        old->SetInCache(false);
+        assert(old->IsVisible());
+        table_.Exclude(old);
         if (!old->HasRefs()) {
           // old is on LRU because it's in cache and its reference count is 0.
           LRU_Remove(old);
+          table_.Remove(old);
           assert(usage_ >= old->total_charge);
           usage_ -= old->total_charge;
-          last_reference_list.push_back(old);
+          last_reference_list.push_back(*old);
         }
       }
       if (handle == nullptr) {
-        LRU_Insert(e);
+        LRU_Insert(h);
      } else {
         // If caller already holds a ref, no need to take one here.
-        if (!e->HasRefs()) {
-          e->Ref();
+        if (!h->HasRefs()) {
+          h->Ref();
         }
-        *handle = reinterpret_cast<Cache::Handle*>(e);
+        *handle = reinterpret_cast<Cache::Handle*>(h);
       }
     }
   }
 
   // Free the entries here outside of mutex for performance reasons.
-  for (auto entry : last_reference_list) {
-    entry->Free();
+  for (auto& h : last_reference_list) {
+    h.FreeData();
   }
 
   return s;
 }
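// Concrete reading of the occupancy check above (illustrative numbers): if
// table_.GetLengthBits() == 10, the table has exactly 1024 slots, so once
// GetOccupancy() reaches 1024 there is physically no slot left for a new
// element. In that case Insert() has to give up (or drop the entry while
// still returning OK when no handle was requested), even when
// strict_capacity_limit_ is false and the charge-based capacity_ would
// otherwise have allowed the insertion to proceed.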
 Cache::Handle* LRUCacheShard::Lookup(const Slice& key, uint32_t hash) {
-  LRUHandle* e = nullptr;
+  LRUHandle* h = nullptr;
   {
     DMutexLock l(mutex_);
-    e = table_.Lookup(key, hash);
-    if (e != nullptr) {
-      assert(e->InCache());
-      if (!e->HasRefs()) {
+    h = table_.Lookup(key, hash);
+    if (h != nullptr) {
+      assert(h->IsVisible());
+      if (!h->HasRefs()) {
         // The entry is in LRU since it's in hash and has no external references
-        LRU_Remove(e);
+        LRU_Remove(h);
       }
-      e->Ref();
+      h->Ref();
     }
   }
-  return reinterpret_cast<Cache::Handle*>(e);
+  return reinterpret_cast<Cache::Handle*>(h);
 }
 
 bool LRUCacheShard::Ref(Cache::Handle* h) {
@@ -316,91 +441,64 @@ bool LRUCacheShard::Release(Cache::Handle* handle, bool erase_if_last_ref) {
   if (handle == nullptr) {
     return false;
   }
-  LRUHandle* e = reinterpret_cast<LRUHandle*>(handle);
+  LRUHandle* h = reinterpret_cast<LRUHandle*>(handle);
+  LRUHandle copy;
   bool last_reference = false;
   {
     DMutexLock l(mutex_);
-    last_reference = e->Unref();
-    if (last_reference && e->InCache()) {
+    last_reference = h->Unref();
+    if (last_reference && h->IsVisible()) {
       // The item is still in cache, and nobody else holds a reference to it.
       if (usage_ > capacity_ || erase_if_last_ref) {
         // The LRU list must be empty since the cache is full.
         assert(lru_.next == &lru_ || erase_if_last_ref);
         // Take this opportunity and remove the item.
-        table_.Remove(e->key(), e->hash);
-        e->SetInCache(false);
+        table_.Remove(h);
       } else {
         // Put the item back on the LRU list, and don't free it.
-        LRU_Insert(e);
+        LRU_Insert(h);
         last_reference = false;
       }
     }
     // If it was the last reference, then decrement the cache usage.
     if (last_reference) {
-      assert(usage_ >= e->total_charge);
-      usage_ -= e->total_charge;
+      assert(usage_ >= h->total_charge);
+      usage_ -= h->total_charge;
+      copy = *h;
     }
   }
 
   // Free the entry here outside of mutex for performance reasons.
   if (last_reference) {
-    e->Free();
+    copy.FreeData();
   }
   return last_reference;
 }
-Status LRUCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
-                             size_t charge, Cache::DeleterFn deleter,
-                             Cache::Handle** handle,
-                             Cache::Priority /*priority*/) {
-  if (key.size() != KEY_LENGTH) {
-    return Status::NotSupported("FastLRUCache only supports key size " +
-                                std::to_string(KEY_LENGTH) + "B");
-  }
-
-  // Allocate the memory here outside of the mutex.
-  // If the cache is full, we'll have to release it.
-  // It shouldn't happen very often though.
-  LRUHandle* e = reinterpret_cast<LRUHandle*>(
-      new char[sizeof(LRUHandle) - 1 + key.size()]);
-  e->value = value;
-  e->flags = 0;
-  e->deleter = deleter;
-  e->key_length = key.size();
-  e->hash = hash;
-  e->refs = 0;
-  e->next = e->prev = nullptr;
-  e->SetInCache(true);
-  e->CalcTotalCharge(charge, metadata_charge_policy_);
-  memcpy(e->key_data, key.data(), key.size());
-
-  return InsertItem(e, handle, /* free_handle_on_fail */ true);
-}
 void LRUCacheShard::Erase(const Slice& key, uint32_t hash) {
-  LRUHandle* e;
+  LRUHandle copy;
   bool last_reference = false;
   {
     DMutexLock l(mutex_);
-    e = table_.Remove(key, hash);
-    if (e != nullptr) {
-      assert(e->InCache());
-      e->SetInCache(false);
-      if (!e->HasRefs()) {
-        // The entry is in LRU since it's in hash and has no external references
-        LRU_Remove(e);
-        assert(usage_ >= e->total_charge);
-        usage_ -= e->total_charge;
+    LRUHandle* h = table_.Lookup(key, hash);
+    if (h != nullptr) {
+      table_.Exclude(h);
+      if (!h->HasRefs()) {
+        // The entry is in LRU since it's in cache and has no external
+        // references
+        LRU_Remove(h);
+        table_.Remove(h);
+        assert(usage_ >= h->total_charge);
+        usage_ -= h->total_charge;
         last_reference = true;
+        copy = *h;
       }
     }
   }
   // Free the entry here outside of mutex for performance reasons.
   // last_reference will only be true if e != nullptr.
   if (last_reference) {
-    e->Free();
+    copy.FreeData();
   }
 }