@@ -24,7 +24,7 @@ LRUHandleTable::LRUHandleTable() : list_(nullptr), length_(0), elems_(0) {
LRUHandleTable::~LRUHandleTable() {
  ApplyToAllCacheEntries([](LRUHandle* h) {
    if (h->refs == 1) {
    if (!h->HasRefs()) {
      h->Free();
    }
  });
@@ -113,29 +113,17 @@ LRUCacheShard::LRUCacheShard(size_t capacity, bool strict_capacity_limit,
  SetCapacity(capacity);
}

LRUCacheShard::~LRUCacheShard() {}

bool LRUCacheShard::Unref(LRUHandle* e) {
  assert(e->refs > 0);
  e->refs--;
  return e->refs == 0;
}
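
// Note: the refactored code below relies on LRUHandle's own reference-count
// helpers, which are defined on LRUHandle elsewhere and not shown in these
// hunks. A minimal sketch of what they are assumed to look like, with refs
// now counting only external (caller-held) references:
//
//   void Ref() { refs++; }
//   bool Unref() {
//     assert(refs > 0);
//     refs--;
//     return refs == 0;  // true when the last external reference is dropped
//   }
//   bool HasRefs() const { return refs > 0; }
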
// Call deleter and free
void LRUCacheShard::EraseUnRefEntries() {
  autovector<LRUHandle*> last_reference_list;
  {
    MutexLock l(&mutex_);
    while (lru_.next != &lru_) {
      LRUHandle* old = lru_.next;
      assert(old->InCache());
      assert(old->refs ==
             1);  // LRU list contains elements which may be evicted
      // LRU list contains only elements which can be evicted
      assert(old->InCache() && !old->HasRefs());
      LRU_Remove(old);
      table_.Remove(old->key(), old->hash);
      old->SetInCache(false);
      Unref(old);
      usage_ -= old->charge;
      last_reference_list.push_back(old);
    }
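    // (The entries collected in last_reference_list are presumably then freed
    // after the mutex is released, mirroring SetCapacity() and Insert() below.)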
@@ -148,22 +136,27 @@ void LRUCacheShard::EraseUnRefEntries() {
void LRUCacheShard::ApplyToAllCacheEntries(void (*callback)(void*, size_t),
                                           bool thread_safe) {
  if (thread_safe) {
    mutex_.Lock();
  }
  const auto applyCallback = [&]() {
    table_.ApplyToAllCacheEntries(
        [callback](LRUHandle* h) { callback(h->value, h->charge); });
  };

  if (thread_safe) {
    mutex_.Unlock();
    MutexLock l(&mutex_);
    applyCallback();
  } else {
    applyCallback();
  }
}
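
// Design note: MutexLock is an RAII guard, so in the refactored version the
// mutex is released automatically when the guard goes out of scope, removing
// the need to pair Lock() and Unlock() manually in the thread_safe path.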

void LRUCacheShard::TEST_GetLRUList(LRUHandle** lru, LRUHandle** lru_low_pri) {
  MutexLock l(&mutex_);
  *lru = &lru_;
  *lru_low_pri = lru_low_pri_;
}

size_t LRUCacheShard::TEST_GetLRUSize() {
  MutexLock l(&mutex_);
  LRUHandle* lru_handle = lru_.next;
  size_t lru_size = 0;
  while (lru_handle != &lru_) {
@@ -231,14 +224,13 @@ void LRUCacheShard::MaintainPoolSize() {
void LRUCacheShard::EvictFromLRU(size_t charge,
                                 autovector<LRUHandle*>* deleted) {
  while (usage_ + charge > capacity_ && lru_.next != &lru_) {
  while ((usage_ + charge) > capacity_ && lru_.next != &lru_) {
    LRUHandle* old = lru_.next;
    assert(old->InCache());
    assert(old->refs == 1);  // LRU list contains elements which may be evicted
    // LRU list contains only elements which can be evicted
    assert(old->InCache() && !old->HasRefs());
    LRU_Remove(old);
    table_.Remove(old->key(), old->hash);
    old->SetInCache(false);
    Unref(old);
    usage_ -= old->charge;
    deleted->push_back(old);
  }
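  // Entries on the LRU list have no external references, so everything moved
  // into *deleted here can safely be freed by the caller once the mutex is
  // released.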
@@ -252,8 +244,8 @@ void LRUCacheShard::SetCapacity(size_t capacity) {
    high_pri_pool_capacity_ = capacity_ * high_pri_pool_ratio_;
    EvictFromLRU(0, &last_reference_list);
  }

  // we free the entries here outside of mutex for
  // performance reasons
  // Free the entries outside of mutex for performance reasons
  for (auto entry : last_reference_list) {
    entry->Free();
  }
@@ -269,22 +261,22 @@ Cache::Handle* LRUCacheShard::Lookup(const Slice& key, uint32_t hash) {
  LRUHandle* e = table_.Lookup(key, hash);
  if (e != nullptr) {
    assert(e->InCache());
    if (e->refs == 1) {
    if (!e->HasRefs()) {
      // The entry is in LRU since it's in hash and has no external references
      LRU_Remove(e);
    }
    e->refs++;
    e->Ref();
    e->SetHit();
  }
  return reinterpret_cast<Cache::Handle*>(e);
}
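
// A successful Lookup() therefore returns the entry pinned: it is taken off
// the LRU list (if it was there) and carries an extra external reference that
// the caller must give back via Release().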

bool LRUCacheShard::Ref(Cache::Handle* h) {
  LRUHandle* handle = reinterpret_cast<LRUHandle*>(h);
  LRUHandle* e = reinterpret_cast<LRUHandle*>(h);
  MutexLock l(&mutex_);
  if (handle->InCache() && handle->refs == 1) {
    LRU_Remove(handle);
  }
  handle->refs++;
  // To create another reference - entry must be already externally referenced
  assert(e->HasRefs());
  e->Ref();
  return true;
}
@@ -303,30 +295,27 @@ bool LRUCacheShard::Release(Cache::Handle* handle, bool force_erase) {
  bool last_reference = false;
  {
    MutexLock l(&mutex_);
    last_reference = Unref(e);
    if (last_reference) {
      usage_ -= e->charge;
    }
    if (e->refs == 1 && e->InCache()) {
    last_reference = e->Unref();
    if (last_reference && e->InCache()) {
      // The item is still in cache, and nobody else holds a reference to it
      if (usage_ > capacity_ || force_erase) {
        // the cache is full
        // The LRU list must be empty since the cache is full
        assert(!(usage_ > capacity_) || lru_.next == &lru_);
        // take this opportunity and remove the item
        assert(lru_.next == &lru_ || force_erase);
        // Take this opportunity and remove the item
        table_.Remove(e->key(), e->hash);
        e->SetInCache(false);
        Unref(e);
        usage_ -= e->charge;
        last_reference = true;
      } else {
        // put the item on the list to be potentially freed
        // Put the item back on the LRU list, and don't free it
        LRU_Insert(e);
        last_reference = false;
      }
    }
    if (last_reference) {
      usage_ -= e->charge;
    }
  }

  // free outside of mutex
  // Free the entry here outside of mutex for performance reasons
  if (last_reference) {
    e->Free();
  }
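
// A rough walk-through of the refactored Release() paths:
//   1. Other external references remain: Unref() returns false and nothing
//      else happens.
//   2. Last reference dropped, entry still in cache, cache not over capacity
//      and !force_erase: the entry goes back onto the LRU list and is kept.
//   3. Last reference dropped, entry still in cache, but over capacity or
//      force_erase: the entry is removed from the table, usage_ is reduced,
//      and it is freed outside the mutex.
//   4. Last reference dropped on an entry already erased from the cache:
//      usage_ is reduced and the entry is freed.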
@@ -342,7 +331,7 @@ Status LRUCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
  // It shouldn't happen very often though.
  LRUHandle* e = reinterpret_cast<LRUHandle*>(
      new char[sizeof(LRUHandle) - 1 + key.size()]);
  Status s;
  Status s = Status::OK();
  autovector<LRUHandle*> last_reference_list;

  e->value = value;
@@ -351,9 +340,7 @@ Status LRUCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
  e->key_length = key.size();
  e->flags = 0;
  e->hash = hash;
  e->refs = (handle == nullptr
                 ? 1
                 : 2);  // One from LRUCache, one for the returned handle
  e->refs = 0;
  e->next = e->prev = nullptr;
  e->SetInCache(true);
  e->SetPriority(priority);
@@ -366,11 +353,12 @@ Status LRUCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
    // is freed or the lru list is empty
    EvictFromLRU(charge, &last_reference_list);

    if (usage_ - lru_usage_ + charge > capacity_ &&
    if ((usage_ + charge) > capacity_ &&
        (strict_capacity_limit_ || handle == nullptr)) {
      if (handle == nullptr) {
        // Don't insert the entry but still return ok, as if the entry were
        // inserted into the cache and evicted immediately.
        e->SetInCache(false);
        last_reference_list.push_back(e);
      } else {
        delete[] reinterpret_cast<char*>(e);
@@ -378,32 +366,30 @@ Status LRUCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
        s = Status::Incomplete("Insert failed due to LRU cache being full.");
      }
    } else {
      // insert into the cache
      // note that the cache might get larger than its capacity if not enough
      // space was freed
      // Insert into the cache. Note that the cache might get larger than its
      // capacity if not enough space was freed up.
      LRUHandle* old = table_.Insert(e);
      usage_ += e->charge;
      if (old != nullptr) {
        assert(old->InCache());
        old->SetInCache(false);
        if (Unref(old)) {
          usage_ -= old->charge;
          // old is on LRU because it's in cache and its reference count
          // was just 1 (Unref returned 0)
        if (!old->HasRefs()) {
          // old is on LRU because it's in cache and its reference count is 0
          LRU_Remove(old);
          usage_ -= old->charge;
          last_reference_list.push_back(old);
        }
      }
      if (handle == nullptr) {
        LRU_Insert(e);
      } else {
        e->Ref();
        *handle = reinterpret_cast<Cache::Handle*>(e);
      }
      s = Status::OK();
    }
  }

  // we free the entries here outside of mutex for
  // performance reasons
  // Free the entries here outside of mutex for performance reasons
  for (auto entry : last_reference_list) {
    entry->Free();
  }
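
// Note the two insert modes visible above: with handle == nullptr the new
// entry goes straight onto the LRU list (or is dropped immediately if the
// cache is full), while with a non-null handle the entry is returned pinned
// with one external reference and stays off the LRU list until Release().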
@@ -418,18 +404,18 @@ void LRUCacheShard::Erase(const Slice& key, uint32_t hash) {
    MutexLock l(&mutex_);
    e = table_.Remove(key, hash);
    if (e != nullptr) {
      last_reference = Unref(e);
      if (last_reference) {
        usage_ -= e->charge;
      }
      if (last_reference && e->InCache()) {
      assert(e->InCache());
      e->SetInCache(false);
      if (!e->HasRefs()) {
        // The entry is in LRU since it's in hash and has no external references
        LRU_Remove(e);
        usage_ -= e->charge;
        last_reference = true;
      }
      e->SetInCache(false);
    }
  }

  // mutex not held here
  // Free the entry here outside of mutex for performance reasons
  // last_reference will only be true if e != nullptr
  if (last_reference) {
    e->Free();
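
// Taken together, the hunks above suggest the new invariants: e->refs counts
// only external references, an entry sits on the LRU list exactly when it is
// in the cache and has no external references, and usage_ stays charged until
// an entry is both out of the hash table and unreferenced, at which point it
// is freed outside the mutex.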