@@ -73,6 +73,16 @@ inline void FreeDataMarkEmpty(ClockHandle& h, MemoryAllocator* allocator) {
   MarkEmpty(h);
 }
 
+// Called to undo the effect of referencing an entry for internal purposes,
+// so it should not be marked as having been used.
+inline void Unref(const ClockHandle& h, uint64_t count = 1) {
+  // Pretend we never took the reference
+  // WART: there's a tiny chance we release last ref to invisible
+  // entry here. If that happens, we let eviction take care of it.
+  h.meta.fetch_sub(ClockHandle::kAcquireIncrement * count,
+                   std::memory_order_release);
+}
+
 inline bool ClockUpdate(ClockHandle& h) {
   uint64_t meta = h.meta.load(std::memory_order_relaxed);
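
Note: the new Unref helper factors out a speculative-reference pattern used
throughout this file: optimistically bump the acquire counter, inspect the
entry, then subtract the increment again if the entry turns out not to be
wanted. A minimal standalone sketch of that pattern follows; ToyHandle,
ToyUnref, and the kAcquireIncrement value are invented for illustration and
ignore the real packed state bits in ClockHandle::meta.

    #include <atomic>
    #include <cstdint>

    struct ToyHandle {
      // Hypothetical increment; the real constant lives in ClockHandle.
      static constexpr uint64_t kAcquireIncrement = 1;
      std::atomic<uint64_t> meta{0};
    };

    inline void ToyUnref(ToyHandle& h, uint64_t count = 1) {
      // Release pairs with the acquire below; net counter change is zero.
      h.meta.fetch_sub(ToyHandle::kAcquireIncrement * count,
                       std::memory_order_release);
    }

    bool LookupLike(ToyHandle& h, bool key_matches) {
      // Speculatively take a reference before knowing if the entry matches.
      h.meta.fetch_add(ToyHandle::kAcquireIncrement,
                       std::memory_order_acquire);
      if (key_matches) {
        return true;  // keep the reference; caller must release it later
      }
      ToyUnref(h);  // mismatch: pretend we never took the reference
      return false;
    }
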
@@ -231,25 +241,18 @@ inline bool BeginSlotInsert(const ClockHandleBasicData& proto, ClockHandle& h,
       *already_matches = true;
       return false;
     } else {
-      // Mismatch. Pretend we never took the reference
-      old_meta =
-          h.meta.fetch_sub(ClockHandle::kAcquireIncrement * initial_countdown,
-                           std::memory_order_acq_rel);
+      // Mismatch.
+      Unref(h, initial_countdown);
     }
   } else if (UNLIKELY((old_meta >> ClockHandle::kStateShift) ==
                       ClockHandle::kStateInvisible)) {
     // Pretend we never took the reference
-    // WART/FIXME?: there's a tiny chance we release last ref to invisible
-    // entry here. If that happens, we let eviction take care of it.
-    old_meta =
-        h.meta.fetch_sub(ClockHandle::kAcquireIncrement * initial_countdown,
-                         std::memory_order_acq_rel);
+    Unref(h, initial_countdown);
   } else {
     // For other states, incrementing the acquire counter has no effect
     // so we don't need to undo it.
     // Slot not usable / touchable now.
   }
-  (void)old_meta;
   return false;
 }
@@ -289,9 +292,10 @@ bool TryInsert(const ClockHandleBasicData& proto, ClockHandle& h,
   return b;
 }
 
+// Func must be const HandleImpl& -> void callable
 template <class HandleImpl, class Func>
-void ConstApplyToEntriesRange(Func /*const HandleImpl& -> void*/ func,
-                              const HandleImpl* begin, const HandleImpl* end,
+void ConstApplyToEntriesRange(const Func& func, const HandleImpl* begin,
+                              const HandleImpl* end,
                               bool apply_if_will_be_deleted) {
   uint64_t check_state_mask = ClockHandle::kStateShareableBit;
   if (!apply_if_will_be_deleted) {
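
Note: passing the callable as const Func& (rather than by value) avoids
copying a potentially stateful functor on every call, and the new comment
spells out the required shape. A hypothetical caller, invented here for
illustration on top of names visible in this diff (HandleImpl,
GetTotalCharge):

    // Sum the charge of entries not slated for deletion (sketch only).
    size_t TotalCharge(const HyperClockTable::HandleImpl* begin,
                       const HyperClockTable::HandleImpl* end) {
      size_t total = 0;
      ConstApplyToEntriesRange(
          [&total](const HyperClockTable::HandleImpl& h) {
            total += h.GetTotalCharge();
          },
          begin, end, /*apply_if_will_be_deleted=*/false);
      return total;
    }
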
@@ -316,8 +320,7 @@ void ConstApplyToEntriesRange(Func /*const HandleImpl& -> void*/ func,
           func(*h);
         }
         // Pretend we never took the reference
-        h->meta.fetch_sub(ClockHandle::kAcquireIncrement,
-                          std::memory_order_release);
+        Unref(*h);
         // No net change, so don't need to check for overflow
       } else {
         // For other states, incrementing the acquire counter has no effect
@@ -419,22 +422,20 @@ Status BaseClockTable::ChargeUsageMaybeEvictStrict(
     request_evict_charge = 1;
   }
   if (request_evict_charge > 0) {
-    size_t evicted_charge = 0;
-    size_t evicted_count = 0;
-    static_cast<Table*>(this)->Evict(request_evict_charge, &evicted_charge,
-                                     &evicted_count, state);
-    occupancy_.fetch_sub(evicted_count, std::memory_order_release);
-    if (LIKELY(evicted_charge > need_evict_charge)) {
-      assert(evicted_count > 0);
+    EvictionData data;
+    static_cast<Table*>(this)->Evict(request_evict_charge, state, &data);
+    occupancy_.fetch_sub(data.freed_count, std::memory_order_release);
+    if (LIKELY(data.freed_charge > need_evict_charge)) {
+      assert(data.freed_count > 0);
       // Evicted more than enough
-      usage_.fetch_sub(evicted_charge - need_evict_charge,
+      usage_.fetch_sub(data.freed_charge - need_evict_charge,
                        std::memory_order_relaxed);
-    } else if (evicted_charge < need_evict_charge ||
-               (UNLIKELY(need_evict_for_occupancy) && evicted_count == 0)) {
+    } else if (data.freed_charge < need_evict_charge ||
+               (UNLIKELY(need_evict_for_occupancy) && data.freed_count == 0)) {
       // Roll back to old usage minus evicted
-      usage_.fetch_sub(evicted_charge + (new_usage - old_usage),
+      usage_.fetch_sub(data.freed_charge + (new_usage - old_usage),
                        std::memory_order_relaxed);
-      if (evicted_charge < need_evict_charge) {
+      if (data.freed_charge < need_evict_charge) {
         return Status::MemoryLimit(
             "Insert failed because unable to evict entries to stay within "
             "capacity limit.");
@@ -446,7 +447,7 @@ Status BaseClockTable::ChargeUsageMaybeEvictStrict(
     }
     // If we needed to evict something and we are proceeding, we must have
     // evicted something.
-    assert(evicted_count > 0);
+    assert(data.freed_count > 0);
   }
   return Status::OK();
 }
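
Note: the usage accounting in this function is easiest to verify with
concrete numbers. The example below is invented for illustration; it assumes
need_evict_charge is the portion of total_charge that did not fit under
capacity when usage_ was advanced from old_usage to new_usage, which is
consistent with the "roll back to old usage minus evicted" arithmetic above.

    // Assume: capacity = 100, old_usage = 95, total_charge = 10, so the
    // caller advanced usage_ to new_usage = min(100, 105) = 100 and
    // need_evict_charge = 95 + 10 - 100 = 5.
    //
    // Case A: eviction frees data.freed_charge = 8 (> 5).
    //   usage_ -= 8 - 5;           // usage_ = 97 = old_usage + charge - freed
    //
    // Case B: eviction frees data.freed_charge = 3 (< 5).
    //   usage_ -= 3 + (100 - 95);  // usage_ = 92 = old_usage - freed
    //   return Status::MemoryLimit(...);  // insert fully rolled back
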
@@ -488,29 +489,47 @@ inline bool BaseClockTable::ChargeUsageMaybeEvictNonStrict(
     // deal with occupancy
     need_evict_charge = 1;
   }
-  size_t evicted_charge = 0;
-  size_t evicted_count = 0;
+  EvictionData data;
   if (need_evict_charge > 0) {
-    static_cast<Table*>(this)->Evict(need_evict_charge, &evicted_charge,
-                                     &evicted_count, state);
+    static_cast<Table*>(this)->Evict(need_evict_charge, state, &data);
     // Deal with potential occupancy deficit
-    if (UNLIKELY(need_evict_for_occupancy) && evicted_count == 0) {
-      assert(evicted_charge == 0);
+    if (UNLIKELY(need_evict_for_occupancy) && data.freed_count == 0) {
+      assert(data.freed_charge == 0);
       // Can't meet occupancy requirement
       return false;
     } else {
       // Update occupancy for evictions
-      occupancy_.fetch_sub(evicted_count, std::memory_order_release);
+      occupancy_.fetch_sub(data.freed_count, std::memory_order_release);
     }
   }
   // Track new usage even if we weren't able to evict enough
-  usage_.fetch_add(total_charge - evicted_charge, std::memory_order_relaxed);
+  usage_.fetch_add(total_charge - data.freed_charge,
+                   std::memory_order_relaxed);
   // No underflow
   assert(usage_.load(std::memory_order_relaxed) < SIZE_MAX / 2);
   // Success
   return true;
 }
 
+void BaseClockTable::TrackAndReleaseEvictedEntry(
+    ClockHandle* h, BaseClockTable::EvictionData* data) {
+  data->freed_charge += h->GetTotalCharge();
+  data->freed_count += 1;
+  bool took_value_ownership = false;
+  if (eviction_callback_) {
+    // For key reconstructed from hash
+    UniqueId64x2 unhashed;
+    took_value_ownership =
+        eviction_callback_(ClockCacheShard<HyperClockTable>::ReverseHash(
+                               h->GetHash(), &unhashed, hash_seed_),
+                           reinterpret_cast<Cache::Handle*>(h));
+  }
+  if (!took_value_ownership) {
+    h->FreeData(allocator_);
+  }
+  MarkEmpty(*h);
+}
+
 template <class Table>
 Status BaseClockTable::Insert(const ClockHandleBasicData& proto,
                               typename Table::HandleImpl** handle,
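
Note: EvictionData is not declared in these hunks, but from its usage it is
evidently a small aggregate along these lines (a sketch inferred from the
diff, not the verbatim declaration, which lives with BaseClockTable):

    struct EvictionData {
      size_t freed_charge = 0;  // total charge of entries released so far
      size_t freed_count = 0;   // number of entries released so far
    };

Centralizing the per-entry bookkeeping in TrackAndReleaseEvictedEntry means
every eviction path accounts freed charge and count, consults the eviction
callback's ownership decision, frees data only when ownership was not taken,
and finishes with MarkEmpty in exactly one place.
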
@@ -800,23 +819,18 @@ HyperClockTable::HandleImpl* HyperClockTable::Lookup(
             return true;
           } else {
             // Mismatch. Pretend we never took the reference
-            old_meta = h->meta.fetch_sub(ClockHandle::kAcquireIncrement,
-                                         std::memory_order_release);
+            Unref(*h);
           }
         } else if (UNLIKELY((old_meta >> ClockHandle::kStateShift) ==
                             ClockHandle::kStateInvisible)) {
           // Pretend we never took the reference
-          // WART: there's a tiny chance we release last ref to invisible
-          // entry here. If that happens, we let eviction take care of it.
-          old_meta = h->meta.fetch_sub(ClockHandle::kAcquireIncrement,
-                                       std::memory_order_release);
+          Unref(*h);
         } else {
           // For other states, incrementing the acquire counter has no effect
           // so we don't need to undo it. Furthermore, we cannot safely undo
           // it because we did not acquire a read reference to lock the
           // entry in a Shareable state.
         }
-        (void)old_meta;
         return false;
       },
       [&](HandleImpl* h) {
@@ -941,8 +955,7 @@ void HyperClockTable::Erase(const UniqueId64x2& hashed_key) {
               if (refcount > 1) {
                 // Not last ref at some point in time during this Erase call
                 // Pretend we never took the reference
-                h->meta.fetch_sub(ClockHandle::kAcquireIncrement,
-                                  std::memory_order_release);
+                Unref(*h);
                 break;
               } else if (h->meta.compare_exchange_weak(
                              old_meta,
@@ -962,16 +975,12 @@ void HyperClockTable::Erase(const UniqueId64x2& hashed_key) {
             }
           } else {
             // Mismatch. Pretend we never took the reference
-            h->meta.fetch_sub(ClockHandle::kAcquireIncrement,
-                              std::memory_order_release);
+            Unref(*h);
           }
         } else if (UNLIKELY((old_meta >> ClockHandle::kStateShift) ==
                             ClockHandle::kStateInvisible)) {
           // Pretend we never took the reference
-          // WART: there's a tiny chance we release last ref to invisible
-          // entry here. If that happens, we let eviction take care of it.
-          h->meta.fetch_sub(ClockHandle::kAcquireIncrement,
-                            std::memory_order_release);
+          Unref(*h);
         } else {
           // For other states, incrementing the acquire counter has no effect
           // so we don't need to undo it.
@@ -1007,8 +1016,8 @@ void HyperClockTable::EraseUnRefEntries() {
 template <typename MatchFn, typename AbortFn, typename UpdateFn>
 inline HyperClockTable::HandleImpl* HyperClockTable::FindSlot(
-    const UniqueId64x2& hashed_key, MatchFn match_fn, AbortFn abort_fn,
-    UpdateFn update_fn) {
+    const UniqueId64x2& hashed_key, const MatchFn& match_fn,
+    const AbortFn& abort_fn, const UpdateFn& update_fn) {
   // NOTE: upper 32 bits of hashed_key[0] is used for sharding
   //
   // We use double-hashing probing. Every probe in the sequence is a
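
Note: for readers unfamiliar with double hashing, the probe sequence the
comment above refers to has this general shape. The sketch below invents its
own names and derives nothing from hashed_key; the essential property is
that with a power-of-two table, an odd increment is coprime to the table
size, so the sequence eventually visits every slot.

    #include <cstddef>
    #include <cstdint>

    // probe_i = (base + i * increment) mod table_size, increment forced odd.
    size_t Probe(uint64_t base, uint64_t increment, size_t i,
                 size_t table_size) {
      increment |= 1;  // odd => full cycle over a power-of-two table
      return static_cast<size_t>((base + i * increment) & (table_size - 1));
    }
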
@@ -1062,9 +1071,8 @@ inline void HyperClockTable::ReclaimEntryUsage(size_t total_charge) {
   assert(old_usage >= total_charge);
 }
 
-inline void HyperClockTable::Evict(size_t requested_charge,
-                                   size_t* freed_charge, size_t* freed_count,
-                                   InsertState&) {
+inline void HyperClockTable::Evict(size_t requested_charge, InsertState&,
+                                   EvictionData* data) {
   // precondition
   assert(requested_charge > 0);
@@ -1083,33 +1091,18 @@ inline void HyperClockTable::Evict(size_t requested_charge,
   uint64_t max_clock_pointer =
       old_clock_pointer + (ClockHandle::kMaxCountdown << length_bits_);
 
-  // For key reconstructed from hash
-  UniqueId64x2 unhashed;
-
   for (;;) {
     for (size_t i = 0; i < step_size; i++) {
       HandleImpl& h = array_[ModTableSize(Lower32of64(old_clock_pointer + i))];
       bool evicting = ClockUpdate(h);
       if (evicting) {
         Rollback(h.hashed_key, &h);
-        *freed_charge += h.GetTotalCharge();
-        *freed_count += 1;
-        bool took_ownership = false;
-        if (eviction_callback_) {
-          took_ownership =
-              eviction_callback_(ClockCacheShard<HyperClockTable>::ReverseHash(
-                                     h.GetHash(), &unhashed, hash_seed_),
-                                 reinterpret_cast<Cache::Handle*>(&h));
-        }
-        if (!took_ownership) {
-          h.FreeData(allocator_);
-        }
-        MarkEmpty(h);
+        TrackAndReleaseEvictedEntry(&h, data);
       }
     }
 
     // Loop exit condition
-    if (*freed_charge >= requested_charge) {
+    if (data->freed_charge >= requested_charge) {
       return;
     }
     if (old_clock_pointer >= max_clock_pointer) {
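
Note: the loop bound computed above caps a single Evict call at
kMaxCountdown full passes over the table: length_bits_ is evidently the
log2 of the slot count, so (kMaxCountdown << length_bits_) is
kMaxCountdown * table_size probe steps. A toy version of the bounded sweep,
with all names and values invented for illustration (the real ClockUpdate
also ages per-entry countdowns and arbitrates entry states):

    #include <cstddef>
    #include <cstdint>

    struct ToySweep {
      static constexpr uint64_t kMaxCountdown = 3;  // hypothetical value
      uint64_t clock_pointer = 0;
      size_t length_bits = 10;  // table has (1 << length_bits) slots

      // freed(slot) stands in for ClockUpdate + releasing the entry,
      // returning the charge recovered from that slot (0 if none).
      template <typename FreedFn>
      size_t Sweep(size_t requested_charge, FreedFn freed) {
        size_t freed_charge = 0;
        uint64_t max_clock_pointer =
            clock_pointer + (kMaxCountdown << length_bits);
        while (freed_charge < requested_charge &&
               clock_pointer < max_clock_pointer) {
          size_t slot = static_cast<size_t>(clock_pointer) &
                        ((size_t{1} << length_bits) - 1);
          freed_charge += freed(slot);
          ++clock_pointer;
        }
        return freed_charge;
      }
    };
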