@@ -346,7 +346,14 @@ class PartitionIndexReader : public IndexReader, public Cleanable {
   virtual size_t ApproximateMemoryUsage() const override {
     assert(index_block_);
-    return index_block_->ApproximateMemoryUsage();
+    size_t usage = index_block_->ApproximateMemoryUsage();
+#ifdef ROCKSDB_MALLOC_USABLE_SIZE
+    usage += malloc_usable_size((void*)this);
+#else
+    usage += sizeof(*this);
+#endif  // ROCKSDB_MALLOC_USABLE_SIZE
+    // TODO(myabandeh): more accurate estimate of partition_map_ mem usage
+    return usage;
   }
  private:
@@ -415,7 +422,13 @@ class BinarySearchIndexReader : public IndexReader {
   virtual size_t ApproximateMemoryUsage() const override {
     assert(index_block_);
-    return index_block_->ApproximateMemoryUsage();
+    size_t usage = index_block_->ApproximateMemoryUsage();
+#ifdef ROCKSDB_MALLOC_USABLE_SIZE
+    usage += malloc_usable_size((void*)this);
+#else
+    usage += sizeof(*this);
+#endif  // ROCKSDB_MALLOC_USABLE_SIZE
+    return usage;
   }
  private:
@@ -532,8 +545,14 @@ class HashIndexReader : public IndexReader {
   virtual size_t ApproximateMemoryUsage() const override {
     assert(index_block_);
-    return index_block_->ApproximateMemoryUsage() +
-           prefixes_contents_.data.size();
+    size_t usage = index_block_->ApproximateMemoryUsage();
+    usage += prefixes_contents_.usable_size();
+#ifdef ROCKSDB_MALLOC_USABLE_SIZE
+    usage += malloc_usable_size((void*)this);
+#else
+    usage += sizeof(*this);
+#endif  // ROCKSDB_MALLOC_USABLE_SIZE
+    return usage;
   }
  private:
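
The three hunks above apply the same accounting idiom: charge the wrapped block's memory, then add the footprint of the reader object itself, using the allocator's real allocation size when available and sizeof(*this) otherwise. Below is a standalone sketch of that idiom, not part of the patch; DemoReader, payload_, and HAVE_MALLOC_USABLE_SIZE are illustrative stand-ins (the patch gates on ROCKSDB_MALLOC_USABLE_SIZE), and malloc_usable_size() is assumed to be the glibc call from <malloc.h>, valid here because the object was heap-allocated.

#include <cstddef>
#include <cstdio>
#include <vector>

#if defined(__GLIBC__)
#define HAVE_MALLOC_USABLE_SIZE 1
#include <malloc.h>
#endif

class DemoReader {
 public:
  explicit DemoReader(size_t n) : payload_(n) {}

  // Report the payload plus the footprint of the reader object itself.
  size_t ApproximateMemoryUsage() const {
    size_t usage = payload_.capacity() * sizeof(char);
#ifdef HAVE_MALLOC_USABLE_SIZE
    // Ask the allocator how much it actually handed out for this object;
    // this captures rounding the allocator applied beyond sizeof(*this).
    usage += malloc_usable_size((void*)this);
#else
    // Portable fallback: at least the declared size of the object.
    usage += sizeof(*this);
#endif
    return usage;
  }

 private:
  std::vector<char> payload_;
};

int main() {
  DemoReader* r = new DemoReader(1000);
  std::printf("approx usage: %zu bytes\n", r->ApproximateMemoryUsage());
  delete r;
  return 0;
}
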
@@ -1154,40 +1173,34 @@ Status BlockBasedTable::GetDataBlockFromCache(
   assert(block->value->compression_type() == kNoCompression);
   if (block_cache != nullptr && block->value->cachable() &&
       read_options.fill_cache) {
-    s = block_cache->Insert(
-        block_cache_key, block->value, block->value->usable_size(),
-        &DeleteCachedEntry<Block>, &(block->cache_handle));
-    block_cache->TEST_mark_as_data_block(block_cache_key,
-                                         block->value->usable_size());
+    size_t charge = block->value->ApproximateMemoryUsage();
+    s = block_cache->Insert(block_cache_key, block->value, charge,
+                            &DeleteCachedEntry<Block>,
+                            &(block->cache_handle));
+    block_cache->TEST_mark_as_data_block(block_cache_key, charge);
     if (s.ok()) {
       if (get_context != nullptr) {
         get_context->RecordCounters(BLOCK_CACHE_ADD, 1);
-        get_context->RecordCounters(BLOCK_CACHE_BYTES_WRITE,
-                                    block->value->usable_size());
+        get_context->RecordCounters(BLOCK_CACHE_BYTES_WRITE, charge);
       } else {
         RecordTick(statistics, BLOCK_CACHE_ADD);
-        RecordTick(statistics, BLOCK_CACHE_BYTES_WRITE,
-                   block->value->usable_size());
+        RecordTick(statistics, BLOCK_CACHE_BYTES_WRITE, charge);
       }
       if (is_index) {
         if (get_context != nullptr) {
           get_context->RecordCounters(BLOCK_CACHE_INDEX_ADD, 1);
-          get_context->RecordCounters(BLOCK_CACHE_INDEX_BYTES_INSERT,
-                                      block->value->usable_size());
+          get_context->RecordCounters(BLOCK_CACHE_INDEX_BYTES_INSERT, charge);
         } else {
           RecordTick(statistics, BLOCK_CACHE_INDEX_ADD);
-          RecordTick(statistics, BLOCK_CACHE_INDEX_BYTES_INSERT,
-                     block->value->usable_size());
+          RecordTick(statistics, BLOCK_CACHE_INDEX_BYTES_INSERT, charge);
         }
       } else {
         if (get_context != nullptr) {
           get_context->RecordCounters(BLOCK_CACHE_DATA_ADD, 1);
-          get_context->RecordCounters(BLOCK_CACHE_DATA_BYTES_INSERT,
-                                      block->value->usable_size());
+          get_context->RecordCounters(BLOCK_CACHE_DATA_BYTES_INSERT, charge);
         } else {
           RecordTick(statistics, BLOCK_CACHE_DATA_ADD);
-          RecordTick(statistics, BLOCK_CACHE_DATA_BYTES_INSERT,
-                     block->value->usable_size());
+          RecordTick(statistics, BLOCK_CACHE_DATA_BYTES_INSERT, charge);
         }
       }
     } else {
@@ -1243,7 +1256,7 @@ Status BlockBasedTable::PutDataBlockToCache(
   if (block_cache_compressed != nullptr && raw_block != nullptr &&
       raw_block->cachable()) {
     s = block_cache_compressed->Insert(compressed_block_cache_key, raw_block,
-                                       raw_block->usable_size(),
+                                       raw_block->ApproximateMemoryUsage(),
                                        &DeleteCachedEntry<Block>);
     if (s.ok()) {
       // Avoid the following code to delete this cached block.
@@ -1258,41 +1271,35 @@ Status BlockBasedTable::PutDataBlockToCache(
   // insert into uncompressed block cache
   assert((block->value->compression_type() == kNoCompression));
   if (block_cache != nullptr && block->value->cachable()) {
-    s = block_cache->Insert(
-        block_cache_key, block->value, block->value->usable_size(),
-        &DeleteCachedEntry<Block>, &(block->cache_handle), priority);
-    block_cache->TEST_mark_as_data_block(block_cache_key,
-                                         block->value->usable_size());
+    size_t charge = block->value->ApproximateMemoryUsage();
+    s = block_cache->Insert(block_cache_key, block->value, charge,
+                            &DeleteCachedEntry<Block>, &(block->cache_handle),
+                            priority);
+    block_cache->TEST_mark_as_data_block(block_cache_key, charge);
     if (s.ok()) {
       assert(block->cache_handle != nullptr);
       if (get_context != nullptr) {
         get_context->RecordCounters(BLOCK_CACHE_ADD, 1);
-        get_context->RecordCounters(BLOCK_CACHE_BYTES_WRITE,
-                                    block->value->usable_size());
+        get_context->RecordCounters(BLOCK_CACHE_BYTES_WRITE, charge);
       } else {
         RecordTick(statistics, BLOCK_CACHE_ADD);
-        RecordTick(statistics, BLOCK_CACHE_BYTES_WRITE,
-                   block->value->usable_size());
+        RecordTick(statistics, BLOCK_CACHE_BYTES_WRITE, charge);
       }
       if (is_index) {
         if (get_context != nullptr) {
           get_context->RecordCounters(BLOCK_CACHE_INDEX_ADD, 1);
-          get_context->RecordCounters(BLOCK_CACHE_INDEX_BYTES_INSERT,
-                                      block->value->usable_size());
+          get_context->RecordCounters(BLOCK_CACHE_INDEX_BYTES_INSERT, charge);
         } else {
           RecordTick(statistics, BLOCK_CACHE_INDEX_ADD);
-          RecordTick(statistics, BLOCK_CACHE_INDEX_BYTES_INSERT,
-                     block->value->usable_size());
+          RecordTick(statistics, BLOCK_CACHE_INDEX_BYTES_INSERT, charge);
         }
       } else {
         if (get_context != nullptr) {
           get_context->RecordCounters(BLOCK_CACHE_DATA_ADD, 1);
-          get_context->RecordCounters(BLOCK_CACHE_DATA_BYTES_INSERT,
-                                      block->value->usable_size());
+          get_context->RecordCounters(BLOCK_CACHE_DATA_BYTES_INSERT, charge);
         } else {
           RecordTick(statistics, BLOCK_CACHE_DATA_ADD);
-          RecordTick(statistics, BLOCK_CACHE_DATA_BYTES_INSERT,
-                     block->value->usable_size());
+          RecordTick(statistics, BLOCK_CACHE_DATA_BYTES_INSERT, charge);
         }
       }
       assert(reinterpret_cast<Block*>(
@@ -1429,24 +1436,23 @@ BlockBasedTable::CachableEntry<FilterBlockReader> BlockBasedTable::GetFilter(
     filter = ReadFilter(prefetch_buffer, filter_blk_handle,
                         is_a_filter_partition, prefix_extractor);
     if (filter != nullptr) {
+      size_t usage = filter->ApproximateMemoryUsage();
       Status s = block_cache->Insert(
-          key, filter, filter->size(), &DeleteCachedFilterEntry, &cache_handle,
+          key, filter, usage, &DeleteCachedFilterEntry, &cache_handle,
           rep_->table_options.cache_index_and_filter_blocks_with_high_priority
               ? Cache::Priority::HIGH
               : Cache::Priority::LOW);
       if (s.ok()) {
         if (get_context != nullptr) {
           get_context->RecordCounters(BLOCK_CACHE_ADD, 1);
-          get_context->RecordCounters(BLOCK_CACHE_BYTES_WRITE, filter->size());
+          get_context->RecordCounters(BLOCK_CACHE_BYTES_WRITE, usage);
           get_context->RecordCounters(BLOCK_CACHE_FILTER_ADD, 1);
-          get_context->RecordCounters(BLOCK_CACHE_FILTER_BYTES_INSERT,
-                                      filter->size());
+          get_context->RecordCounters(BLOCK_CACHE_FILTER_BYTES_INSERT, usage);
         } else {
           RecordTick(statistics, BLOCK_CACHE_ADD);
-          RecordTick(statistics, BLOCK_CACHE_BYTES_WRITE, filter->size());
+          RecordTick(statistics, BLOCK_CACHE_BYTES_WRITE, usage);
           RecordTick(statistics, BLOCK_CACHE_FILTER_ADD);
-          RecordTick(statistics, BLOCK_CACHE_FILTER_BYTES_INSERT,
-                     filter->size());
+          RecordTick(statistics, BLOCK_CACHE_FILTER_BYTES_INSERT, usage);
         }
       } else {
         RecordTick(statistics, BLOCK_CACHE_ADD_FAILURES);
@@ -1512,27 +1518,27 @@ InternalIterator* BlockBasedTable::NewIndexIterator(
     TEST_SYNC_POINT("BlockBasedTable::NewIndexIterator::thread1:1");
     TEST_SYNC_POINT("BlockBasedTable::NewIndexIterator::thread2:3");
     TEST_SYNC_POINT("BlockBasedTable::NewIndexIterator::thread1:4");
+    size_t charge = 0;
     if (s.ok()) {
       assert(index_reader != nullptr);
+      charge = index_reader->ApproximateMemoryUsage();
       s = block_cache->Insert(
-          key, index_reader, index_reader->usable_size(),
-          &DeleteCachedIndexEntry, &cache_handle,
+          key, index_reader, charge, &DeleteCachedIndexEntry, &cache_handle,
          rep_->table_options.cache_index_and_filter_blocks_with_high_priority
              ? Cache::Priority::HIGH
              : Cache::Priority::LOW);
     }
     if (s.ok()) {
-      size_t usable_size = index_reader->usable_size();
       if (get_context != nullptr) {
         get_context->RecordCounters(BLOCK_CACHE_ADD, 1);
-        get_context->RecordCounters(BLOCK_CACHE_BYTES_WRITE, usable_size);
+        get_context->RecordCounters(BLOCK_CACHE_BYTES_WRITE, charge);
       } else {
         RecordTick(statistics, BLOCK_CACHE_ADD);
-        RecordTick(statistics, BLOCK_CACHE_BYTES_WRITE, usable_size);
+        RecordTick(statistics, BLOCK_CACHE_BYTES_WRITE, charge);
       }
       RecordTick(statistics, BLOCK_CACHE_INDEX_ADD);
-      RecordTick(statistics, BLOCK_CACHE_INDEX_BYTES_INSERT, usable_size);
+      RecordTick(statistics, BLOCK_CACHE_INDEX_BYTES_INSERT, charge);
     } else {
       if (index_reader != nullptr) {
         delete index_reader;
@@ -1661,8 +1667,9 @@ BlockIter* BlockBasedTable::NewDataBlockIterator(
                static_cast<int>(kExtraCacheKeyPrefix + kMaxVarint64Length));
         Slice unique_key =
             Slice(cache_key, static_cast<size_t>(end - cache_key));
-        s = block_cache->Insert(unique_key, nullptr, block.value->usable_size(),
-                                nullptr, &cache_handle);
+        s = block_cache->Insert(unique_key, nullptr,
+                                block.value->ApproximateMemoryUsage(), nullptr,
+                                &cache_handle);
         if (s.ok()) {
           if (cache_handle != nullptr) {
             iter->RegisterCleanup(&ForceReleaseCachedEntry, block_cache,
@@ -2998,7 +3005,7 @@ void DeleteCachedFilterEntry(const Slice& /*key*/, void* value) {
   FilterBlockReader* filter = reinterpret_cast<FilterBlockReader*>(value);
   if (filter->statistics() != nullptr) {
     RecordTick(filter->statistics(), BLOCK_CACHE_FILTER_BYTES_EVICT,
-               filter->size());
+               filter->ApproximateMemoryUsage());
   }
   delete filter;
 }
@@ -3007,7 +3014,7 @@ void DeleteCachedIndexEntry(const Slice& /*key*/, void* value) {
   IndexReader* index_reader = reinterpret_cast<IndexReader*>(value);
   if (index_reader->statistics() != nullptr) {
     RecordTick(index_reader->statistics(), BLOCK_CACHE_INDEX_BYTES_EVICT,
-               index_reader->usable_size());
+               index_reader->ApproximateMemoryUsage());
   }
   delete index_reader;
 }
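
For reference, a minimal standalone sketch of the charge pattern the cache-insert hunks converge on: compute the charge once from ApproximateMemoryUsage() and reuse the same value for the insert and for the BYTES_WRITE / *_BYTES_INSERT counters, so the cache's view of its own size and the statistics cannot drift apart. ToyBlock, ToyCache, and ToyStats are hypothetical stand-ins for illustration only, not RocksDB types.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <map>
#include <string>

struct ToyBlock {
  std::string data;
  // Stand-in for Block::ApproximateMemoryUsage(): payload plus object header.
  size_t ApproximateMemoryUsage() const { return data.size() + sizeof(*this); }
};

struct ToyStats {
  uint64_t adds = 0;
  uint64_t bytes_written = 0;
};

class ToyCache {
 public:
  explicit ToyCache(size_t capacity) : capacity_(capacity) {}

  // Admit the entry only if the charge fits; the charge is what counts
  // against capacity, so it should be the same number the stats report.
  bool Insert(const std::string& key, ToyBlock* value, size_t charge) {
    if (used_ + charge > capacity_) {
      return false;
    }
    used_ += charge;
    entries_[key] = value;
    return true;
  }

 private:
  size_t capacity_;
  size_t used_ = 0;
  std::map<std::string, ToyBlock*> entries_;
};

int main() {
  ToyCache cache(1 << 20);
  ToyStats stats;
  ToyBlock block{std::string(4096, 'x')};

  // Compute the charge once ...
  size_t charge = block.ApproximateMemoryUsage();
  // ... and feed the same number to the insert and to the counters.
  if (cache.Insert("block-1", &block, charge)) {
    stats.adds += 1;
    stats.bytes_written += charge;
  }
  std::printf("adds=%llu bytes_written=%llu\n",
              static_cast<unsigned long long>(stats.adds),
              static_cast<unsigned long long>(stats.bytes_written));
  return 0;
}
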