@@ -219,6 +219,7 @@ TEST_F(BlobSourceTest, GetBlobsFromCache) {
kNoCompression , prefetch_buffer , & values [ i ] ,
& bytes_read ) ) ;
ASSERT_EQ ( values [ i ] , blobs [ i ] ) ;
ASSERT_FALSE ( values [ i ] . IsPinned ( ) ) ;
ASSERT_EQ ( bytes_read ,
BlobLogRecord : : kHeaderSize + keys [ i ] . size ( ) + blob_sizes [ i ] ) ;
@@ -256,6 +257,7 @@ TEST_F(BlobSourceTest, GetBlobsFromCache) {
kNoCompression , prefetch_buffer , & values [ i ] ,
& bytes_read ) ) ;
ASSERT_EQ ( values [ i ] , blobs [ i ] ) ;
ASSERT_TRUE ( values [ i ] . IsPinned ( ) ) ;
ASSERT_EQ ( bytes_read ,
BlobLogRecord : : kHeaderSize + keys [ i ] . size ( ) + blob_sizes [ i ] ) ;
@@ -299,6 +301,7 @@ TEST_F(BlobSourceTest, GetBlobsFromCache) {
kNoCompression , prefetch_buffer , & values [ i ] ,
& bytes_read ) ) ;
ASSERT_EQ ( values [ i ] , blobs [ i ] ) ;
ASSERT_TRUE ( values [ i ] . IsPinned ( ) ) ;
ASSERT_EQ ( bytes_read ,
BlobLogRecord : : kHeaderSize + keys [ i ] . size ( ) + blob_sizes [ i ] ) ;
@@ -337,6 +340,7 @@ TEST_F(BlobSourceTest, GetBlobsFromCache) {
kNoCompression , prefetch_buffer , & values [ i ] ,
& bytes_read ) ) ;
ASSERT_EQ ( values [ i ] , blobs [ i ] ) ;
ASSERT_TRUE ( values [ i ] . IsPinned ( ) ) ;
ASSERT_EQ ( bytes_read ,
BlobLogRecord : : kHeaderSize + keys [ i ] . size ( ) + blob_sizes [ i ] ) ;
@@ -383,6 +387,7 @@ TEST_F(BlobSourceTest, GetBlobsFromCache) {
& bytes_read )
. IsIncomplete ( ) ) ;
ASSERT_TRUE ( values [ i ] . empty ( ) ) ;
ASSERT_FALSE ( values [ i ] . IsPinned ( ) ) ;
ASSERT_EQ ( bytes_read , 0 ) ;
ASSERT_FALSE ( blob_source . TEST_BlobInCache ( blob_file_number , file_size ,
@@ -424,6 +429,7 @@ TEST_F(BlobSourceTest, GetBlobsFromCache) {
& bytes_read )
. IsIOError ( ) ) ;
ASSERT_TRUE ( values [ i ] . empty ( ) ) ;
ASSERT_FALSE ( values [ i ] . IsPinned ( ) ) ;
ASSERT_EQ ( bytes_read , 0 ) ;
ASSERT_FALSE ( blob_source . TEST_BlobInCache ( file_number , file_size ,
@@ -856,6 +862,7 @@ TEST_F(BlobSourceTest, MultiGetBlobsFromCache) {
if ( i % 2 = = 0 ) {
ASSERT_OK ( statuses_buf [ i ] ) ;
ASSERT_EQ ( value_buf [ i ] , blobs [ i ] ) ;
ASSERT_TRUE ( value_buf [ i ] . IsPinned ( ) ) ;
fs_read_bytes + =
blob_sizes [ i ] + keys [ i ] . size ( ) + BlobLogRecord : : kHeaderSize ;
ASSERT_TRUE ( blob_source . TEST_BlobInCache ( blob_file_number , file_size ,
@@ -864,6 +871,7 @@ TEST_F(BlobSourceTest, MultiGetBlobsFromCache) {
} else {
statuses_buf [ i ] . PermitUncheckedError ( ) ;
ASSERT_TRUE ( value_buf [ i ] . empty ( ) ) ;
ASSERT_FALSE ( value_buf [ i ] . IsPinned ( ) ) ;
ASSERT_FALSE ( blob_source . TEST_BlobInCache ( blob_file_number , file_size ,
blob_offsets [ i ] ) ) ;
}
@@ -896,6 +904,7 @@ TEST_F(BlobSourceTest, MultiGetBlobsFromCache) {
kNoCompression , prefetch_buffer ,
& value_buf [ i ] , & bytes_read ) ) ;
ASSERT_EQ ( value_buf [ i ] , blobs [ i ] ) ;
ASSERT_TRUE ( value_buf [ i ] . IsPinned ( ) ) ;
ASSERT_EQ ( bytes_read ,
BlobLogRecord : : kHeaderSize + keys [ i ] . size ( ) + blob_sizes [ i ] ) ;
@@ -921,6 +930,7 @@ TEST_F(BlobSourceTest, MultiGetBlobsFromCache) {
for ( size_t i = 0 ; i < num_blobs ; + + i ) {
ASSERT_OK ( statuses_buf [ i ] ) ;
ASSERT_EQ ( value_buf [ i ] , blobs [ i ] ) ;
ASSERT_TRUE ( value_buf [ i ] . IsPinned ( ) ) ;
ASSERT_TRUE ( blob_source . TEST_BlobInCache ( blob_file_number , file_size ,
blob_offsets [ i ] ) ) ;
blob_bytes + = blob_sizes [ i ] ;
@@ -969,6 +979,7 @@ TEST_F(BlobSourceTest, MultiGetBlobsFromCache) {
for ( size_t i = 0 ; i < num_blobs ; + + i ) {
ASSERT_TRUE ( statuses_buf [ i ] . IsIncomplete ( ) ) ;
ASSERT_TRUE ( value_buf [ i ] . empty ( ) ) ;
ASSERT_FALSE ( value_buf [ i ] . IsPinned ( ) ) ;
ASSERT_FALSE ( blob_source . TEST_BlobInCache ( blob_file_number , file_size ,
blob_offsets [ i ] ) ) ;
}
@@ -1012,6 +1023,7 @@ TEST_F(BlobSourceTest, MultiGetBlobsFromCache) {
for ( size_t i = 0 ; i < num_blobs ; + + i ) {
ASSERT_TRUE ( statuses_buf [ i ] . IsIOError ( ) ) ;
ASSERT_TRUE ( value_buf [ i ] . empty ( ) ) ;
ASSERT_FALSE ( value_buf [ i ] . IsPinned ( ) ) ;
ASSERT_FALSE ( blob_source . TEST_BlobInCache ( non_existing_file_number ,
file_size , blob_offsets [ i ] ) ) ;
}
@@ -1153,6 +1165,9 @@ TEST_F(BlobSecondaryCacheTest, GetBlobsFromSecondaryCache) {
ASSERT_TRUE (
blob_source . TEST_BlobInCache ( file_number , file_size , blob_offsets [ 0 ] ) ) ;
// Release cache handle
values [ 0 ] . Reset ( ) ;
// key0 should be demoted to the secondary cache, and key1 should be filled
// to the primary cache from the blob file.
ASSERT_OK ( blob_source . GetBlob ( read_options , keys [ 1 ] , file_number ,
@@ -1163,6 +1178,9 @@ TEST_F(BlobSecondaryCacheTest, GetBlobsFromSecondaryCache) {
ASSERT_TRUE (
blob_source . TEST_BlobInCache ( file_number , file_size , blob_offsets [ 1 ] ) ) ;
// Release cache handle
values [ 1 ] . Reset ( ) ;
OffsetableCacheKey base_cache_key ( db_id_ , db_session_id_ , file_number ) ;
// blob_cache here only looks at the primary cache since we didn't provide
@@ -1219,6 +1237,9 @@ TEST_F(BlobSecondaryCacheTest, GetBlobsFromSecondaryCache) {
& values [ 0 ] , nullptr /* bytes_read */ ) ) ;
ASSERT_EQ ( values [ 0 ] , blobs [ 0 ] ) ;
// Release cache handle
values [ 0 ] . Reset ( ) ;
// key0 should be in the primary cache.
CacheKey cache_key0 = base_cache_key . WithOffset ( blob_offsets [ 0 ] ) ;
const Slice key0 = cache_key0 . AsSlice ( ) ;
@@ -1379,11 +1400,11 @@ TEST_F(BlobSourceCacheReservationTest, SimpleCacheReservation) {
ReadOptions read_options ;
read_options . verify_checksums = true ;
std : : vector < PinnableSlice > values ( keys_ . size ( ) ) ;
{
read_options . fill_cache = false ;
std : : vector < PinnableSlice > values ( keys_ . size ( ) ) ;
for ( size_t i = 0 ; i < kNumBlobs ; + + i ) {
ASSERT_OK ( blob_source . GetBlob (
read_options , keys_ [ i ] , kBlobFileNumber , blob_offsets [ i ] ,
@@ -1397,6 +1418,8 @@ TEST_F(BlobSourceCacheReservationTest, SimpleCacheReservation) {
{
read_options . fill_cache = true ;
std : : vector < PinnableSlice > values ( keys_ . size ( ) ) ;
// num_blobs is 16, so the total blob cache usage is less than a single
// dummy entry. Therefore, cache reservation manager only reserves one dummy
// entry here.
@@ -1434,8 +1457,8 @@ TEST_F(BlobSourceCacheReservationTest, SimpleCacheReservation) {
// cache usage after erasing the cache entry.
blob_source . GetBlobCache ( ) - > Erase ( cache_key . AsSlice ( ) ) ;
if ( i = = kNumBlobs - 1 ) {
// The last blob is not in the cache. cache_res_mgr should not reserve
// any space for it.
// All the blobs got removed from the cache. cache_res_mgr should not
// reserve any space for them.
ASSERT_EQ ( cache_res_mgr - > GetTotalReservedCacheSize ( ) , 0 ) ;
} else {
ASSERT_EQ ( cache_res_mgr - > GetTotalReservedCacheSize ( ) , kSizeDummyEntry ) ;
@@ -1498,11 +1521,11 @@ TEST_F(BlobSourceCacheReservationTest, IncreaseCacheReservationOnFullCache) {
ReadOptions read_options ;
read_options . verify_checksums = true ;
std : : vector < PinnableSlice > values ( keys_ . size ( ) ) ;
{
read_options . fill_cache = false ;
std : : vector < PinnableSlice > values ( keys_ . size ( ) ) ;
for ( size_t i = 0 ; i < kNumBlobs ; + + i ) {
ASSERT_OK ( blob_source . GetBlob (
read_options , keys_ [ i ] , kBlobFileNumber , blob_offsets [ i ] ,
@@ -1516,6 +1539,8 @@ TEST_F(BlobSourceCacheReservationTest, IncreaseCacheReservationOnFullCache) {
{
read_options . fill_cache = true ;
std : : vector < PinnableSlice > values ( keys_ . size ( ) ) ;
// Since we resized each blob to be kSizeDummyEntry / (num_blobs / 2), we
// can't fit all the blobs in the cache at the same time, which means we
// should observe cache evictions once we reach the cache's capacity.
@@ -1528,6 +1553,9 @@ TEST_F(BlobSourceCacheReservationTest, IncreaseCacheReservationOnFullCache) {
blob_file_size_ , blob_sizes [ i ] , kNoCompression ,
nullptr /* prefetch_buffer */ , & values [ i ] , nullptr /* bytes_read */ ) ) ;
// Release cache handle
values [ i ] . Reset ( ) ;
if ( i < kNumBlobs / 2 - 1 ) {
size_t charge = 0 ;
ASSERT_TRUE ( blob_source . TEST_BlobInCache (