rocksdb/db/blob/blob_source.h

// Copyright (c) Meta Platforms, Inc. and affiliates.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
#pragma once

#include <cinttypes>
#include <memory>

#include "cache/cache_helpers.h"
#include "cache/cache_key.h"
#include "db/blob/blob_file_cache.h"
#include "db/blob/blob_read_request.h"
#include "rocksdb/cache.h"
#include "rocksdb/rocksdb_namespace.h"
#include "table/block_based/cachable_entry.h"
#include "util/autovector.h"

namespace ROCKSDB_NAMESPACE {

struct ImmutableOptions;
class Status;
class FilePrefetchBuffer;
class Slice;
class BlobContents;

// BlobSource is a class that provides universal access to blobs, regardless
// of whether they are in the blob cache, secondary cache, or (remote)
// storage. Depending on user settings, it always fetches blobs from the
// multi-tier cache and storage with minimal cost.
class BlobSource {
 public:
  BlobSource(const ImmutableOptions* immutable_options,
             const std::string& db_id, const std::string& db_session_id,
             BlobFileCache* blob_file_cache);

  BlobSource(const BlobSource&) = delete;
  BlobSource& operator=(const BlobSource&) = delete;

  ~BlobSource();
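
  // Illustrative construction sketch (hypothetical, not part of this header):
  // BlobSource is created internally by RocksDB rather than by users, so the
  // values below are placeholders.
  //
  //   const ImmutableOptions* ioptions = ...;  // hypothetical
  //   BlobFileCache* blob_file_cache = ...;    // hypothetical
  //   BlobSource blob_source(ioptions, db_id, db_session_id, blob_file_cache);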

  // Read a blob from the underlying cache or one blob file.
  //
  // If successful, returns ok and sets "*value" to the newly retrieved
  // uncompressed blob. If there was an error while fetching the blob, sets
  // "*value" to empty and returns a non-ok status.
  //
  // Note: For consistency, whether the blob is found in the cache or on disk,
  // sets "*bytes_read" to the size of the on-disk (possibly compressed) blob
  // record.
  Status GetBlob(const ReadOptions& read_options, const Slice& user_key,
                 uint64_t file_number, uint64_t offset, uint64_t file_size,
                 uint64_t value_size, CompressionType compression_type,
                 FilePrefetchBuffer* prefetch_buffer, PinnableSlice* value,
                 uint64_t* bytes_read);
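
  // Usage sketch (hypothetical call site; every argument value below is a
  // placeholder rather than something defined in this header):
  //
  //   PinnableSlice value;
  //   uint64_t bytes_read = 0;
  //   const Status s = blob_source.GetBlob(
  //       ReadOptions(), user_key, file_number, offset, file_size, value_size,
  //       kNoCompression, /*prefetch_buffer=*/nullptr, &value, &bytes_read);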

  // Read multiple blobs from the underlying cache or blob file(s).
  //
  // If successful, returns ok and sets "result" in the elements of "blob_reqs"
  // to the newly retrieved uncompressed blobs. If there was an error while
  // fetching one of the blobs, sets its "result" to empty and sets its
  // corresponding "status" to a non-ok status.
  //
  // Note:
  // - The main difference between this function and MultiGetBlobFromOneFile is
  // that this function can read multiple blobs from multiple blob files.
  //
  // - For consistency, whether the blob is found in the cache or on disk, sets
  // "*bytes_read" to the total size of the on-disk (possibly compressed) blob
  // records.
  void MultiGetBlob(const ReadOptions& read_options,
                    autovector<BlobFileReadRequests>& blob_reqs,
                    uint64_t* bytes_read);
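
  // Sketch of the expected flow (hypothetical; see blob_read_request.h for
  // the exact shape of BlobFileReadRequests):
  //
  //   autovector<BlobFileReadRequests> blob_reqs;
  //   // ... populate one BlobFileReadRequests entry per blob file, each
  //   // carrying that file's BlobReadRequest objects ...
  //   uint64_t bytes_read = 0;
  //   blob_source.MultiGetBlob(ReadOptions(), blob_reqs, &bytes_read);
  //   // On return, check each request's "status" and "result".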

  // Read multiple blobs from the underlying cache or one blob file.
  //
  // If successful, returns ok and sets "result" in the elements of "blob_reqs"
  // to the newly retrieved uncompressed blobs. If there was an error while
  // fetching one of the blobs, sets its "result" to empty and sets its
  // corresponding "status" to a non-ok status.
  //
  // Note:
  // - The main difference between this function and MultiGetBlob is that this
  // function is only used for the case where the demanded blobs are stored in
  // one blob file. MultiGetBlob will call this function multiple times if the
  // demanded blobs are stored in multiple blob files.
  //
  // - For consistency, whether the blob is found in the cache or on disk, sets
  // "*bytes_read" to the total size of the on-disk (possibly compressed) blob
  // records.
  void MultiGetBlobFromOneFile(const ReadOptions& read_options,
                               uint64_t file_number, uint64_t file_size,
                               autovector<BlobReadRequest>& blob_reqs,
                               uint64_t* bytes_read);
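
  // For example (hypothetical), if every request targets blob file 42:
  //
  //   autovector<BlobReadRequest> reqs;  // requests for file 42 only
  //   uint64_t bytes_read = 0;
  //   blob_source.MultiGetBlobFromOneFile(ReadOptions(), /*file_number=*/42,
  //                                       file_size, reqs, &bytes_read);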

  inline Status GetBlobFileReader(
      uint64_t blob_file_number,
      CacheHandleGuard<BlobFileReader>* blob_file_reader) {
    return blob_file_cache_->GetBlobFileReader(blob_file_number,
                                               blob_file_reader);
  }

  inline Cache* GetBlobCache() const { return blob_cache_.get(); }

  bool TEST_BlobInCache(uint64_t file_number, uint64_t file_size,
                        uint64_t offset, size_t* charge = nullptr) const;

 private:
  Status GetBlobFromCache(const Slice& cache_key,
                          CacheHandleGuard<BlobContents>* cached_blob) const;

  Status PutBlobIntoCache(const Slice& cache_key,
                          std::unique_ptr<BlobContents>* blob,
                          CacheHandleGuard<BlobContents>* cached_blob) const;

  static void PinCachedBlob(CacheHandleGuard<BlobContents>* cached_blob,
                            PinnableSlice* value);

  static void PinOwnedBlob(std::unique_ptr<BlobContents>* owned_blob,
                           PinnableSlice* value);

  Cache::Handle* GetEntryFromCache(const Slice& key) const;

  Status InsertEntryIntoCache(const Slice& key, BlobContents* value,
                              size_t charge, Cache::Handle** cache_handle,
                              Cache::Priority priority) const;

  inline CacheKey GetCacheKey(uint64_t file_number, uint64_t /*file_size*/,
                              uint64_t offset) const {
    OffsetableCacheKey base_cache_key(db_id_, db_session_id_, file_number);
    return base_cache_key.WithOffset(offset);
  }
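
  // For illustration (hypothetical values): two blobs in the same file share
  // the same base key and differ only in the offset component, e.g.
  //
  //   CacheKey k1 = GetCacheKey(/*file_number=*/7, 0, /*offset=*/100);
  //   CacheKey k2 = GetCacheKey(/*file_number=*/7, 0, /*offset=*/200);
  //
  // Note that the file size is unused, so a blob's cache key can be computed
  // before the file is ever read.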

  const std::string& db_id_;
  const std::string& db_session_id_;

  Statistics* statistics_;

  // A cache to store blob file readers.
  BlobFileCache* blob_file_cache_;

  // A cache to store uncompressed blobs.
  std::shared_ptr<Cache> blob_cache_;

  // The control option for how the cache tiers will be used. Currently,
  // RocksDB supports the block/blob cache (volatile tier) and the secondary
  // cache (strictly speaking, this is not a non-volatile tier, since the
  // compressed cache in this tier lives in volatile memory).
  const CacheTier lowest_used_cache_tier_;
};

} // namespace ROCKSDB_NAMESPACE