// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#pragma once

#include <array>
#include <cstdint>
#include <string>

#include "file/file_prefetch_buffer.h"
#include "file/random_access_file_reader.h"
#include "memory/memory_allocator.h"
#include "options/cf_options.h"
#include "port/malloc.h"
#include "port/port.h"  // noexcept
#include "rocksdb/slice.h"
#include "rocksdb/status.h"
#include "rocksdb/table.h"
#include "util/hash.h"

namespace ROCKSDB_NAMESPACE {

class RandomAccessFile;
struct ReadOptions;

bool ShouldReportDetailedTime(Env* env, Statistics* stats);

// The length of the magic number in bytes.
constexpr uint32_t kMagicNumberLengthByte = 8;

// BlockHandle is a pointer to the extent of a file that stores a data
// block or a meta block.
class BlockHandle {
 public:
  // Creates a block handle with special values indicating "uninitialized,"
  // distinct from the "null" block handle.
  BlockHandle();
  BlockHandle(uint64_t offset, uint64_t size);

  // The offset of the block in the file.
  uint64_t offset() const { return offset_; }
  void set_offset(uint64_t _offset) { offset_ = _offset; }

  // The size of the stored block.
  uint64_t size() const { return size_; }
  void set_size(uint64_t _size) { size_ = _size; }

  void EncodeTo(std::string* dst) const;
  char* EncodeTo(char* dst) const;
  Status DecodeFrom(Slice* input);
  Status DecodeSizeFrom(uint64_t offset, Slice* input);

  // Return a string that contains a copy of the handle.
  std::string ToString(bool hex = true) const;

  // If the block handle's offset and size are both "0", we will view it
  // as a null block handle that points to nowhere.
  bool IsNull() const { return offset_ == 0 && size_ == 0; }

  static const BlockHandle& NullBlockHandle() { return kNullBlockHandle; }

  // Maximum encoding length of a BlockHandle
  static constexpr uint32_t kMaxEncodedLength = 2 * kMaxVarint64Length;

  inline bool operator==(const BlockHandle& rhs) const {
    return offset_ == rhs.offset_ && size_ == rhs.size_;
  }
  inline bool operator!=(const BlockHandle& rhs) const {
    return !(*this == rhs);
  }

 private:
  uint64_t offset_;
  uint64_t size_;

  static const BlockHandle kNullBlockHandle;
};
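
// Example (illustrative sketch, not part of the API): a BlockHandle
// round-trips through its varint64 encoding; EncodeTo appends at most
// kMaxEncodedLength bytes to `dst`.
//
//   std::string buf;
//   BlockHandle h(/*offset=*/4096, /*size=*/1024);
//   h.EncodeTo(&buf);
//   Slice input(buf);
//   BlockHandle decoded;
//   Status s = decoded.DecodeFrom(&input);
//   assert(s.ok() && decoded == h);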

// Value in block-based table file index.
//
// The index entry for block n is: y -> h, [x],
// where: y is some key between the last key of block n (inclusive) and the
// first key of block n+1 (exclusive); h is BlockHandle pointing to block n;
// x, if present, is the first key of block n (unshortened).
// This struct represents the "h, [x]" part.
struct IndexValue {
  BlockHandle handle;
  // Empty means unknown.
  Slice first_internal_key;

  IndexValue() = default;
  IndexValue(BlockHandle _handle, Slice _first_internal_key)
      : handle(_handle), first_internal_key(_first_internal_key) {}

  // have_first_key indicates whether the `first_internal_key` is used.
  // If previous_handle is not null, delta encoding is used;
  // in this case, the two handles must point to consecutive blocks:
  // handle.offset() == previous_handle->offset() + previous_handle->size() +
  //                    kBlockTrailerSize
  void EncodeTo(std::string* dst, bool have_first_key,
                const BlockHandle* previous_handle) const;
  Status DecodeFrom(Slice* input, bool have_first_key,
                    const BlockHandle* previous_handle);

  std::string ToString(bool hex, bool have_first_key) const;
};
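
// Example (illustrative sketch): encoding entries for two consecutive
// blocks, where the second entry can be delta encoded against the first.
// The literal 5 stands for the block-based table trailer size (1-byte
// compression type + 4-byte checksum) and appears only to satisfy the
// consecutive-blocks precondition above.
//
//   std::string buf;
//   IndexValue v1(BlockHandle(/*offset=*/0, /*size=*/4096), Slice());
//   IndexValue v2(BlockHandle(/*offset=*/4096 + 5, /*size=*/8192), Slice());
//   v1.EncodeTo(&buf, /*have_first_key=*/false, nullptr);
//   v2.EncodeTo(&buf, /*have_first_key=*/false, &v1.handle);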

inline uint32_t GetCompressFormatForVersion(uint32_t format_version) {
  // As of format_version 2, we encode compressed block with
  // compress_format_version == 2. Before that, the version is 1.
  // DO NOT CHANGE THIS FUNCTION, it affects disk format.
  return format_version >= 2 ? 2 : 1;
}

constexpr uint32_t kLatestFormatVersion = 5;

inline bool IsSupportedFormatVersion(uint32_t version) {
  return version <= kLatestFormatVersion;
}
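
// Example (illustrative): properties of the helpers above.
//
//   assert(IsSupportedFormatVersion(kLatestFormatVersion));
//   assert(GetCompressFormatForVersion(1) == 1);
//   assert(GetCompressFormatForVersion(kLatestFormatVersion) == 2);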

// Footer encapsulates the fixed information stored at the tail end of every
// SST file. In general, it should only include things that cannot go
// elsewhere under the metaindex block. For example, checksum_type is
// required for verifying metaindex block checksum (when applicable), but
// index block handle can easily go in metaindex block (possible future).
// See also FooterBuilder below.
class Footer {
 public:
  // Create empty. Populate using DecodeFrom.
  Footer() {}

  // Deserialize a footer (populate fields) from `input` and check for various
  // corruptions. `input_offset` is the offset within the target file of
  // `input` buffer (future use).
  // If enforce_table_magic_number != 0, will return corruption if table magic
  // number is not equal to enforce_table_magic_number.
  Status DecodeFrom(Slice input, uint64_t input_offset,
                    uint64_t enforce_table_magic_number = 0);

  // Table magic number identifies file as RocksDB SST file and which kind of
  // SST format is in use.
  uint64_t table_magic_number() const { return table_magic_number_; }

  // A version (footer and more) within a kind of SST. (It would add more
  // unnecessary complexity to separate footer versions and
  // BBTO::format_version.)
  uint32_t format_version() const { return format_version_; }

  // Block handle for metaindex block.
  const BlockHandle& metaindex_handle() const { return metaindex_handle_; }

  // Block handle for (top-level) index block.
  const BlockHandle& index_handle() const { return index_handle_; }

  // Checksum type used in the file.
  ChecksumType checksum_type() const {
    return static_cast<ChecksumType>(checksum_type_);
  }

  // Block trailer size used by file with this footer (e.g. 5 for block-based
  // table and 0 for plain table). This is inferred from the magic number, so
  // it is not in the serialized form.
  inline size_t GetBlockTrailerSize() const { return block_trailer_size_; }

  // Convert this object to a human readable form.
  std::string ToString() const;

  // Encoded lengths of Footers. Bytes for serialized Footer will always be
  // >= kMinEncodedLength and <= kMaxEncodedLength.
  //
  // Footer version 0 (legacy) will always occupy exactly this many bytes.
  // It consists of two block handles, padding, and a magic number.
  static constexpr uint32_t kVersion0EncodedLength =
      2 * BlockHandle::kMaxEncodedLength + kMagicNumberLengthByte;
  static constexpr uint32_t kMinEncodedLength = kVersion0EncodedLength;

  // Footer of versions 1 and higher will always occupy exactly this many
  // bytes. It originally consisted of the checksum type, two block handles,
  // padding (to maximum handle encoding size), a format version number, and a
  // magic number.
  static constexpr uint32_t kNewVersionsEncodedLength =
      1 + 2 * BlockHandle::kMaxEncodedLength + 4 + kMagicNumberLengthByte;
  static constexpr uint32_t kMaxEncodedLength = kNewVersionsEncodedLength;

  static constexpr uint64_t kNullTableMagicNumber = 0;

  static constexpr uint32_t kInvalidFormatVersion = 0xffffffffU;

 private:
  static constexpr int kInvalidChecksumType =
      (1 << (sizeof(ChecksumType) * 8)) | kNoChecksum;

  uint64_t table_magic_number_ = kNullTableMagicNumber;
  uint32_t format_version_ = kInvalidFormatVersion;
  BlockHandle metaindex_handle_;
  BlockHandle index_handle_;
  int checksum_type_ = kInvalidChecksumType;
  uint8_t block_trailer_size_ = 0;
};

// Builder for Footer
class FooterBuilder {
 public:
  // Run the builder on the given inputs. This is a single step with lots of
  // parameters for efficiency (based on perf testing).
  // * table_magic_number identifies file as RocksDB SST file and which kind
  // of SST format is in use.
  // * format_version is a version for the footer and can also apply to other
  // aspects of the SST file (see BlockBasedTableOptions::format_version).
  // NOTE: To save complexity in the caller, when format_version == 0 and
  // there is a corresponding legacy magic number to the one specified, the
  // legacy magic number will be written for forward compatibility.
  // * footer_offset is the file offset where the footer will be written
  // (for future use).
  // * checksum_type is for formats using block checksums.
  // * index_handle is optional for some kinds of SST files.
  void Build(uint64_t table_magic_number, uint32_t format_version,
             uint64_t footer_offset, ChecksumType checksum_type,
             const BlockHandle& metaindex_handle,
             const BlockHandle& index_handle = BlockHandle::NullBlockHandle());

  // After Build, get a Slice for the serialized Footer, backed by this
  // FooterBuilder.
  const Slice& GetSlice() const {
    assert(slice_.size());
    return slice_;
  }

 private:
  Slice slice_;
  std::array<char, Footer::kMaxEncodedLength> data_;
};
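
// Example (illustrative sketch): serializing a footer for a block-based
// table. `kBlockBasedTableMagicNumber`, the handles, and the writer are
// assumed to come from the surrounding table builder code.
//
//   FooterBuilder fb;
//   fb.Build(kBlockBasedTableMagicNumber, /*format_version=*/5,
//            /*footer_offset=*/current_file_size, kCRC32c,
//            metaindex_handle, index_handle);
//   io_status = file_writer->Append(fb.GetSlice());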

// Read the footer from file.
// If enforce_table_magic_number != 0, ReadFooterFromFile() will return
// corruption if the table magic number is not equal to
// enforce_table_magic_number.
Status ReadFooterFromFile(const IOOptions& opts, RandomAccessFileReader* file,
                          FileSystem& fs, FilePrefetchBuffer* prefetch_buffer,
                          uint64_t file_size, Footer* footer,
                          uint64_t enforce_table_magic_number = 0);
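
// Example (illustrative sketch): reading and validating the footer of an
// open SST file. `file_reader`, `fs`, `file_size`, and the magic number
// constant are assumed to be provided by the caller.
//
//   Footer footer;
//   Status s = ReadFooterFromFile(IOOptions(), file_reader, *fs,
//                                 /*prefetch_buffer=*/nullptr, file_size,
//                                 &footer, kBlockBasedTableMagicNumber);
//   if (s.ok()) {
//     const BlockHandle& metaindex = footer.metaindex_handle();
//     // ... fetch the metaindex block at metaindex.offset() ...
//   }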

// Computes a checksum using the given ChecksumType. Sometimes we need to
// include one more input byte logically at the end but not part of the main
// data buffer. If size >= 1, then
//   ComputeBuiltinChecksum(type, data, size)
//       ==
//   ComputeBuiltinChecksumWithLastByte(type, data, size - 1, data[size - 1])
uint32_t ComputeBuiltinChecksum(ChecksumType type, const char* data,
                                size_t size);
uint32_t ComputeBuiltinChecksumWithLastByte(ChecksumType type, const char* data,
                                            size_t size, char last_byte);
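
// Example (illustrative): the equivalence above lets a caller checksum a
// block body plus a separately stored trailing byte (e.g. the compression
// type) without first copying them into one contiguous buffer.
//
//   // buf holds n >= 1 bytes.
//   uint32_t a = ComputeBuiltinChecksum(kCRC32c, buf, n);
//   uint32_t b =
//       ComputeBuiltinChecksumWithLastByte(kCRC32c, buf, n - 1, buf[n - 1]);
//   assert(a == b);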

// Represents the contents of a block read from an SST file. Depending on how
// it's created, it may or may not own the actual block bytes. As an example,
// BlockContents objects representing data read from mmapped files only point
// into the mmapped region. Depending on context, it might be a serialized
// (potentially compressed) block, including a trailer beyond `size`, or an
// uncompressed block.
//
// Please try to use this terminology when dealing with blocks:
// * "Serialized block" - bytes that go into storage. For block-based table
// (usually the case) this includes the block trailer. Here the `size` does
// not include the trailer, but other places in code might include the trailer
// in the size.
// * "Maybe compressed block" - like a serialized block, but without the
// trailer (or no promise of including a trailer). Must be accompanied by a
// CompressionType in some other variable or field.
// * "Uncompressed block" - "payload" bytes that are either stored with no
// compression, used as input to compression function, or result of
// decompression function.
// * "Parsed block" - an in-memory form of a block in block cache, as it is
// used by the table reader. Different C++ types are used depending on the
// block type (see block_like_traits.h). Only trivially parsable block types
// use BlockContents as the parsed form.
//
struct BlockContents {
  // Points to block payload (without trailer)
  Slice data;
  CacheAllocationPtr allocation;

#ifndef NDEBUG
  // Whether there is a known trailer after what is pointed to by `data`.
  // See BlockBasedTable::GetCompressionType.
  bool has_trailer = false;
#endif  // NDEBUG

  BlockContents() {}

  // Does not take ownership of the underlying data bytes.
  BlockContents(const Slice& _data) : data(_data) {}

  // Takes ownership of the underlying data bytes.
  BlockContents(CacheAllocationPtr&& _data, size_t _size)
      : data(_data.get(), _size), allocation(std::move(_data)) {}

  // Takes ownership of the underlying data bytes.
  BlockContents(std::unique_ptr<char[]>&& _data, size_t _size)
      : data(_data.get(), _size) {
    allocation.reset(_data.release());
  }

  // Returns whether the object has ownership of the underlying data bytes.
  bool own_bytes() const { return allocation.get() != nullptr; }

  // The additional memory space taken by the block data.
  size_t usable_size() const {
    if (allocation.get() != nullptr) {
      auto allocator = allocation.get_deleter().allocator;
      if (allocator) {
        return allocator->UsableSize(allocation.get(), data.size());
      }
#ifdef ROCKSDB_MALLOC_USABLE_SIZE
      return malloc_usable_size(allocation.get());
#else
      return data.size();
#endif  // ROCKSDB_MALLOC_USABLE_SIZE
    } else {
      return 0;  // no extra memory is occupied by the data
    }
  }

  size_t ApproximateMemoryUsage() const {
    return usable_size() + sizeof(*this);
  }

  BlockContents(BlockContents&& other) noexcept { *this = std::move(other); }

  BlockContents& operator=(BlockContents&& other) {
    data = std::move(other.data);
    allocation = std::move(other.allocation);
#ifndef NDEBUG
    has_trailer = other.has_trailer;
#endif  // NDEBUG
    return *this;
  }
};
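
// Example (illustrative sketch): non-owning vs. owning BlockContents.
// `mmap_base` and `block_len` are placeholders for caller state.
//
//   // Borrowed: points into an mmapped file region; no copy is made.
//   Slice region(mmap_base, block_len);
//   BlockContents borrowed(region);                 // own_bytes() == false
//
//   // Owned: the heap buffer's lifetime is tied to the BlockContents.
//   std::unique_ptr<char[]> heap_buf(new char[block_len]);
//   BlockContents owned(std::move(heap_buf), block_len);  // own_bytes() == true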

// The `data` points to serialized block contents read in from file, which
// must be compressed and include a trailer beyond `size`. A new buffer is
// allocated with the given allocator (or default) and the uncompressed
// contents are returned in `out_contents`.
// format_version is as defined in include/rocksdb/table.h, which is
// used to determine compression format version.
Status UncompressSerializedBlock(const UncompressionInfo& info,
                                 const char* data, size_t size,
                                 BlockContents* out_contents,
                                 uint32_t format_version,
                                 const ImmutableOptions& ioptions,
                                 MemoryAllocator* allocator = nullptr);

// This is a variant of UncompressSerializedBlock that does not expect a
// block trailer beyond `size`. (CompressionType is taken from `info`.)
Status UncompressBlockData(const UncompressionInfo& info, const char* data,
                           size_t size, BlockContents* out_contents,
                           uint32_t format_version,
                           const ImmutableOptions& ioptions,
                           MemoryAllocator* allocator = nullptr);
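
// Example (illustrative sketch): decompressing a maybe compressed block that
// was read without its trailer. `uncompression_info`, `data_ptr`,
// `data_size`, `table_options`, and `ioptions` are placeholders for caller
// state; the UncompressionInfo must carry the block's recorded
// CompressionType.
//
//   BlockContents contents;
//   Status s = UncompressBlockData(uncompression_info, data_ptr, data_size,
//                                  &contents, table_options.format_version,
//                                  ioptions);
//   if (s.ok()) {
//     Slice payload = contents.data;  // uncompressed block payload
//   }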

// Replace db_host_id contents with the real hostname if necessary.
Status ReifyDbHostIdProperty(Env* env, std::string* db_host_id);

// Implementation details follow. Clients should ignore.

// TODO(andrewkr): we should prefer one way of representing a
// null/uninitialized BlockHandle. Currently we use zeros for null and use
// negation-of-zeros for uninitialized.
inline BlockHandle::BlockHandle() : BlockHandle(~uint64_t{0}, ~uint64_t{0}) {}

inline BlockHandle::BlockHandle(uint64_t _offset, uint64_t _size)
    : offset_(_offset), size_(_size) {}

}  // namespace ROCKSDB_NAMESPACE