rocksdb/utilities/blob_db/blob_dump_tool.cc


// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
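// blob_dump_tool: dumps the contents of a BlobDB blob log file. It prints
// the blob log header, then each record (key and blob, optionally
// decompressed), then the footer if one is present, and can finish with a
// summary of record counts and key/blob sizes.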
#include "utilities/blob_db/blob_dump_tool.h"
#include <stdio.h>
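// <cinttypes> provides the PRIu64/PRIx64 macros used below to print
// uint64_t values portably with fprintf.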
#include <cinttypes>
#include <iostream>
#include <memory>
#include <string>
#include "file/random_access_file_reader.h"
#include "file/readahead_raf.h"
#include "port/port.h"
#include "rocksdb/convenience.h"
#include "rocksdb/file_system.h"
#include "table/format.h"
#include "util/coding.h"
#include "util/string_util.h"
namespace ROCKSDB_NAMESPACE {
namespace blob_db {
BlobDumpTool::BlobDumpTool()
: reader_(nullptr), buffer_(nullptr), buffer_size_(0) {}
Status BlobDumpTool::Run(const std::string& filename, DisplayType show_key,
DisplayType show_blob,
DisplayType show_uncompressed_blob,
bool show_summary) {
constexpr size_t kReadaheadSize = 2 * 1024 * 1024;
Status s;
const auto fs = FileSystem::Default();
IOOptions io_opts;
s = fs->FileExists(filename, io_opts, nullptr);
if (!s.ok()) {
return s;
}
uint64_t file_size = 0;
s = fs->GetFileSize(filename, io_opts, &file_size, nullptr);
if (!s.ok()) {
return s;
}
std::unique_ptr<FSRandomAccessFile> file;
s = fs->NewRandomAccessFile(filename, FileOptions(), &file, nullptr);
if (!s.ok()) {
return s;
}
file = NewReadaheadRandomAccessFile(std::move(file), kReadaheadSize);
if (file_size == 0) {
return Status::Corruption("File is empty.");
}
reader_.reset(new RandomAccessFileReader(std::move(file), filename));
uint64_t offset = 0;
uint64_t footer_offset = 0;
CompressionType compression = kNoCompression;
s = DumpBlobLogHeader(&offset, &compression);
if (!s.ok()) {
return s;
}
s = DumpBlobLogFooter(file_size, &footer_offset);
if (!s.ok()) {
return s;
}
uint64_t total_records = 0;
uint64_t total_key_size = 0;
uint64_t total_blob_size = 0;
uint64_t total_uncompressed_blob_size = 0;
if (show_key != DisplayType::kNone || show_summary) {
while (offset < footer_offset) {
s = DumpRecord(show_key, show_blob, show_uncompressed_blob, show_summary,
compression, &offset, &total_records, &total_key_size,
&total_blob_size, &total_uncompressed_blob_size);
if (!s.ok()) {
break;
}
}
}
if (show_summary) {
fprintf(stdout, "Summary:\n");
fprintf(stdout, " total records: %" PRIu64 "\n", total_records);
fprintf(stdout, " total key size: %" PRIu64 "\n", total_key_size);
fprintf(stdout, " total blob size: %" PRIu64 "\n", total_blob_size);
if (compression != kNoCompression) {
fprintf(stdout, " total raw blob size: %" PRIu64 "\n",
total_uncompressed_blob_size);
}
}
return s;
}
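// Reads `size` bytes at `offset` into an internal scratch buffer that grows
// geometrically (doubling from 4 KiB), so repeated reads reuse a single
// allocation. A short read is reported as corruption.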
Status BlobDumpTool::Read(uint64_t offset, size_t size, Slice* result) {
if (buffer_size_ < size) {
if (buffer_size_ == 0) {
buffer_size_ = 4096;
}
while (buffer_size_ < size) {
buffer_size_ *= 2;
}
buffer_.reset(new char[buffer_size_]);
}
Status s = reader_->Read(IOOptions(), offset, size, result, buffer_.get(),
nullptr, Env::IO_TOTAL /* rate_limiter_priority */);
if (!s.ok()) {
return s;
}
if (result->size() != size) {
return Status::Corruption("Reach the end of the file unexpectedly.");
}
return s;
}
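// Decodes and prints the fixed-size header at offset 0, and reports the
// file's compression type and the offset of the first record to the caller.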
Status BlobDumpTool::DumpBlobLogHeader(uint64_t* offset,
CompressionType* compression) {
Slice slice;
Status s = Read(0, BlobLogHeader::kSize, &slice);
if (!s.ok()) {
return s;
}
BlobLogHeader header;
s = header.DecodeFrom(slice);
if (!s.ok()) {
return s;
}
fprintf(stdout, "Blob log header:\n");
fprintf(stdout, " Version : %" PRIu32 "\n", header.version);
fprintf(stdout, " Column Family ID : %" PRIu32 "\n",
header.column_family_id);
std::string compression_str;
if (!GetStringFromCompressionType(&compression_str, header.compression)
.ok()) {
compression_str = "Unrecongnized compression type (" +
std::to_string((int)header.compression) + ")";
}
fprintf(stdout, " Compression : %s\n", compression_str.c_str());
fprintf(stdout, " Expiration range : %s\n",
GetString(header.expiration_range).c_str());
*offset = BlobLogHeader::kSize;
*compression = header.compression;
return s;
}
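// Decodes and prints the footer at the end of the file. A missing or
// undecodable footer is not an error: the tool reports "No blob log
// footer." and treats everything after the header as records by setting
// *footer_offset to the file size.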
Status BlobDumpTool::DumpBlobLogFooter(uint64_t file_size,
uint64_t* footer_offset) {
auto no_footer = [&]() {
*footer_offset = file_size;
fprintf(stdout, "No blob log footer.\n");
return Status::OK();
};
if (file_size < BlobLogHeader::kSize + BlobLogFooter::kSize) {
return no_footer();
}
Slice slice;
*footer_offset = file_size - BlobLogFooter::kSize;
Status s = Read(*footer_offset, BlobLogFooter::kSize, &slice);
if (!s.ok()) {
return s;
}
BlobLogFooter footer;
s = footer.DecodeFrom(slice);
if (!s.ok()) {
return no_footer();
}
fprintf(stdout, "Blob log footer:\n");
fprintf(stdout, " Blob count : %" PRIu64 "\n", footer.blob_count);
fprintf(stdout, " Expiration Range : %s\n",
GetString(footer.expiration_range).c_str());
return s;
}
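// Reads one record at *offset: decodes the record header, optionally prints
// the key and the (possibly decompressed) blob, accumulates the summary
// totals, and advances *offset past the record.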
Status BlobDumpTool::DumpRecord(DisplayType show_key, DisplayType show_blob,
DisplayType show_uncompressed_blob,
bool show_summary, CompressionType compression,
uint64_t* offset, uint64_t* total_records,
uint64_t* total_key_size,
uint64_t* total_blob_size,
uint64_t* total_uncompressed_blob_size) {
if (show_key != DisplayType::kNone) {
fprintf(stdout, "Read record with offset 0x%" PRIx64 " (%" PRIu64 "):\n",
*offset, *offset);
}
Slice slice;
Status s = Read(*offset, BlobLogRecord::kHeaderSize, &slice);
if (!s.ok()) {
return s;
}
BlobLogRecord record;
s = record.DecodeHeaderFrom(slice);
if (!s.ok()) {
return s;
}
uint64_t key_size = record.key_size;
uint64_t value_size = record.value_size;
if (show_key != DisplayType::kNone) {
fprintf(stdout, " key size : %" PRIu64 "\n", key_size);
fprintf(stdout, " value size : %" PRIu64 "\n", value_size);
fprintf(stdout, " expiration : %" PRIu64 "\n", record.expiration);
}
*offset += BlobLogRecord::kHeaderSize;
s = Read(*offset, static_cast<size_t>(key_size + value_size), &slice);
if (!s.ok()) {
return s;
}
// Decompress the value only when it will actually be displayed or counted
// in the summary.
std::string uncompressed_value;
if (compression != kNoCompression &&
(show_uncompressed_blob != DisplayType::kNone || show_summary)) {
BlockContents contents;
UncompressionContext context(compression);
UncompressionInfo info(context, UncompressionDict::GetEmptyDict(),
compression);
s = UncompressBlockData(
info, slice.data() + key_size, static_cast<size_t>(value_size),
&contents, 2 /*compress_format_version*/, ImmutableOptions(Options()));
if (!s.ok()) {
return s;
}
uncompressed_value = contents.data.ToString();
}
if (show_key != DisplayType::kNone) {
fprintf(stdout, " key : ");
DumpSlice(Slice(slice.data(), static_cast<size_t>(key_size)), show_key);
if (show_blob != DisplayType::kNone) {
fprintf(stdout, " blob : ");
DumpSlice(Slice(slice.data() + static_cast<size_t>(key_size),
static_cast<size_t>(value_size)),
show_blob);
}
if (show_uncompressed_blob != DisplayType::kNone) {
fprintf(stdout, " raw blob : ");
DumpSlice(Slice(uncompressed_value), show_uncompressed_blob);
}
}
*offset += key_size + value_size;
*total_records += 1;
*total_key_size += key_size;
*total_blob_size += value_size;
*total_uncompressed_blob_size += uncompressed_value.size();
return s;
}
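// Prints a slice according to the display type: raw bytes, a hex string, or
// a 16-bytes-per-line hex/ASCII dump.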
void BlobDumpTool::DumpSlice(const Slice s, DisplayType type) {
if (type == DisplayType::kRaw) {
fprintf(stdout, "%s\n", s.ToString().c_str());
} else if (type == DisplayType::kHex) {
fprintf(stdout, "%s\n", s.ToString(true /*hex*/).c_str());
} else if (type == DisplayType::kDetail) {
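// Each 16-byte row is assembled in `buf`: a 15-column gutter (blank on
// continuation rows so they line up under the 15-character label printed
// before the first row), two hex digits plus a space per byte starting at
// column 15, and a printable-ASCII rendering starting at column 65.
// snprintf leaves NUL terminators behind, which are overwritten with
// spaces before each row is printed.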
char buf[100];
for (size_t i = 0; i < s.size(); i += 16) {
memset(buf, 0, sizeof(buf));
for (size_t j = 0; j < 16 && i + j < s.size(); j++) {
unsigned char c = s[i + j];
snprintf(buf + j * 3 + 15, 2, "%x", c >> 4);
snprintf(buf + j * 3 + 16, 2, "%x", c & 0xf);
snprintf(buf + j + 65, 2, "%c", (0x20 <= c && c <= 0x7e) ? c : '.');
}
for (size_t p = 0; p + 1 < sizeof(buf); p++) {
if (buf[p] == 0) {
buf[p] = ' ';
}
}
fprintf(stdout, "%s\n", i == 0 ? buf + 15 : buf);
}
}
}
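// Formats a (first, second) pair, such as an expiration range; (0, 0) is
// shown as "nil".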
template <class T>
std::string BlobDumpTool::GetString(std::pair<T, T> p) {
if (p.first == 0 && p.second == 0) {
return "nil";
}
return "(" + std::to_string(p.first) + ", " + std::to_string(p.second) + ")";
}
} // namespace blob_db
} // namespace ROCKSDB_NAMESPACE
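// ----------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of this file): a minimal
// hypothetical driver that dumps a blob log with hex keys/blobs and a
// summary. The Run() signature and the DisplayType values match their uses
// above; the main() wrapper and its argument handling are assumptions.
//
//   #include <stdio.h>
//   #include "utilities/blob_db/blob_dump_tool.h"
//
//   int main(int argc, char** argv) {
//     if (argc < 2) {
//       fprintf(stderr, "usage: %s <blob log file>\n", argv[0]);
//       return 1;
//     }
//     using ROCKSDB_NAMESPACE::blob_db::BlobDumpTool;
//     using DisplayType = BlobDumpTool::DisplayType;
//     BlobDumpTool tool;
//     ROCKSDB_NAMESPACE::Status s =
//         tool.Run(argv[1], DisplayType::kHex /* show_key */,
//                  DisplayType::kHex /* show_blob */,
//                  DisplayType::kNone /* show_uncompressed_blob */,
//                  true /* show_summary */);
//     if (!s.ok()) {
//       fprintf(stderr, "%s\n", s.ToString().c_str());
//       return 1;
//     }
//     return 0;
//   }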