Fix many bugs in log statement arguments (#5089)

Summary:
Annotate all of the logging functions to inform the compiler that these
use printf-style formatting arguments.  This allows the compiler to emit
warnings if the format arguments are incorrect.

This also fixes many problems reported now that format string checking
is enabled.  Many of these are simply mix-ups in the argument type (e.g.,
int vs uint64_t), but in several cases the wrong number of arguments
was being passed in, which can cause the code to crash.

The primary motivation for this was to fix the log message in
`DBImpl::SwitchMemtable()`, which caused a segfault due to an extra %s
format parameter with no argument supplied.
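
For illustration, the following is a minimal standalone sketch of the technique
this change applies (hypothetical names -- MY_PRINTF_ATTR and LogToStderr are
not RocksDB code): annotating a variadic logging function with the gcc/clang
format attribute so that -Wformat (implied by -Wall) reports mismatched types
and missing arguments at compile time.

// Minimal sketch of the printf-format-attribute technique (hypothetical names).
#include <cinttypes>
#include <cstdarg>
#include <cstdio>

#if defined(__GNUC__) || defined(__clang__)
#define MY_PRINTF_ATTR(format_param, dots_param) \
  __attribute__((__format__(__printf__, format_param, dots_param)))
#else
#define MY_PRINTF_ATTR(format_param, dots_param)
#endif

// Annotated variadic logger: parameter 1 is the format string,
// and the variadic arguments start at parameter 2.
void LogToStderr(const char* format, ...) MY_PRINTF_ATTR(1, 2);

void LogToStderr(const char* format, ...) {
  va_list ap;
  va_start(ap, format);
  std::vfprintf(stderr, format, ap);
  va_end(ap);
}

int main() {
  uint64_t log_number = 42;
  LogToStderr("Recovering log #%" PRIu64 "\n", log_number);  // OK
  // With the attribute in place, -Wformat now warns on mistakes such as:
  //   LogToStderr("Recovering log #%d\n", log_number);      // %d vs uint64_t
  //   LogToStderr("log #%" PRIu64 " -- %s\n", log_number);  // extra %s, no argument
  return 0;
}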
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5089

Differential Revision: D14574795

Pulled By: simpkins

fbshipit-source-id: 0921b03f0743652bf4ae21e414ff54b3bb65422a
Branch: main
Author: Adam Simpkins (6 years ago), committed by Facebook Github Bot
Parent: f0edf9d575
Commit: c06c4c01c5
15 files changed (number of changed lines in parentheses):
  1. db/db_impl_compaction_flush.cc (5)
  2. db/db_impl_open.cc (5)
  3. db/db_impl_write.cc (4)
  4. db/version_set.cc (41)
  5. include/rocksdb/env.h (52)
  6. options/db_options.cc (11)
  7. options/options.cc (11)
  8. table/plain_table_index.cc (2)
  9. util/auto_roll_logger_test.cc (4)
  10. util/duplicate_detector.h (6)
  11. util/sst_file_manager_impl.cc (29)
  12. util/transaction_test_util.cc (2)
  13. utilities/blob_db/blob_db_impl.cc (51)
  14. utilities/blob_db/blob_file.cc (12)
  15. utilities/persistent_cache/block_cache_tier.h (2)

@@ -1310,8 +1310,9 @@ Status DBImpl::Flush(const FlushOptions& flush_options,
});
s = AtomicFlushMemTables(cfds, flush_options, FlushReason::kManualFlush);
ROCKS_LOG_INFO(immutable_db_options_.info_log,
"Manual atomic flush finished, status: %s\n",
"=====Column families:=====", s.ToString().c_str());
"Manual atomic flush finished, status: %s\n"
"=====Column families:=====",
s.ToString().c_str());
for (auto cfh : column_families) {
auto cfhi = static_cast<ColumnFamilyHandleImpl*>(cfh);
ROCKS_LOG_INFO(immutable_db_options_.info_log, "%s",

@@ -580,7 +580,7 @@ Status DBImpl::RecoverLogFiles(const std::vector<uint64_t>& log_numbers,
ROCKS_LOG_INFO(immutable_db_options_.info_log,
"Recovering log #%" PRIu64 " mode %d", log_number,
immutable_db_options_.wal_recovery_mode);
static_cast<int>(immutable_db_options_.wal_recovery_mode));
auto logFileDropped = [this, &fname]() {
uint64_t bytes;
if (env_->GetFileSize(fname, &bytes).ok()) {
@@ -723,7 +723,8 @@ Status DBImpl::RecoverLogFiles(const std::vector<uint64_t>& log_numbers,
" mode %d log filter %s returned "
"more records (%d) than original (%d) which is not allowed. "
"Aborting recovery.",
log_number, immutable_db_options_.wal_recovery_mode,
log_number,
static_cast<int>(immutable_db_options_.wal_recovery_mode),
immutable_db_options_.wal_filter->Name(), new_count,
original_count);
status = Status::NotSupported(

@@ -1127,7 +1127,7 @@ Status DBImpl::HandleWriteBufferFull(WriteContext* write_context) {
ROCKS_LOG_INFO(
immutable_db_options_.info_log,
"Flushing column family with largest mem table size. Write buffer is "
"using %" PRIu64 " bytes out of a total of %" PRIu64 ".",
"using %" ROCKSDB_PRIszt " bytes out of a total of %" ROCKSDB_PRIszt ".",
write_buffer_manager_->memory_usage(),
write_buffer_manager_->buffer_size());
// no need to refcount because drop is happening in write thread, so can't
@@ -1479,7 +1479,7 @@ Status DBImpl::SwitchMemtable(ColumnFamilyData* cfd, WriteContext* context) {
if (!s.ok()) {
ROCKS_LOG_WARN(immutable_db_options_.info_log,
"[%s] Failed to switch from #%" PRIu64 " to #%" PRIu64
" WAL file -- %s\n",
" WAL file\n",
cfd->GetName().c_str(), cur_log_writer->get_log_number(),
new_log_number);
}

@@ -3747,22 +3747,22 @@ Status VersionSet::Recover(
ROCKS_LOG_INFO(
db_options_->info_log,
"Recovered from manifest file:%s succeeded,"
"manifest_file_number is %lu, next_file_number is %lu, "
"last_sequence is %lu, log_number is %lu,"
"prev_log_number is %lu,"
"max_column_family is %u,"
"min_log_number_to_keep is %lu\n",
manifest_path.c_str(), (unsigned long)manifest_file_number_,
(unsigned long)next_file_number_.load(), (unsigned long)last_sequence_,
(unsigned long)log_number, (unsigned long)prev_log_number_,
column_family_set_->GetMaxColumnFamily(), min_log_number_to_keep_2pc());
"manifest_file_number is %" PRIu64 ", next_file_number is %" PRIu64
", last_sequence is %" PRIu64 ", log_number is %" PRIu64
",prev_log_number is %" PRIu64 ",max_column_family is %" PRIu32
",min_log_number_to_keep is %" PRIu64 "\n",
manifest_path.c_str(), manifest_file_number_,
next_file_number_.load(), last_sequence_.load(), log_number,
prev_log_number_, column_family_set_->GetMaxColumnFamily(),
min_log_number_to_keep_2pc());
for (auto cfd : *column_family_set_) {
if (cfd->IsDropped()) {
continue;
}
ROCKS_LOG_INFO(db_options_->info_log,
"Column family [%s] (ID %u), log number is %" PRIu64 "\n",
"Column family [%s] (ID %" PRIu32
"), log number is %" PRIu64 "\n",
cfd->GetName().c_str(), cfd->GetID(), cfd->GetLogNumber());
}
}
@@ -4098,9 +4098,10 @@ Status VersionSet::DumpManifest(Options& options, std::string& dscname,
builder->SaveTo(v->storage_info());
v->PrepareApply(*cfd->GetLatestMutableCFOptions(), false);
printf("--------------- Column family \"%s\" (ID %u) --------------\n",
cfd->GetName().c_str(), (unsigned int)cfd->GetID());
printf("log number: %lu\n", (unsigned long)cfd->GetLogNumber());
printf("--------------- Column family \"%s\" (ID %" PRIu32
") --------------\n",
cfd->GetName().c_str(), cfd->GetID());
printf("log number: %" PRIu64 "\n", cfd->GetLogNumber());
auto comparator = comparators.find(cfd->GetID());
if (comparator != comparators.end()) {
printf("comparator: %s\n", comparator->second.c_str());
@@ -4117,13 +4118,13 @@ Status VersionSet::DumpManifest(Options& options, std::string& dscname,
last_sequence_ = last_sequence;
prev_log_number_ = previous_log_number;
printf(
"next_file_number %lu last_sequence "
"%lu prev_log_number %lu max_column_family %u min_log_number_to_keep "
"%" PRIu64 "\n",
(unsigned long)next_file_number_.load(), (unsigned long)last_sequence,
(unsigned long)previous_log_number,
column_family_set_->GetMaxColumnFamily(), min_log_number_to_keep_2pc());
printf("next_file_number %" PRIu64 " last_sequence %" PRIu64
" prev_log_number %" PRIu64 " max_column_family %" PRIu32
" min_log_number_to_keep "
"%" PRIu64 "\n",
next_file_number_.load(), last_sequence, previous_log_number,
column_family_set_->GetMaxColumnFamily(),
min_log_number_to_keep_2pc());
}
return s;

@@ -32,6 +32,13 @@
#undef GetCurrentTime
#endif
#if defined(__GNUC__) || defined(__clang__)
#define ROCKSDB_PRINTF_FORMAT_ATTR(format_param, dots_param) \
__attribute__((__format__(__printf__, format_param, dots_param)))
#else
#define ROCKSDB_PRINTF_FORMAT_ATTR(format_param, dots_param)
#endif
namespace rocksdb {
class FileLock;
@@ -929,50 +936,49 @@ extern void LogFlush(const std::shared_ptr<Logger>& info_log);
extern void Log(const InfoLogLevel log_level,
const std::shared_ptr<Logger>& info_log, const char* format,
...);
...) ROCKSDB_PRINTF_FORMAT_ATTR(3, 4);
// a set of log functions with different log levels.
extern void Header(const std::shared_ptr<Logger>& info_log, const char* format,
...);
...) ROCKSDB_PRINTF_FORMAT_ATTR(2, 3);
extern void Debug(const std::shared_ptr<Logger>& info_log, const char* format,
...);
...) ROCKSDB_PRINTF_FORMAT_ATTR(2, 3);
extern void Info(const std::shared_ptr<Logger>& info_log, const char* format,
...);
...) ROCKSDB_PRINTF_FORMAT_ATTR(2, 3);
extern void Warn(const std::shared_ptr<Logger>& info_log, const char* format,
...);
...) ROCKSDB_PRINTF_FORMAT_ATTR(2, 3);
extern void Error(const std::shared_ptr<Logger>& info_log, const char* format,
...);
...) ROCKSDB_PRINTF_FORMAT_ATTR(2, 3);
extern void Fatal(const std::shared_ptr<Logger>& info_log, const char* format,
...);
...) ROCKSDB_PRINTF_FORMAT_ATTR(2, 3);
// Log the specified data to *info_log if info_log is non-nullptr.
// The default info log level is InfoLogLevel::INFO_LEVEL.
extern void Log(const std::shared_ptr<Logger>& info_log, const char* format,
...)
#if defined(__GNUC__) || defined(__clang__)
__attribute__((__format__(__printf__, 2, 3)))
#endif
;
...) ROCKSDB_PRINTF_FORMAT_ATTR(2, 3);
extern void LogFlush(Logger* info_log);
extern void Log(const InfoLogLevel log_level, Logger* info_log,
const char* format, ...);
const char* format, ...) ROCKSDB_PRINTF_FORMAT_ATTR(3, 4);
// The default info log level is InfoLogLevel::INFO_LEVEL.
extern void Log(Logger* info_log, const char* format, ...)
#if defined(__GNUC__) || defined(__clang__)
__attribute__((__format__(__printf__, 2, 3)))
#endif
;
ROCKSDB_PRINTF_FORMAT_ATTR(2, 3);
// a set of log functions with different log levels.
extern void Header(Logger* info_log, const char* format, ...);
extern void Debug(Logger* info_log, const char* format, ...);
extern void Info(Logger* info_log, const char* format, ...);
extern void Warn(Logger* info_log, const char* format, ...);
extern void Error(Logger* info_log, const char* format, ...);
extern void Fatal(Logger* info_log, const char* format, ...);
extern void Header(Logger* info_log, const char* format, ...)
ROCKSDB_PRINTF_FORMAT_ATTR(2, 3);
extern void Debug(Logger* info_log, const char* format, ...)
ROCKSDB_PRINTF_FORMAT_ATTR(2, 3);
extern void Info(Logger* info_log, const char* format, ...)
ROCKSDB_PRINTF_FORMAT_ATTR(2, 3);
extern void Warn(Logger* info_log, const char* format, ...)
ROCKSDB_PRINTF_FORMAT_ATTR(2, 3);
extern void Error(Logger* info_log, const char* format, ...)
ROCKSDB_PRINTF_FORMAT_ATTR(2, 3);
extern void Fatal(Logger* info_log, const char* format, ...)
ROCKSDB_PRINTF_FORMAT_ATTR(2, 3);
// A utility routine: write "data" to the named file.
extern Status WriteStringToFile(Env* env, const Slice& data,

@@ -180,7 +180,7 @@ void ImmutableDBOptions::Dump(Logger* log) const {
log, " Options.sst_file_manager.rate_bytes_per_sec: %" PRIi64,
sst_file_manager ? sst_file_manager->GetDeleteRateBytesPerSecond() : 0);
ROCKS_LOG_HEADER(log, " Options.wal_recovery_mode: %d",
wal_recovery_mode);
static_cast<int>(wal_recovery_mode));
ROCKS_LOG_HEADER(log, " Options.enable_thread_tracking: %d",
enable_thread_tracking);
ROCKS_LOG_HEADER(log, " Options.enable_pipelined_write: %d",
@@ -197,7 +197,8 @@ void ImmutableDBOptions::Dump(Logger* log) const {
write_thread_slow_yield_usec);
if (row_cache) {
ROCKS_LOG_HEADER(
log, " Options.row_cache: %" PRIu64,
log,
" Options.row_cache: %" ROCKSDB_PRIszt,
row_cache->GetCapacity());
} else {
ROCKS_LOG_HEADER(log,
@@ -280,8 +281,10 @@ void MutableDBOptions::Dump(Logger* log) const {
stats_dump_period_sec);
ROCKS_LOG_HEADER(log, " Options.stats_persist_period_sec: %d",
stats_persist_period_sec);
ROCKS_LOG_HEADER(log, " Options.stats_history_buffer_size: %d",
stats_history_buffer_size);
ROCKS_LOG_HEADER(
log,
" Options.stats_history_buffer_size: %" ROCKSDB_PRIszt,
stats_history_buffer_size);
ROCKS_LOG_HEADER(log, " Options.max_open_files: %d",
max_open_files);
ROCKS_LOG_HEADER(log,

@@ -173,12 +173,12 @@ void ColumnFamilyOptions::Dump(Logger* log) const {
ROCKS_LOG_HEADER(
log,
" Options.bottommost_compression_opts.max_dict_bytes: "
"%" ROCKSDB_PRIszt,
"%" PRIu32,
bottommost_compression_opts.max_dict_bytes);
ROCKS_LOG_HEADER(
log,
" Options.bottommost_compression_opts.zstd_max_train_bytes: "
"%" ROCKSDB_PRIszt,
"%" PRIu32,
bottommost_compression_opts.zstd_max_train_bytes);
ROCKS_LOG_HEADER(
log, " Options.bottommost_compression_opts.enabled: %s",
@@ -191,11 +191,11 @@ void ColumnFamilyOptions::Dump(Logger* log) const {
compression_opts.strategy);
ROCKS_LOG_HEADER(
log,
" Options.compression_opts.max_dict_bytes: %" ROCKSDB_PRIszt,
" Options.compression_opts.max_dict_bytes: %" PRIu32,
compression_opts.max_dict_bytes);
ROCKS_LOG_HEADER(log,
" Options.compression_opts.zstd_max_train_bytes: "
"%" ROCKSDB_PRIszt,
"%" PRIu32,
compression_opts.zstd_max_train_bytes);
ROCKS_LOG_HEADER(log,
" Options.compression_opts.enabled: %s",
@@ -350,7 +350,8 @@ void ColumnFamilyOptions::Dump(Logger* log) const {
force_consistency_checks);
ROCKS_LOG_HEADER(log, " Options.report_bg_io_stats: %d",
report_bg_io_stats);
ROCKS_LOG_HEADER(log, " Options.ttl: %d", ttl);
ROCKS_LOG_HEADER(log, " Options.ttl: %" PRIu64,
ttl);
} // ColumnFamilyOptions::Dump
void Options::Dump(Logger* log) const {

@@ -203,7 +203,7 @@ Slice PlainTableIndexBuilder::FillIndexes(
assert(sub_index_offset == sub_index_size_);
ROCKS_LOG_DEBUG(ioptions_.info_log,
"hash table size: %d, suffix_map length %" ROCKSDB_PRIszt,
"hash table size: %" PRIu32 ", suffix_map length %" PRIu32,
index_size_, sub_index_size_);
return Slice(allocated, GetTotalSize());
}

@@ -452,12 +452,12 @@ TEST_F(AutoRollLoggerTest, LogHeaderTest) {
if (test_num == 0) {
// Log some headers explicitly using Header()
for (size_t i = 0; i < MAX_HEADERS; i++) {
Header(&logger, "%s %d", HEADER_STR.c_str(), i);
Header(&logger, "%s %" ROCKSDB_PRIszt, HEADER_STR.c_str(), i);
}
} else if (test_num == 1) {
// HEADER_LEVEL should make this behave like calling Header()
for (size_t i = 0; i < MAX_HEADERS; i++) {
ROCKS_LOG_HEADER(&logger, "%s %d", HEADER_STR.c_str(), i);
ROCKS_LOG_HEADER(&logger, "%s %" ROCKSDB_PRIszt, HEADER_STR.c_str(), i);
}
}

@@ -56,11 +56,11 @@ class DuplicateDetector {
db_->immutable_db_options().info_log,
"Recovering an entry from the dropped column family %" PRIu32
". WAL must must have been emptied before dropping the column "
"family");
"family", cf);
#ifndef ROCKSDB_LITE
throw std::runtime_error(
"Recovering an entry from the dropped column family %" PRIu32
". WAL must must have been flushed before dropping the column "
"Recovering an entry from a dropped column family. "
"WAL must must have been flushed before dropping the column "
"family");
#endif
return;

@@ -5,6 +5,11 @@
#include "util/sst_file_manager_impl.h"
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include <inttypes.h>
#include <vector>
#include "db/db_impl.h"
@@ -189,8 +194,11 @@ bool SstFileManagerImpl::EnoughRoomForCompaction(
needed_headroom -= in_progress_files_size_;
if (free_space < needed_headroom + size_added_by_compaction) {
// We hit the condition of not enough disk space
ROCKS_LOG_ERROR(logger_, "free space [%d bytes] is less than "
"needed headroom [%d bytes]\n", free_space, needed_headroom);
ROCKS_LOG_ERROR(logger_,
"free space [%" PRIu64
" bytes] is less than "
"needed headroom [%" ROCKSDB_PRIszt " bytes]\n",
free_space, needed_headroom);
return false;
}
}
@@ -266,17 +274,22 @@ void SstFileManagerImpl::ClearError() {
// now
if (bg_err_.severity() == Status::Severity::kHardError) {
if (free_space < reserved_disk_buffer_) {
ROCKS_LOG_ERROR(logger_, "free space [%d bytes] is less than "
"required disk buffer [%d bytes]\n", free_space,
reserved_disk_buffer_);
ROCKS_LOG_ERROR(logger_,
"free space [%" PRIu64
" bytes] is less than "
"required disk buffer [%" PRIu64 " bytes]\n",
free_space, reserved_disk_buffer_);
ROCKS_LOG_ERROR(logger_, "Cannot clear hard error\n");
s = Status::NoSpace();
}
} else if (bg_err_.severity() == Status::Severity::kSoftError) {
if (free_space < free_space_trigger_) {
ROCKS_LOG_WARN(logger_, "free space [%d bytes] is less than "
"free space for compaction trigger [%d bytes]\n", free_space,
free_space_trigger_);
ROCKS_LOG_WARN(logger_,
"free space [%" PRIu64
" bytes] is less than "
"free space for compaction trigger [%" PRIu64
" bytes]\n",
free_space, free_space_trigger_);
ROCKS_LOG_WARN(logger_, "Cannot clear soft error\n");
s = Status::NoSpace();
}

@@ -344,7 +344,7 @@ Status RandomTransactionInserter::Verify(DB* db, uint16_t num_sets,
roptions.snapshot
? ((SnapshotImpl*)roptions.snapshot)->min_uncommitted_
: 0ul,
key.size(), key.data(), int_value);
static_cast<int>(key.size()), key.data(), int_value);
total += int_value;
}
delete iter;

@@ -272,7 +272,7 @@ Status BlobDBImpl::OpenAllBlobFiles() {
continue;
} else if (!read_metadata_status.ok()) {
ROCKS_LOG_ERROR(db_options_.info_log,
"Unable to read metadata of blob file % " PRIu64
"Unable to read metadata of blob file %" PRIu64
", status: '%s'",
file_number, read_metadata_status.ToString().c_str());
return read_metadata_status;
@@ -345,8 +345,9 @@ Status BlobDBImpl::CreateWriterLocked(const std::shared_ptr<BlobFile>& bfile) {
uint64_t boffset = bfile->GetFileSize();
if (debug_level_ >= 2 && boffset) {
ROCKS_LOG_DEBUG(db_options_.info_log, "Open blob file: %s with offset: %d",
fpath.c_str(), boffset);
ROCKS_LOG_DEBUG(db_options_.info_log,
"Open blob file: %s with offset: %" PRIu64, fpath.c_str(),
boffset);
}
Writer::ElemType et = Writer::kEtNone;
@@ -356,8 +357,8 @@ Status BlobDBImpl::CreateWriterLocked(const std::shared_ptr<BlobFile>& bfile) {
et = Writer::kEtRecord;
} else if (bfile->file_size_) {
ROCKS_LOG_WARN(db_options_.info_log,
"Open blob file: %s with wrong size: %d", fpath.c_str(),
boffset);
"Open blob file: %s with wrong size: %" PRIu64,
fpath.c_str(), boffset);
return Status::Corruption("Invalid blob file size");
}
@@ -498,7 +499,8 @@ Status BlobDBImpl::SelectBlobFileTTL(uint64_t expiration,
*blob_file = NewBlobFile("SelectBlobFileTTL");
assert(*blob_file != nullptr);
ROCKS_LOG_INFO(db_options_.info_log, "New blob file TTL range: %s %d %d",
ROCKS_LOG_INFO(db_options_.info_log,
"New blob file TTL range: %s %" PRIu64 " %" PRIu64,
(*blob_file)->PathName().c_str(), exp_low, exp_high);
LogFlush(db_options_.info_log);
@@ -736,12 +738,12 @@ Status BlobDBImpl::PutBlobValue(const WriteOptions& /*options*/,
RecordTick(statistics_, BLOB_DB_WRITE_BLOB_TTL);
}
} else {
ROCKS_LOG_ERROR(db_options_.info_log,
"Failed to append blob to FILE: %s: KEY: %s VALSZ: %d"
" status: '%s' blob_file: '%s'",
blob_file->PathName().c_str(), key.ToString().c_str(),
value.size(), s.ToString().c_str(),
blob_file->DumpState().c_str());
ROCKS_LOG_ERROR(
db_options_.info_log,
"Failed to append blob to FILE: %s: KEY: %s VALSZ: %" ROCKSDB_PRIszt
" status: '%s' blob_file: '%s'",
blob_file->PathName().c_str(), key.ToString().c_str(), value.size(),
s.ToString().c_str(), blob_file->DumpState().c_str());
}
}
@@ -1048,20 +1050,19 @@ Status BlobDBImpl::GetBlobValue(const Slice& key, const Slice& index_entry,
ROCKS_LOG_DEBUG(db_options_.info_log,
"Failed to read blob from blob file %" PRIu64
", blob_offset: %" PRIu64 ", blob_size: %" PRIu64
", key_size: " PRIu64 ", read " PRIu64
" bytes, status: '%s'",
", key_size: %" ROCKSDB_PRIszt ", status: '%s'",
bfile->BlobFileNumber(), blob_index.offset(),
blob_index.size(), key.size(), s.ToString().c_str());
return s;
}
if (blob_record.size() != record_size) {
ROCKS_LOG_DEBUG(db_options_.info_log,
"Failed to read blob from blob file %" PRIu64
", blob_offset: %" PRIu64 ", blob_size: %" PRIu64
", key_size: " PRIu64 ", read " PRIu64
" bytes, status: '%s'",
bfile->BlobFileNumber(), blob_index.offset(),
blob_index.size(), key.size(), s.ToString().c_str());
ROCKS_LOG_DEBUG(
db_options_.info_log,
"Failed to read blob from blob file %" PRIu64 ", blob_offset: %" PRIu64
", blob_size: %" PRIu64 ", key_size: %" ROCKSDB_PRIszt
", read %" ROCKSDB_PRIszt " bytes, expected %" PRIu64 " bytes",
bfile->BlobFileNumber(), blob_index.offset(), blob_index.size(),
key.size(), blob_record.size(), record_size);
return Status::Corruption("Failed to retrieve blob from blob index.");
}
@@ -1073,7 +1074,7 @@ Status BlobDBImpl::GetBlobValue(const Slice& key, const Slice& index_entry,
ROCKS_LOG_DEBUG(db_options_.info_log,
"Unable to decode CRC from blob file %" PRIu64
", blob_offset: %" PRIu64 ", blob_size: %" PRIu64
", key size: %" PRIu64 ", status: '%s'",
", key size: %" ROCKSDB_PRIszt ", status: '%s'",
bfile->BlobFileNumber(), blob_index.offset(),
blob_index.size(), key.size(), s.ToString().c_str());
return Status::Corruption("Unable to decode checksum.");
@@ -1174,9 +1175,9 @@ std::pair<bool, int64_t> BlobDBImpl::SanityCheck(bool aborted) {
}
ROCKS_LOG_INFO(db_options_.info_log, "Starting Sanity Check");
ROCKS_LOG_INFO(db_options_.info_log, "Number of files %" PRIu64,
ROCKS_LOG_INFO(db_options_.info_log, "Number of files %" ROCKSDB_PRIszt,
blob_files_.size());
ROCKS_LOG_INFO(db_options_.info_log, "Number of open files %" PRIu64,
ROCKS_LOG_INFO(db_options_.info_log, "Number of open files %" ROCKSDB_PRIszt,
open_ttl_files_.size());
for (auto bfile : open_ttl_files_) {
@@ -1475,7 +1476,7 @@ Status BlobDBImpl::GCFileAndUpdateLSM(const std::shared_ptr<BlobFile>& bfptr,
bfptr->OpenRandomAccessReader(env_, db_options_, env_options_);
if (!reader) {
ROCKS_LOG_ERROR(db_options_.info_log,
"File sequential reader could not be opened",
"File sequential reader could not be opened for %s",
bfptr->PathName().c_str());
return Status::IOError("failed to create sequential reader");
}

@@ -244,14 +244,14 @@ Status BlobFile::ReadMetadata(Env* env, const EnvOptions& env_options) {
file_size_ = file_size;
} else {
ROCKS_LOG_ERROR(info_log_,
"Failed to get size of blob file %" ROCKSDB_PRIszt
"Failed to get size of blob file %" PRIu64
", status: %s",
file_number_, s.ToString().c_str());
return s;
}
if (file_size < BlobLogHeader::kSize) {
ROCKS_LOG_ERROR(info_log_,
"Incomplete blob file blob file %" ROCKSDB_PRIszt
"Incomplete blob file blob file %" PRIu64
", size: %" PRIu64,
file_number_, file_size);
return Status::Corruption("Incomplete blob file header.");
@@ -262,7 +262,7 @@ Status BlobFile::ReadMetadata(Env* env, const EnvOptions& env_options) {
s = env->NewRandomAccessFile(PathName(), &file, env_options);
if (!s.ok()) {
ROCKS_LOG_ERROR(info_log_,
"Failed to open blob file %" ROCKSDB_PRIszt ", status: %s",
"Failed to open blob file %" PRIu64 ", status: %s",
file_number_, s.ToString().c_str());
return s;
}
@@ -275,7 +275,7 @@ Status BlobFile::ReadMetadata(Env* env, const EnvOptions& env_options) {
s = file_reader->Read(0, BlobLogHeader::kSize, &header_slice, header_buf);
if (!s.ok()) {
ROCKS_LOG_ERROR(info_log_,
"Failed to read header of blob file %" ROCKSDB_PRIszt
"Failed to read header of blob file %" PRIu64
", status: %s",
file_number_, s.ToString().c_str());
return s;
@@ -284,7 +284,7 @@ Status BlobFile::ReadMetadata(Env* env, const EnvOptions& env_options) {
s = header.DecodeFrom(header_slice);
if (!s.ok()) {
ROCKS_LOG_ERROR(info_log_,
"Failed to decode header of blob file %" ROCKSDB_PRIszt
"Failed to decode header of blob file %" PRIu64
", status: %s",
file_number_, s.ToString().c_str());
return s;
@@ -309,7 +309,7 @@ Status BlobFile::ReadMetadata(Env* env, const EnvOptions& env_options) {
&footer_slice, footer_buf);
if (!s.ok()) {
ROCKS_LOG_ERROR(info_log_,
"Failed to read footer of blob file %" ROCKSDB_PRIszt
"Failed to read footer of blob file %" PRIu64
", status: %s",
file_number_, s.ToString().c_str());
return s;

@@ -47,7 +47,7 @@ class BlockCacheTier : public PersistentCacheTier {
insert_ops_(static_cast<size_t>(opt_.max_write_pipeline_backlog_size)),
buffer_allocator_(opt.write_buffer_size, opt.write_buffer_count()),
writer_(this, opt_.writer_qdepth, static_cast<size_t>(opt_.writer_dispatch_size)) {
Info(opt_.log, "Initializing allocator. size=%d B count=%d",
Info(opt_.log, "Initializing allocator. size=%d B count=%" ROCKSDB_PRIszt,
opt_.write_buffer_size, opt_.write_buffer_count());
}
