Add macros to include file name and line number during logging

Summary:
Current logging:
```
2017/03/14-14:20:30.393432 7fedde9f5700 (Original Log Time 2017/03/14-14:20:30.393414) [default] Level summary: base level 1 max bytes base 268435456 files[1 0 0 0 0 0 0] max score 0.25
2017/03/14-14:20:30.393438 7fedde9f5700 [JOB 2] Try to delete WAL files size 61417909, prev total WAL file size 73820858, number of live WAL files 2.
2017/03/14-14:20:30.393464 7fedde9f5700 [DEBUG] [JOB 2] Delete /dev/shm/old_logging//MANIFEST-000001 type=3 #1 -- OK
2017/03/14-14:20:30.393472 7fedde9f5700 [DEBUG] [JOB 2] Delete /dev/shm/old_logging//000003.log type=0 #3 -- OK
2017/03/14-14:20:31.427103 7fedd49f1700 [default] New memtable created with log file: #9. Immutable memtables: 0.
2017/03/14-14:20:31.427179 7fedde9f5700 [JOB 3] Syncing log #6
2017/03/14-14:20:31.427190 7fedde9f5700 (Original Log Time 2017/03/14-14:20:31.427170) Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots allowed 1, compaction slots scheduled 1
2017/03/14-14:20:31.
```
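With the new macros, every log line below header level carries a `[file:line]` prefix pointing at its call site, so a message like the `Delete ... type=3` line above becomes traceable to the exact statement that emitted it. A minimal sketch of how such macros can work, assuming the existing `rocksdb::Log(InfoLogLevel, Logger*, const char* format, ...)` overload; the helper name here is illustrative, not necessarily what `util/logging.h` uses:

```
// Sketch only: file/line-prepending log macros built on __FILE__/__LINE__.
#include <cstring>

// Illustrative helper: strip the directory prefix so only the base file
// name is printed (the real helper may keep more of the path).
inline const char* ShortFileName(const char* file) {
  const char* p = std::strrchr(file, '/');
  return p != nullptr ? p + 1 : file;
}

// Header lines keep the plain format; all other levels gain "[file:line] ".
#define ROCKS_LOG_HEADER(LGR, FMT, ...) \
  rocksdb::Log(InfoLogLevel::HEADER_LEVEL, LGR, FMT, ##__VA_ARGS__)

// FMT must be a string literal so it concatenates with the prefix.
#define ROCKS_LOG_INFO(LGR, FMT, ...)                           \
  rocksdb::Log(InfoLogLevel::INFO_LEVEL, LGR, ("[%s:%d] " FMT), \
               ShortFileName(__FILE__), __LINE__, ##__VA_ARGS__)
```

Under this shape, `ROCKS_LOG_INFO(log, "Shutdown complete")` would emit something like `... [db_impl.cc:517] Shutdown complete`.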
Closes https://github.com/facebook/rocksdb/pull/1990

Differential Revision: D4708695

Pulled By: IslamAbdelRahman

fbshipit-source-id: cb8968f
Branch: main
Author: Islam AbdelRahman (committed by Facebook Github Bot)
Parent: d525718a93
Commit: e19163688b
37 files changed (lines changed per file):

  db/auto_roll_logger_test.cc                         |  38
  db/column_family.cc                                 |  94
  db/compacted_db_impl.cc                             |   4
  db/compaction_job.cc                                |  40
  db/compaction_picker.cc                             | 115
  db/db_filesnapshot.cc                               |  19
  db/db_impl.cc                                       | 463
  db/db_impl_experimental.cc                          |  27
  db/db_impl_readonly.cc                              |   4
  db/db_iter.cc                                       |   8
  db/external_sst_file_ingestion_job.cc               |  18
  db/flush_job.cc                                     |  29
  db/memtable_list.cc                                 |  18
  db/repair.cc                                        |  62
  db/transaction_log_impl.h                           |   9
  db/version_set.cc                                   |  50
  db/wal_manager.cc                                   |  68
  table/block_based_table_builder.cc                  |   7
  table/block_based_table_reader.cc                   |  64
  table/format.cc                                     |  17
  table/meta_blocks.cc                                |   5
  table/plain_table_index.cc                          |  17
  util/arena.cc                                       |   8
  util/cf_options.cc                                  |  95
  util/db_options.cc                                  | 251
  util/delete_scheduler.cc                            |  13
  util/env_hdfs.cc                                    | 115
  util/env_test.cc                                    |  17
  util/event_logger.cc                                |   1
  util/logging.h                                      |  36
  util/options.cc                                     | 258
  utilities/backupable/backupable_db.cc               | 166
  utilities/checkpoint/checkpoint.cc                  |  40
  utilities/merge_operators/uint64add.cc              |   8
  utilities/persistent_cache/block_cache_tier.cc      |   7
  utilities/persistent_cache/block_cache_tier_file.cc |  21
  utilities/ttl/db_ttl_impl.h                         |  17

db/auto_roll_logger_test.cc
@@ -6,21 +6,22 @@
#ifndef ROCKSDB_LITE
#include <string>
#include <thread>
#include <vector>
#include "db/auto_roll_logger.h"
#include <errno.h>
#include <sys/stat.h>
#include <algorithm>
#include <cmath>
#include <iostream>
#include <fstream>
#include <iostream>
#include <iterator>
#include <algorithm>
#include "db/auto_roll_logger.h"
#include <string>
#include <thread>
#include <vector>
#include "port/port.h"
#include "rocksdb/db.h"
#include "util/logging.h"
#include "util/sync_point.h"
#include "util/testharness.h"
#include "rocksdb/db.h"
#include <sys/stat.h>
#include <errno.h>
namespace rocksdb {
@@ -62,10 +63,10 @@ Env* AutoRollLoggerTest::env = Env::Default();
// In this test we only want to Log some simple log message with
// no format. LogMessage() provides such a simple interface and
// avoids the [format-security] warning which occurs when you
// call Log(logger, log_message) directly.
// call ROCKS_LOG_INFO(logger, log_message) directly.
namespace {
void LogMessage(Logger* logger, const char* message) {
Log(logger, "%s", message);
ROCKS_LOG_INFO(logger, "%s", message);
}
void LogMessage(const InfoLogLevel log_level, Logger* logger,
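For reference, the [format-security] warning fires whenever a non-literal string is passed as the format argument. A hypothetical illustration of the unsafe and safe forms:

```
void LogUserMessage(Logger* logger, const char* message) {
  // Unsafe: warns under -Wformat-security, because a stray "%s" or "%n"
  // inside `message` would be interpreted as a format directive.
  // ROCKS_LOG_INFO(logger, message);

  // Safe: the format string is a literal; `message` is treated as data.
  ROCKS_LOG_INFO(logger, "%s", message);
}
```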
@@ -332,12 +333,12 @@ TEST_F(AutoRollLoggerTest, InfoLogLevel) {
logger.SetInfoLogLevel((InfoLogLevel)log_level);
// again, messages with level smaller than log_level will not be logged.
Log(InfoLogLevel::HEADER_LEVEL, &logger, "%s", kSampleMessage.c_str());
Debug(&logger, "%s", kSampleMessage.c_str());
Info(&logger, "%s", kSampleMessage.c_str());
Warn(&logger, "%s", kSampleMessage.c_str());
Error(&logger, "%s", kSampleMessage.c_str());
Fatal(&logger, "%s", kSampleMessage.c_str());
ROCKS_LOG_HEADER(&logger, "%s", kSampleMessage.c_str());
ROCKS_LOG_DEBUG(&logger, "%s", kSampleMessage.c_str());
ROCKS_LOG_INFO(&logger, "%s", kSampleMessage.c_str());
ROCKS_LOG_WARN(&logger, "%s", kSampleMessage.c_str());
ROCKS_LOG_ERROR(&logger, "%s", kSampleMessage.c_str());
ROCKS_LOG_FATAL(&logger, "%s", kSampleMessage.c_str());
log_lines += InfoLogLevel::HEADER_LEVEL - log_level + 1;
}
}
@@ -411,8 +412,7 @@ TEST_F(AutoRollLoggerTest, LogHeaderTest) {
} else if (test_num == 1) {
// HEADER_LEVEL should make this behave like calling Header()
for (size_t i = 0; i < MAX_HEADERS; i++) {
Log(InfoLogLevel::HEADER_LEVEL, &logger, "%s %d",
HEADER_STR.c_str(), i);
ROCKS_LOG_HEADER(&logger, "%s %d", HEADER_STR.c_str(), i);
}
}

db/column_family.cc
@@ -205,8 +205,8 @@ ColumnFamilyOptions SanitizeOptions(const ImmutableDBOptions& db_options,
}
if (result.level0_file_num_compaction_trigger == 0) {
Warn(db_options.info_log.get(),
"level0_file_num_compaction_trigger cannot be 0");
ROCKS_LOG_WARN(db_options.info_log.get(),
"level0_file_num_compaction_trigger cannot be 0");
result.level0_file_num_compaction_trigger = 1;
}
@@ -214,14 +214,14 @@ ColumnFamilyOptions SanitizeOptions(const ImmutableDBOptions& db_options,
result.level0_slowdown_writes_trigger ||
result.level0_slowdown_writes_trigger <
result.level0_file_num_compaction_trigger) {
Warn(db_options.info_log.get(),
"This condition must be satisfied: "
"level0_stop_writes_trigger(%d) >= "
"level0_slowdown_writes_trigger(%d) >= "
"level0_file_num_compaction_trigger(%d)",
result.level0_stop_writes_trigger,
result.level0_slowdown_writes_trigger,
result.level0_file_num_compaction_trigger);
ROCKS_LOG_WARN(db_options.info_log.get(),
"This condition must be satisfied: "
"level0_stop_writes_trigger(%d) >= "
"level0_slowdown_writes_trigger(%d) >= "
"level0_file_num_compaction_trigger(%d)",
result.level0_stop_writes_trigger,
result.level0_slowdown_writes_trigger,
result.level0_file_num_compaction_trigger);
if (result.level0_slowdown_writes_trigger <
result.level0_file_num_compaction_trigger) {
result.level0_slowdown_writes_trigger =
@@ -231,14 +231,14 @@ ColumnFamilyOptions SanitizeOptions(const ImmutableDBOptions& db_options,
result.level0_slowdown_writes_trigger) {
result.level0_stop_writes_trigger = result.level0_slowdown_writes_trigger;
}
Warn(db_options.info_log.get(),
"Adjust the value to "
"level0_stop_writes_trigger(%d)"
"level0_slowdown_writes_trigger(%d)"
"level0_file_num_compaction_trigger(%d)",
result.level0_stop_writes_trigger,
result.level0_slowdown_writes_trigger,
result.level0_file_num_compaction_trigger);
ROCKS_LOG_WARN(db_options.info_log.get(),
"Adjust the value to "
"level0_stop_writes_trigger(%d)"
"level0_slowdown_writes_trigger(%d)"
"level0_file_num_compaction_trigger(%d)",
result.level0_stop_writes_trigger,
result.level0_slowdown_writes_trigger,
result.level0_file_num_compaction_trigger);
}
if (result.soft_pending_compaction_bytes_limit == 0) {
@@ -386,27 +386,27 @@ ColumnFamilyData::ColumnFamilyData(
} else if (ioptions_.compaction_style == kCompactionStyleNone) {
compaction_picker_.reset(new NullCompactionPicker(
ioptions_, &internal_comparator_));
Log(InfoLogLevel::WARN_LEVEL, ioptions_.info_log,
"Column family %s does not use any background compaction. "
"Compactions can only be done via CompactFiles\n",
GetName().c_str());
ROCKS_LOG_WARN(ioptions_.info_log,
"Column family %s does not use any background compaction. "
"Compactions can only be done via CompactFiles\n",
GetName().c_str());
#endif // !ROCKSDB_LITE
} else {
Log(InfoLogLevel::ERROR_LEVEL, ioptions_.info_log,
"Unable to recognize the specified compaction style %d. "
"Column family %s will use kCompactionStyleLevel.\n",
ioptions_.compaction_style, GetName().c_str());
ROCKS_LOG_ERROR(ioptions_.info_log,
"Unable to recognize the specified compaction style %d. "
"Column family %s will use kCompactionStyleLevel.\n",
ioptions_.compaction_style, GetName().c_str());
compaction_picker_.reset(
new LevelCompactionPicker(ioptions_, &internal_comparator_));
}
if (column_family_set_->NumberOfColumnFamilies() < 10) {
Log(InfoLogLevel::INFO_LEVEL, ioptions_.info_log,
"--------------- Options for column family [%s]:\n", name.c_str());
ROCKS_LOG_INFO(ioptions_.info_log,
"--------------- Options for column family [%s]:\n",
name.c_str());
initial_cf_options_.Dump(ioptions_.info_log);
} else {
Log(InfoLogLevel::INFO_LEVEL, ioptions_.info_log,
"\t(skipping printing options)\n");
ROCKS_LOG_INFO(ioptions_.info_log, "\t(skipping printing options)\n");
}
}
@@ -620,7 +620,8 @@ void ColumnFamilyData::RecalculateWriteStallConditions(
if (imm()->NumNotFlushed() >= mutable_cf_options.max_write_buffer_number) {
write_controller_token_ = write_controller->GetStopToken();
internal_stats_->AddCFStats(InternalStats::MEMTABLE_COMPACTION, 1);
Log(InfoLogLevel::WARN_LEVEL, ioptions_.info_log,
ROCKS_LOG_WARN(
ioptions_.info_log,
"[%s] Stopping writes because we have %d immutable memtables "
"(waiting for flush), max_write_buffer_number is set to %d",
name_.c_str(), imm()->NumNotFlushed(),
@@ -634,9 +635,9 @@ void ColumnFamilyData::RecalculateWriteStallConditions(
internal_stats_->AddCFStats(
InternalStats::LEVEL0_NUM_FILES_WITH_COMPACTION, 1);
}
Log(InfoLogLevel::WARN_LEVEL, ioptions_.info_log,
"[%s] Stopping writes because we have %d level-0 files",
name_.c_str(), vstorage->l0_delay_trigger_count());
ROCKS_LOG_WARN(ioptions_.info_log,
"[%s] Stopping writes because we have %d level-0 files",
name_.c_str(), vstorage->l0_delay_trigger_count());
} else if (!mutable_cf_options.disable_auto_compactions &&
mutable_cf_options.hard_pending_compaction_bytes_limit > 0 &&
compaction_needed_bytes >=
@@ -644,7 +645,8 @@ void ColumnFamilyData::RecalculateWriteStallConditions(
write_controller_token_ = write_controller->GetStopToken();
internal_stats_->AddCFStats(
InternalStats::HARD_PENDING_COMPACTION_BYTES_LIMIT, 1);
Log(InfoLogLevel::WARN_LEVEL, ioptions_.info_log,
ROCKS_LOG_WARN(
ioptions_.info_log,
"[%s] Stopping writes because of estimated pending compaction "
"bytes %" PRIu64,
name_.c_str(), compaction_needed_bytes);
@@ -656,7 +658,8 @@ void ColumnFamilyData::RecalculateWriteStallConditions(
prev_compaction_needed_bytes_, was_stopped,
mutable_cf_options.disable_auto_compactions);
internal_stats_->AddCFStats(InternalStats::MEMTABLE_SLOWDOWN, 1);
Log(InfoLogLevel::WARN_LEVEL, ioptions_.info_log,
ROCKS_LOG_WARN(
ioptions_.info_log,
"[%s] Stalling writes because we have %d immutable memtables "
"(waiting for flush), max_write_buffer_number is set to %d "
"rate %" PRIu64,
@@ -679,11 +682,11 @@ void ColumnFamilyData::RecalculateWriteStallConditions(
internal_stats_->AddCFStats(
InternalStats::LEVEL0_SLOWDOWN_WITH_COMPACTION, 1);
}
Log(InfoLogLevel::WARN_LEVEL, ioptions_.info_log,
"[%s] Stalling writes because we have %d level-0 files "
"rate %" PRIu64,
name_.c_str(), vstorage->l0_delay_trigger_count(),
write_controller->delayed_write_rate());
ROCKS_LOG_WARN(ioptions_.info_log,
"[%s] Stalling writes because we have %d level-0 files "
"rate %" PRIu64,
name_.c_str(), vstorage->l0_delay_trigger_count(),
write_controller->delayed_write_rate());
} else if (!mutable_cf_options.disable_auto_compactions &&
mutable_cf_options.soft_pending_compaction_bytes_limit > 0 &&
vstorage->estimated_compaction_needed_bytes() >=
@@ -705,7 +708,8 @@ void ColumnFamilyData::RecalculateWriteStallConditions(
mutable_cf_options.disable_auto_compactions);
internal_stats_->AddCFStats(
InternalStats::SOFT_PENDING_COMPACTION_BYTES_LIMIT, 1);
Log(InfoLogLevel::WARN_LEVEL, ioptions_.info_log,
ROCKS_LOG_WARN(
ioptions_.info_log,
"[%s] Stalling writes because of estimated pending compaction "
"bytes %" PRIu64 " rate %" PRIu64,
name_.c_str(), vstorage->estimated_compaction_needed_bytes(),
@@ -717,7 +721,8 @@ void ColumnFamilyData::RecalculateWriteStallConditions(
mutable_cf_options.level0_slowdown_writes_trigger)) {
write_controller_token_ =
write_controller->GetCompactionPressureToken();
Log(InfoLogLevel::WARN_LEVEL, ioptions_.info_log,
ROCKS_LOG_WARN(
ioptions_.info_log,
"[%s] Increasing compaction threads because we have %d level-0 "
"files ",
name_.c_str(), vstorage->l0_delay_trigger_count());
@@ -730,7 +735,8 @@ void ColumnFamilyData::RecalculateWriteStallConditions(
write_controller_token_ =
write_controller->GetCompactionPressureToken();
if (mutable_cf_options.soft_pending_compaction_bytes_limit > 0) {
Log(InfoLogLevel::WARN_LEVEL, ioptions_.info_log,
ROCKS_LOG_WARN(
ioptions_.info_log,
"[%s] Increasing compaction threads because of estimated pending "
"compaction "
"bytes %" PRIu64,

db/compacted_db_impl.cc
@@ -154,8 +154,8 @@ Status CompactedDBImpl::Open(const Options& options,
std::unique_ptr<CompactedDBImpl> db(new CompactedDBImpl(db_options, dbname));
Status s = db->Init(options);
if (s.ok()) {
Log(INFO_LEVEL, db->immutable_db_options_.info_log,
"Opened the db as fully compacted mode");
ROCKS_LOG_INFO(db->immutable_db_options_.info_log,
"Opened the db as fully compacted mode");
LogFlush(db->immutable_db_options_.info_log);
*dbptr = db.release();
}

db/compaction_job.cc
@@ -600,7 +600,7 @@ Status CompactionJob::Install(const MutableCFOptions& mutable_cf_options) {
write_amp = stats.bytes_written /
static_cast<double>(stats.bytes_read_non_output_levels);
}
LogToBuffer(
ROCKS_LOG_BUFFER(
log_buffer_,
"[%s] compacted to: %s, MB/sec: %.1f rd, %.1f wr, level %d, "
"files in(%d, %d) out(%d) "
@@ -1091,12 +1091,12 @@ Status CompactionJob::FinishCompactionOutputFile(
tp = sub_compact->builder->GetTableProperties();
sub_compact->current_output()->table_properties =
std::make_shared<TableProperties>(tp);
Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
"[%s] [JOB %d] Generated table #%" PRIu64 ": %" PRIu64
" keys, %" PRIu64 " bytes%s",
cfd->GetName().c_str(), job_id_, output_number, current_entries,
current_bytes,
meta->marked_for_compaction ? " (need compaction)" : "");
ROCKS_LOG_INFO(db_options_.info_log,
"[%s] [JOB %d] Generated table #%" PRIu64 ": %" PRIu64
" keys, %" PRIu64 " bytes%s",
cfd->GetName().c_str(), job_id_, output_number,
current_entries, current_bytes,
meta->marked_for_compaction ? " (need compaction)" : "");
}
}
std::string fname = TableFileName(db_options_.db_paths, meta->fd.GetNumber(),
@@ -1142,17 +1142,16 @@ Status CompactionJob::InstallCompactionResults(
if (!versions_->VerifyCompactionFileConsistency(compaction)) {
Compaction::InputLevelSummaryBuffer inputs_summary;
Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log,
"[%s] [JOB %d] Compaction %s aborted",
compaction->column_family_data()->GetName().c_str(), job_id_,
compaction->InputLevelSummary(&inputs_summary));
ROCKS_LOG_ERROR(db_options_.info_log, "[%s] [JOB %d] Compaction %s aborted",
compaction->column_family_data()->GetName().c_str(),
job_id_, compaction->InputLevelSummary(&inputs_summary));
return Status::Corruption("Compaction input files inconsistent");
}
{
Compaction::InputLevelSummaryBuffer inputs_summary;
Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
"[%s] [JOB %d] Compacted %s => %" PRIu64 " bytes",
ROCKS_LOG_INFO(
db_options_.info_log, "[%s] [JOB %d] Compacted %s => %" PRIu64 " bytes",
compaction->column_family_data()->GetName().c_str(), job_id_,
compaction->InputLevelSummary(&inputs_summary), compact_->total_bytes);
}
@@ -1200,7 +1199,8 @@ Status CompactionJob::OpenCompactionOutputFile(
unique_ptr<WritableFile> writable_file;
Status s = NewWritableFile(env_, fname, &writable_file, env_options_);
if (!s.ok()) {
Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log,
ROCKS_LOG_ERROR(
db_options_.info_log,
"[%s] [JOB %d] OpenCompactionOutputFiles for table #%" PRIu64
" fails at NewWritableFile with status %s",
sub_compact->compaction->column_family_data()->GetName().c_str(),
@@ -1376,14 +1376,14 @@ void CompactionJob::LogCompaction() {
// we're not logging
if (db_options_.info_log_level <= InfoLogLevel::INFO_LEVEL) {
Compaction::InputLevelSummaryBuffer inputs_summary;
Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
"[%s] [JOB %d] Compacting %s, score %.2f", cfd->GetName().c_str(),
job_id_, compaction->InputLevelSummary(&inputs_summary),
compaction->score());
ROCKS_LOG_INFO(
db_options_.info_log, "[%s] [JOB %d] Compacting %s, score %.2f",
cfd->GetName().c_str(), job_id_,
compaction->InputLevelSummary(&inputs_summary), compaction->score());
char scratch[2345];
compaction->Summary(scratch, sizeof(scratch));
Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
"[%s] Compaction start summary: %s\n", cfd->GetName().c_str(), scratch);
ROCKS_LOG_INFO(db_options_.info_log, "[%s] Compaction start summary: %s\n",
cfd->GetName().c_str(), scratch);
// build event logger report
auto stream = event_logger_->Log();
stream << "job" << job_id_ << "event"

db/compaction_picker.cc
@@ -266,7 +266,8 @@ bool CompactionPicker::ExpandWhileOverlapping(const std::string& cf_name,
// If, after the expansion, there are files that are already under
// compaction, then we must drop/cancel this compaction.
if (FilesInCompaction(inputs->files)) {
Log(InfoLogLevel::WARN_LEVEL, ioptions_.info_log,
ROCKS_LOG_WARN(
ioptions_.info_log,
"[%s] ExpandWhileOverlapping() failure because some of the necessary"
" compaction input files are currently being compacted.",
cf_name.c_str());
@@ -514,14 +515,15 @@ bool CompactionPicker::SetupOtherInputs(
}
}
if (expand_inputs) {
Log(InfoLogLevel::INFO_LEVEL, ioptions_.info_log,
"[%s] Expanding@%d %" ROCKSDB_PRIszt "+%" ROCKSDB_PRIszt "(%" PRIu64
"+%" PRIu64 " bytes) to %" ROCKSDB_PRIszt "+%" ROCKSDB_PRIszt
" (%" PRIu64 "+%" PRIu64 "bytes)\n",
cf_name.c_str(), input_level, inputs->size(),
output_level_inputs->size(), inputs_size, output_level_inputs_size,
expanded_inputs.size(), output_level_inputs->size(),
expanded_inputs_size, output_level_inputs_size);
ROCKS_LOG_INFO(ioptions_.info_log,
"[%s] Expanding@%d %" ROCKSDB_PRIszt "+%" ROCKSDB_PRIszt
"(%" PRIu64 "+%" PRIu64 " bytes) to %" ROCKSDB_PRIszt
"+%" ROCKSDB_PRIszt " (%" PRIu64 "+%" PRIu64 "bytes)\n",
cf_name.c_str(), input_level, inputs->size(),
output_level_inputs->size(), inputs_size,
output_level_inputs_size, expanded_inputs.size(),
output_level_inputs->size(), expanded_inputs_size,
output_level_inputs_size);
inputs->files = expanded_inputs.files;
}
}
@@ -1442,24 +1444,25 @@ Compaction* UniversalCompactionPicker::PickCompaction(
if (sorted_runs.size() == 0 ||
sorted_runs.size() <
(unsigned int)mutable_cf_options.level0_file_num_compaction_trigger) {
LogToBuffer(log_buffer, "[%s] Universal: nothing to do\n", cf_name.c_str());
ROCKS_LOG_BUFFER(log_buffer, "[%s] Universal: nothing to do\n",
cf_name.c_str());
TEST_SYNC_POINT_CALLBACK("UniversalCompactionPicker::PickCompaction:Return",
nullptr);
return nullptr;
}
VersionStorageInfo::LevelSummaryStorage tmp;
LogToBuffer(log_buffer, 3072,
"[%s] Universal: sorted runs files(%" ROCKSDB_PRIszt "): %s\n",
cf_name.c_str(), sorted_runs.size(),
vstorage->LevelSummary(&tmp));
ROCKS_LOG_BUFFER_MAX_SZ(
log_buffer, 3072,
"[%s] Universal: sorted runs files(%" ROCKSDB_PRIszt "): %s\n",
cf_name.c_str(), sorted_runs.size(), vstorage->LevelSummary(&tmp));
// Check for size amplification first.
Compaction* c;
if ((c = PickCompactionUniversalSizeAmp(cf_name, mutable_cf_options, vstorage,
score, sorted_runs, log_buffer)) !=
nullptr) {
LogToBuffer(log_buffer, "[%s] Universal: compacting for size amp\n",
cf_name.c_str());
ROCKS_LOG_BUFFER(log_buffer, "[%s] Universal: compacting for size amp\n",
cf_name.c_str());
} else {
// Size amplification is within limits. Try reducing read
// amplification while maintaining file size ratios.
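The buffered variants follow the same pattern: `ROCKS_LOG_BUFFER` wraps `LogToBuffer`, and `ROCKS_LOG_BUFFER_MAX_SZ` forwards an explicit maximum entry size (3072 bytes for the sorted-runs summary above, matching the old `LogToBuffer(log_buffer, 3072, ...)` call). A sketch under the same assumptions as before, with `ShortFileName` again being an illustrative helper:

```
// Sketch: buffered logging with call-site info, assuming the two existing
// LogToBuffer overloads (with and without a max entry size).
#define ROCKS_LOG_BUFFER(LOG_BUF, FMT, ...)       \
  rocksdb::LogToBuffer(LOG_BUF, ("[%s:%d] " FMT), \
                       ShortFileName(__FILE__), __LINE__, ##__VA_ARGS__)

#define ROCKS_LOG_BUFFER_MAX_SZ(LOG_BUF, MAX_LOG_SIZE, FMT, ...)  \
  rocksdb::LogToBuffer(LOG_BUF, (MAX_LOG_SIZE), ("[%s:%d] " FMT), \
                       ShortFileName(__FILE__), __LINE__, ##__VA_ARGS__)
```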
@@ -1468,8 +1471,9 @@ Compaction* UniversalCompactionPicker::PickCompaction(
if ((c = PickCompactionUniversalReadAmp(
cf_name, mutable_cf_options, vstorage, score, ratio, UINT_MAX,
sorted_runs, log_buffer)) != nullptr) {
LogToBuffer(log_buffer, "[%s] Universal: compacting for size ratio\n",
cf_name.c_str());
ROCKS_LOG_BUFFER(log_buffer,
"[%s] Universal: compacting for size ratio\n",
cf_name.c_str());
} else {
// Size amplification and file size ratios are within configured limits.
// If max read amplification is exceeding configured limits, then force
@@ -1497,9 +1501,9 @@ Compaction* UniversalCompactionPicker::PickCompaction(
if ((c = PickCompactionUniversalReadAmp(
cf_name, mutable_cf_options, vstorage, score, UINT_MAX,
num_files, sorted_runs, log_buffer)) != nullptr) {
LogToBuffer(log_buffer,
"[%s] Universal: compacting for file num -- %u\n",
cf_name.c_str(), num_files);
ROCKS_LOG_BUFFER(log_buffer,
"[%s] Universal: compacting for file num -- %u\n",
cf_name.c_str(), num_files);
}
}
}
@@ -1640,10 +1644,10 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp(
}
char file_num_buf[kFormatFileNumberBufSize];
sr->Dump(file_num_buf, sizeof(file_num_buf));
LogToBuffer(log_buffer,
"[%s] Universal: %s"
"[%d] being compacted, skipping",
cf_name.c_str(), file_num_buf, loop);
ROCKS_LOG_BUFFER(log_buffer,
"[%s] Universal: %s"
"[%d] being compacted, skipping",
cf_name.c_str(), file_num_buf, loop);
sr = nullptr;
}
@@ -1654,8 +1658,8 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp(
if (sr != nullptr) {
char file_num_buf[kFormatFileNumberBufSize];
sr->Dump(file_num_buf, sizeof(file_num_buf), true);
LogToBuffer(log_buffer, "[%s] Universal: Possible candidate %s[%d].",
cf_name.c_str(), file_num_buf, loop);
ROCKS_LOG_BUFFER(log_buffer, "[%s] Universal: Possible candidate %s[%d].",
cf_name.c_str(), file_num_buf, loop);
}
// Check if the succeeding files need compaction.
@@ -1706,8 +1710,8 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp(
const SortedRun* skipping_sr = &sorted_runs[i];
char file_num_buf[256];
skipping_sr->DumpSizeInfo(file_num_buf, sizeof(file_num_buf), loop);
LogToBuffer(log_buffer, "[%s] Universal: Skipping %s", cf_name.c_str(),
file_num_buf);
ROCKS_LOG_BUFFER(log_buffer, "[%s] Universal: Skipping %s",
cf_name.c_str(), file_num_buf);
}
}
}
@@ -1768,8 +1772,8 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp(
}
char file_num_buf[256];
picking_sr.DumpSizeInfo(file_num_buf, sizeof(file_num_buf), i);
LogToBuffer(log_buffer, "[%s] Universal: Picking %s", cf_name.c_str(),
file_num_buf);
ROCKS_LOG_BUFFER(log_buffer, "[%s] Universal: Picking %s", cf_name.c_str(),
file_num_buf);
}
CompactionReason compaction_reason;
@@ -1815,9 +1819,9 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalSizeAmp(
}
char file_num_buf[kFormatFileNumberBufSize];
sr->Dump(file_num_buf, sizeof(file_num_buf), true);
LogToBuffer(log_buffer, "[%s] Universal: skipping %s[%d] compacted %s",
cf_name.c_str(), file_num_buf, loop,
" cannot be a candidate to reduce size amp.\n");
ROCKS_LOG_BUFFER(log_buffer, "[%s] Universal: skipping %s[%d] compacted %s",
cf_name.c_str(), file_num_buf, loop,
" cannot be a candidate to reduce size amp.\n");
sr = nullptr;
}
@@ -1827,10 +1831,10 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalSizeAmp(
{
char file_num_buf[kFormatFileNumberBufSize];
sr->Dump(file_num_buf, sizeof(file_num_buf), true);
LogToBuffer(log_buffer,
"[%s] Universal: First candidate %s[%" ROCKSDB_PRIszt "] %s",
cf_name.c_str(), file_num_buf, start_index,
" to reduce size amp.\n");
ROCKS_LOG_BUFFER(
log_buffer,
"[%s] Universal: First candidate %s[%" ROCKSDB_PRIszt "] %s",
cf_name.c_str(), file_num_buf, start_index, " to reduce size amp.\n");
}
// keep adding up all the remaining files
@@ -1839,7 +1843,7 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalSizeAmp(
if (sr->being_compacted) {
char file_num_buf[kFormatFileNumberBufSize];
sr->Dump(file_num_buf, sizeof(file_num_buf), true);
LogToBuffer(
ROCKS_LOG_BUFFER(
log_buffer, "[%s] Universal: Possible candidate %s[%d] %s",
cf_name.c_str(), file_num_buf, start_index,
" is already being compacted. No size amp reduction possible.\n");
@@ -1857,14 +1861,14 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalSizeAmp(
// size amplification = percentage of additional size
if (candidate_size * 100 < ratio * earliest_file_size) {
LogToBuffer(
ROCKS_LOG_BUFFER(
log_buffer,
"[%s] Universal: size amp not needed. newer-files-total-size %" PRIu64
" earliest-file-size %" PRIu64,
cf_name.c_str(), candidate_size, earliest_file_size);
return nullptr;
} else {
LogToBuffer(
ROCKS_LOG_BUFFER(
log_buffer,
"[%s] Universal: size amp needed. newer-files-total-size %" PRIu64
" earliest-file-size %" PRIu64,
@@ -1898,8 +1902,8 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalSizeAmp(
}
char file_num_buf[256];
picking_sr.DumpSizeInfo(file_num_buf, sizeof(file_num_buf), loop);
LogToBuffer(log_buffer, "[%s] Universal: size amp picking %s",
cf_name.c_str(), file_num_buf);
ROCKS_LOG_BUFFER(log_buffer, "[%s] Universal: size amp picking %s",
cf_name.c_str(), file_num_buf);
}
return new Compaction(
@@ -1934,19 +1938,20 @@ Compaction* FIFOCompactionPicker::PickCompaction(
if (total_size <= ioptions_.compaction_options_fifo.max_table_files_size ||
level_files.size() == 0) {
// total size not exceeded
LogToBuffer(log_buffer,
"[%s] FIFO compaction: nothing to do. Total size %" PRIu64
", max size %" PRIu64 "\n",
cf_name.c_str(), total_size,
ioptions_.compaction_options_fifo.max_table_files_size);
ROCKS_LOG_BUFFER(log_buffer,
"[%s] FIFO compaction: nothing to do. Total size %" PRIu64
", max size %" PRIu64 "\n",
cf_name.c_str(), total_size,
ioptions_.compaction_options_fifo.max_table_files_size);
return nullptr;
}
if (!level0_compactions_in_progress_.empty()) {
LogToBuffer(log_buffer,
"[%s] FIFO compaction: Already executing compaction. No need "
"to run parallel compactions since compactions are very fast",
cf_name.c_str());
ROCKS_LOG_BUFFER(
log_buffer,
"[%s] FIFO compaction: Already executing compaction. No need "
"to run parallel compactions since compactions are very fast",
cf_name.c_str());
return nullptr;
}
@@ -1960,9 +1965,9 @@ Compaction* FIFOCompactionPicker::PickCompaction(
inputs[0].files.push_back(f);
char tmp_fsize[16];
AppendHumanBytes(f->fd.GetFileSize(), tmp_fsize, sizeof(tmp_fsize));
LogToBuffer(log_buffer, "[%s] FIFO compaction: picking file %" PRIu64
" with size %s for deletion",
cf_name.c_str(), f->fd.GetNumber(), tmp_fsize);
ROCKS_LOG_BUFFER(log_buffer, "[%s] FIFO compaction: picking file %" PRIu64
" with size %s for deletion",
cf_name.c_str(), f->fd.GetNumber(), tmp_fsize);
if (total_size <= ioptions_.compaction_options_fifo.max_table_files_size) {
break;
}

db/db_filesnapshot.cc
@@ -34,12 +34,11 @@ Status DBImpl::DisableFileDeletions() {
InstrumentedMutexLock l(&mutex_);
++disable_delete_obsolete_files_;
if (disable_delete_obsolete_files_ == 1) {
Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
"File Deletions Disabled");
ROCKS_LOG_INFO(immutable_db_options_.info_log, "File Deletions Disabled");
} else {
Log(InfoLogLevel::WARN_LEVEL, immutable_db_options_.info_log,
"File Deletions Disabled, but already disabled. Counter: %d",
disable_delete_obsolete_files_);
ROCKS_LOG_WARN(immutable_db_options_.info_log,
"File Deletions Disabled, but already disabled. Counter: %d",
disable_delete_obsolete_files_);
}
return Status::OK();
}
@@ -58,12 +57,12 @@ Status DBImpl::EnableFileDeletions(bool force) {
--disable_delete_obsolete_files_;
}
if (disable_delete_obsolete_files_ == 0) {
Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
"File Deletions Enabled");
ROCKS_LOG_INFO(immutable_db_options_.info_log, "File Deletions Enabled");
should_purge_files = true;
FindObsoleteFiles(&job_context, true);
} else {
Log(InfoLogLevel::WARN_LEVEL, immutable_db_options_.info_log,
ROCKS_LOG_WARN(
immutable_db_options_.info_log,
"File Deletions Enable, but not really enabled. Counter: %d",
disable_delete_obsolete_files_);
}
@@ -109,8 +108,8 @@ Status DBImpl::GetLiveFiles(std::vector<std::string>& ret,
if (!status.ok()) {
mutex_.Unlock();
Log(InfoLogLevel::ERROR_LEVEL, immutable_db_options_.info_log,
"Cannot Flush data %s\n", status.ToString().c_str());
ROCKS_LOG_ERROR(immutable_db_options_.info_log, "Cannot Flush data %s\n",
status.ToString().c_str());
return status;
}
}

db/db_impl.cc
@@ -299,13 +299,14 @@ CompressionType GetCompressionFlush(
}
void DumpSupportInfo(Logger* logger) {
Header(logger, "Compression algorithms supported:");
Header(logger, "\tSnappy supported: %d", Snappy_Supported());
Header(logger, "\tZlib supported: %d", Zlib_Supported());
Header(logger, "\tBzip supported: %d", BZip2_Supported());
Header(logger, "\tLZ4 supported: %d", LZ4_Supported());
Header(logger, "\tZSTD supported: %d", ZSTD_Supported());
Header(logger, "Fast CRC32 supported: %d", crc32c::IsFastCrc32Supported());
ROCKS_LOG_HEADER(logger, "Compression algorithms supported:");
ROCKS_LOG_HEADER(logger, "\tSnappy supported: %d", Snappy_Supported());
ROCKS_LOG_HEADER(logger, "\tZlib supported: %d", Zlib_Supported());
ROCKS_LOG_HEADER(logger, "\tBzip supported: %d", BZip2_Supported());
ROCKS_LOG_HEADER(logger, "\tLZ4 supported: %d", LZ4_Supported());
ROCKS_LOG_HEADER(logger, "\tZSTD supported: %d", ZSTD_Supported());
ROCKS_LOG_HEADER(logger, "Fast CRC32 supported: %d",
crc32c::IsFastCrc32Supported());
}
} // namespace
@@ -387,8 +388,8 @@ DBImpl::DBImpl(const DBOptions& options, const std::string& dbname)
void DBImpl::CancelAllBackgroundWork(bool wait) {
InstrumentedMutexLock l(&mutex_);
Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
"Shutdown: canceling all background work");
ROCKS_LOG_INFO(immutable_db_options_.info_log,
"Shutdown: canceling all background work");
if (!shutting_down_.load(std::memory_order_acquire) &&
has_unpersisted_data_.load(std::memory_order_relaxed) &&
@@ -514,8 +515,7 @@ DBImpl::~DBImpl() {
env_->UnlockFile(db_lock_);
}
Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
"Shutdown complete");
ROCKS_LOG_INFO(immutable_db_options_.info_log, "Shutdown complete");
LogFlush(immutable_db_options_.info_log);
}
@@ -527,8 +527,7 @@ Status DBImpl::NewDB() {
Status s;
Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
"Creating manifest 1 \n");
ROCKS_LOG_INFO(immutable_db_options_.info_log, "Creating manifest 1 \n");
const std::string manifest = DescriptorFileName(dbname_, 1);
{
unique_ptr<WritableFile> file;
@@ -562,8 +561,8 @@ void DBImpl::MaybeIgnoreError(Status* s) const {
if (s->ok() || immutable_db_options_.paranoid_checks) {
// No change needed
} else {
Log(InfoLogLevel::WARN_LEVEL, immutable_db_options_.info_log,
"Ignoring error %s", s->ToString().c_str());
ROCKS_LOG_WARN(immutable_db_options_.info_log, "Ignoring error %s",
s->ToString().c_str());
*s = Status::OK();
}
}
@@ -580,8 +579,8 @@ const Status DBImpl::CreateArchivalDirectory() {
void DBImpl::PrintStatistics() {
auto dbstats = immutable_db_options_.statistics.get();
if (dbstats) {
Log(InfoLogLevel::WARN_LEVEL, immutable_db_options_.info_log,
"STATISTICS:\n %s", dbstats->ToString().c_str());
ROCKS_LOG_WARN(immutable_db_options_.info_log, "STATISTICS:\n %s",
dbstats->ToString().c_str());
}
}
@@ -650,18 +649,16 @@ void DBImpl::MaybeDumpStats() {
default_cf_internal_stats_->GetStringProperty(
*db_property_info, DB::Properties::kDBStats, &stats);
}
Log(InfoLogLevel::WARN_LEVEL, immutable_db_options_.info_log,
"------- DUMPING STATS -------");
Log(InfoLogLevel::WARN_LEVEL, immutable_db_options_.info_log, "%s",
stats.c_str());
ROCKS_LOG_WARN(immutable_db_options_.info_log,
"------- DUMPING STATS -------");
ROCKS_LOG_WARN(immutable_db_options_.info_log, "%s", stats.c_str());
if (immutable_db_options_.dump_malloc_stats) {
stats.clear();
DumpMallocStats(&stats);
if (!stats.empty()) {
Log(InfoLogLevel::WARN_LEVEL, immutable_db_options_.info_log,
"------- Malloc STATS -------");
Log(InfoLogLevel::WARN_LEVEL, immutable_db_options_.info_log, "%s",
stats.c_str());
ROCKS_LOG_WARN(immutable_db_options_.info_log,
"------- Malloc STATS -------");
ROCKS_LOG_WARN(immutable_db_options_.info_log, "%s", stats.c_str());
}
}
#endif // !ROCKSDB_LITE
@@ -899,8 +896,9 @@ void DBImpl::FindObsoleteFiles(JobContext* job_context, bool force,
auto& earliest = *alive_log_files_.begin();
if (immutable_db_options_.recycle_log_file_num >
log_recycle_files.size()) {
Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
"adding log %" PRIu64 " to recycle list\n", earliest.number);
ROCKS_LOG_INFO(immutable_db_options_.info_log,
"adding log %" PRIu64 " to recycle list\n",
earliest.number);
log_recycle_files.push_back(earliest.number);
} else {
job_context->log_delete_files.push_back(earliest.number);
@@ -962,19 +960,22 @@ void DBImpl::DeleteObsoleteFileImpl(Status file_deletion_status, int job_id,
file_deletion_status = env_->DeleteFile(fname);
}
if (file_deletion_status.ok()) {
Log(InfoLogLevel::DEBUG_LEVEL, immutable_db_options_.info_log,
"[JOB %d] Delete %s type=%d #%" PRIu64 " -- %s\n", job_id,
fname.c_str(), type, number, file_deletion_status.ToString().c_str());
ROCKS_LOG_DEBUG(immutable_db_options_.info_log,
"[JOB %d] Delete %s type=%d #%" PRIu64 " -- %s\n", job_id,
fname.c_str(), type, number,
file_deletion_status.ToString().c_str());
} else if (env_->FileExists(fname).IsNotFound()) {
Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
ROCKS_LOG_INFO(
immutable_db_options_.info_log,
"[JOB %d] Tried to delete a non-existing file %s type=%d #%" PRIu64
" -- %s\n",
job_id, fname.c_str(), type, number,
file_deletion_status.ToString().c_str());
} else {
Log(InfoLogLevel::ERROR_LEVEL, immutable_db_options_.info_log,
"[JOB %d] Failed to delete %s type=%d #%" PRIu64 " -- %s\n", job_id,
fname.c_str(), type, number, file_deletion_status.ToString().c_str());
ROCKS_LOG_ERROR(immutable_db_options_.info_log,
"[JOB %d] Failed to delete %s type=%d #%" PRIu64 " -- %s\n",
job_id, fname.c_str(), type, number,
file_deletion_status.ToString().c_str());
}
if (type == kTableFile) {
EventHelpers::LogAndNotifyTableFileDeletion(
@@ -1038,12 +1039,12 @@ void DBImpl::PurgeObsoleteFiles(const JobContext& state, bool schedule_only) {
candidate_files.end());
if (state.prev_total_log_size > 0) {
Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
"[JOB %d] Try to delete WAL files size %" PRIu64
", prev total WAL file size %" PRIu64
", number of live WAL files %" ROCKSDB_PRIszt ".\n",
state.job_id, state.size_log_to_delete, state.prev_total_log_size,
state.num_alive_log_files);
ROCKS_LOG_INFO(immutable_db_options_.info_log,
"[JOB %d] Try to delete WAL files size %" PRIu64
", prev total WAL file size %" PRIu64
", number of live WAL files %" ROCKSDB_PRIszt ".\n",
state.job_id, state.size_log_to_delete,
state.prev_total_log_size, state.num_alive_log_files);
}
std::vector<std::string> old_info_log_files;
@@ -1152,20 +1153,22 @@ void DBImpl::PurgeObsoleteFiles(const JobContext& state, bool schedule_only) {
? dbname_
: immutable_db_options_.db_log_dir) +
"/" + to_delete;
Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
"[JOB %d] Delete info log file %s\n", state.job_id,
full_path_to_delete.c_str());
ROCKS_LOG_INFO(immutable_db_options_.info_log,
"[JOB %d] Delete info log file %s\n", state.job_id,
full_path_to_delete.c_str());
Status s = env_->DeleteFile(full_path_to_delete);
if (!s.ok()) {
if (env_->FileExists(full_path_to_delete).IsNotFound()) {
Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
ROCKS_LOG_INFO(
immutable_db_options_.info_log,
"[JOB %d] Tried to delete non-existing info log file %s FAILED "
"-- %s\n",
state.job_id, to_delete.c_str(), s.ToString().c_str());
} else {
Log(InfoLogLevel::ERROR_LEVEL, immutable_db_options_.info_log,
"[JOB %d] Delete info log file %s FAILED -- %s\n", state.job_id,
to_delete.c_str(), s.ToString().c_str());
ROCKS_LOG_ERROR(immutable_db_options_.info_log,
"[JOB %d] Delete info log file %s FAILED -- %s\n",
state.job_id, to_delete.c_str(),
s.ToString().c_str());
}
}
}
@@ -1399,10 +1402,9 @@ Status DBImpl::RecoverLogFiles(const std::vector<uint64_t>& log_numbers,
const char* fname;
Status* status; // nullptr if immutable_db_options_.paranoid_checks==false
virtual void Corruption(size_t bytes, const Status& s) override {
Log(InfoLogLevel::WARN_LEVEL,
info_log, "%s%s: dropping %d bytes; %s",
(this->status == nullptr ? "(ignoring error) " : ""),
fname, static_cast<int>(bytes), s.ToString().c_str());
ROCKS_LOG_WARN(info_log, "%s%s: dropping %d bytes; %s",
(this->status == nullptr ? "(ignoring error) " : ""),
fname, static_cast<int>(bytes), s.ToString().c_str());
if (this->status != nullptr && this->status->ok()) {
*this->status = s;
}
@@ -1458,15 +1460,15 @@ Status DBImpl::RecoverLogFiles(const std::vector<uint64_t>& log_numbers,
// Open the log file
std::string fname = LogFileName(immutable_db_options_.wal_dir, log_number);
Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
"Recovering log #%" PRIu64 " mode %d", log_number,
immutable_db_options_.wal_recovery_mode);
ROCKS_LOG_INFO(immutable_db_options_.info_log,
"Recovering log #%" PRIu64 " mode %d", log_number,
immutable_db_options_.wal_recovery_mode);
auto logFileDropped = [this, &fname]() {
uint64_t bytes;
if (env_->GetFileSize(fname, &bytes).ok()) {
auto info_log = immutable_db_options_.info_log.get();
Log(InfoLogLevel::WARN_LEVEL, info_log, "%s: dropping %d bytes",
fname.c_str(), static_cast<int>(bytes));
ROCKS_LOG_WARN(info_log, "%s: dropping %d bytes", fname.c_str(),
static_cast<int>(bytes));
}
};
if (stop_replay_by_wal_filter) {
@@ -1597,7 +1599,8 @@ Status DBImpl::RecoverLogFiles(const std::vector<uint64_t>& log_numbers,
int new_count = WriteBatchInternal::Count(&new_batch);
int original_count = WriteBatchInternal::Count(&batch);
if (new_count > original_count) {
Log(InfoLogLevel::FATAL_LEVEL, immutable_db_options_.info_log,
ROCKS_LOG_FATAL(
immutable_db_options_.info_log,
"Recovering log #%" PRIu64
" mode %d log filter %s returned "
"more records (%d) than original (%d) which is not allowed. "
@@ -1675,9 +1678,10 @@ Status DBImpl::RecoverLogFiles(const std::vector<uint64_t>& log_numbers,
// We should ignore the error but not continue replaying
status = Status::OK();
stop_replay_for_corruption = true;
Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
"Point in time recovered to log #%" PRIu64 " seq #%" PRIu64,
log_number, *next_sequence);
ROCKS_LOG_INFO(immutable_db_options_.info_log,
"Point in time recovered to log #%" PRIu64
" seq #%" PRIu64,
log_number, *next_sequence);
} else {
assert(immutable_db_options_.wal_recovery_mode ==
WALRecoveryMode::kTolerateCorruptedTailRecords ||
@@ -1788,10 +1792,10 @@ Status DBImpl::WriteLevel0TableForRecovery(int job_id, ColumnFamilyData* cfd,
TableProperties table_properties;
{
ScopedArenaIterator iter(mem->NewIterator(ro, &arena));
Log(InfoLogLevel::DEBUG_LEVEL, immutable_db_options_.info_log,
"[%s] [WriteLevel0TableForRecovery]"
" Level-0 table #%" PRIu64 ": started",
cfd->GetName().c_str(), meta.fd.GetNumber());
ROCKS_LOG_DEBUG(immutable_db_options_.info_log,
"[%s] [WriteLevel0TableForRecovery]"
" Level-0 table #%" PRIu64 ": started",
cfd->GetName().c_str(), meta.fd.GetNumber());
// Get the latest mutable cf options while the mutex is still locked
const MutableCFOptions mutable_cf_options =
@@ -1817,11 +1821,11 @@ Status DBImpl::WriteLevel0TableForRecovery(int job_id, ColumnFamilyData* cfd,
cfd->internal_stats(), TableFileCreationReason::kRecovery,
&event_logger_, job_id);
LogFlush(immutable_db_options_.info_log);
Log(InfoLogLevel::DEBUG_LEVEL, immutable_db_options_.info_log,
"[%s] [WriteLevel0TableForRecovery]"
" Level-0 table #%" PRIu64 ": %" PRIu64 " bytes %s",
cfd->GetName().c_str(), meta.fd.GetNumber(), meta.fd.GetFileSize(),
s.ToString().c_str());
ROCKS_LOG_DEBUG(immutable_db_options_.info_log,
"[%s] [WriteLevel0TableForRecovery]"
" Level-0 table #%" PRIu64 ": %" PRIu64 " bytes %s",
cfd->GetName().c_str(), meta.fd.GetNumber(),
meta.fd.GetFileSize(), s.ToString().c_str());
mutex_.Lock();
}
}
@@ -1870,9 +1874,9 @@ Status DBImpl::SyncClosedLogs(JobContext* job_context) {
mutex_.Unlock();
for (log::Writer* log : logs_to_sync) {
Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
"[JOB %d] Syncing log #%" PRIu64, job_context->job_id,
log->get_log_number());
ROCKS_LOG_INFO(immutable_db_options_.info_log,
"[JOB %d] Syncing log #%" PRIu64, job_context->job_id,
log->get_log_number());
s = log->file()->Sync(immutable_db_options_.use_fsync);
}
if (s.ok()) {
@@ -1947,8 +1951,9 @@ Status DBImpl::FlushMemTableToOutputFile(
*made_progress = 1;
}
VersionStorageInfo::LevelSummaryStorage tmp;
LogToBuffer(log_buffer, "[%s] Level summary: %s\n", cfd->GetName().c_str(),
cfd->current()->storage_info()->LevelSummary(&tmp));
ROCKS_LOG_BUFFER(log_buffer, "[%s] Level summary: %s\n",
cfd->GetName().c_str(),
cfd->current()->storage_info()->LevelSummary(&tmp));
}
if (!s.ok() && !s.IsShutdownInProgress() &&
@@ -2114,8 +2119,8 @@ Status DBImpl::CompactRange(const CompactRangeOptions& options,
}
if (options.change_level) {
Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
"[RefitLevel] waiting for background threads to stop");
ROCKS_LOG_INFO(immutable_db_options_.info_log,
"[RefitLevel] waiting for background threads to stop");
s = PauseBackgroundWork();
if (s.ok()) {
s = ReFitLevel(cfd, final_output_level, options.target_level);
@@ -2332,10 +2337,10 @@ Status DBImpl::CompactFilesImpl(
} else if (status.IsShutdownInProgress()) {
// Ignore compaction errors found during shutting down
} else {
Log(InfoLogLevel::WARN_LEVEL, immutable_db_options_.info_log,
"[%s] [JOB %d] Compaction error: %s",
c->column_family_data()->GetName().c_str(), job_context->job_id,
status.ToString().c_str());
ROCKS_LOG_WARN(immutable_db_options_.info_log,
"[%s] [JOB %d] Compaction error: %s",
c->column_family_data()->GetName().c_str(),
job_context->job_id, status.ToString().c_str());
if (immutable_db_options_.paranoid_checks && bg_error_.ok()) {
bg_error_ = status;
}
@@ -2442,9 +2447,9 @@ Status DBImpl::SetOptions(ColumnFamilyHandle* column_family,
#else
auto* cfd = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family)->cfd();
if (options_map.empty()) {
Log(InfoLogLevel::WARN_LEVEL, immutable_db_options_.info_log,
"SetOptions() on column family [%s], empty input",
cfd->GetName().c_str());
ROCKS_LOG_WARN(immutable_db_options_.info_log,
"SetOptions() on column family [%s], empty input",
cfd->GetName().c_str());
return Status::InvalidArgument("empty input");
}
@@ -2471,15 +2476,16 @@ Status DBImpl::SetOptions(ColumnFamilyHandle* column_family,
}
}
Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
"SetOptions() on column family [%s], inputs:", cfd->GetName().c_str());
ROCKS_LOG_INFO(immutable_db_options_.info_log,
"SetOptions() on column family [%s], inputs:",
cfd->GetName().c_str());
for (const auto& o : options_map) {
Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log, "%s: %s\n",
o.first.c_str(), o.second.c_str());
ROCKS_LOG_INFO(immutable_db_options_.info_log, "%s: %s\n", o.first.c_str(),
o.second.c_str());
}
if (s.ok()) {
Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
"[%s] SetOptions() succeeded", cfd->GetName().c_str());
ROCKS_LOG_INFO(immutable_db_options_.info_log,
"[%s] SetOptions() succeeded", cfd->GetName().c_str());
new_options.Dump(immutable_db_options_.info_log.get());
if (!persist_options_status.ok()) {
if (immutable_db_options_.fail_if_options_file_error) {
@@ -2487,13 +2493,13 @@ Status DBImpl::SetOptions(ColumnFamilyHandle* column_family,
"SetOptions() succeeded, but unable to persist options",
persist_options_status.ToString());
}
Warn(immutable_db_options_.info_log,
"Unable to persist options in SetOptions() -- %s",
persist_options_status.ToString().c_str());
ROCKS_LOG_WARN(immutable_db_options_.info_log,
"Unable to persist options in SetOptions() -- %s",
persist_options_status.ToString().c_str());
}
} else {
Log(InfoLogLevel::WARN_LEVEL, immutable_db_options_.info_log,
"[%s] SetOptions() failed", cfd->GetName().c_str());
ROCKS_LOG_WARN(immutable_db_options_.info_log, "[%s] SetOptions() failed",
cfd->GetName().c_str());
}
LogFlush(immutable_db_options_.info_log);
return s;
@@ -2506,8 +2512,8 @@ Status DBImpl::SetDBOptions(
return Status::NotSupported("Not supported in ROCKSDB LITE");
#else
if (options_map.empty()) {
Log(InfoLogLevel::WARN_LEVEL, immutable_db_options_.info_log,
"SetDBOptions(), empty input.");
ROCKS_LOG_WARN(immutable_db_options_.info_log,
"SetDBOptions(), empty input.");
return Status::InvalidArgument("empty input");
}
@@ -2537,15 +2543,13 @@ Status DBImpl::SetDBOptions(
persist_options_status = PersistOptions();
}
}
Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
"SetDBOptions(), inputs:");
ROCKS_LOG_INFO(immutable_db_options_.info_log, "SetDBOptions(), inputs:");
for (const auto& o : options_map) {
Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log, "%s: %s\n",
o.first.c_str(), o.second.c_str());
ROCKS_LOG_INFO(immutable_db_options_.info_log, "%s: %s\n", o.first.c_str(),
o.second.c_str());
}
if (s.ok()) {
Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
"SetDBOptions() succeeded");
ROCKS_LOG_INFO(immutable_db_options_.info_log, "SetDBOptions() succeeded");
new_options.Dump(immutable_db_options_.info_log.get());
if (!persist_options_status.ok()) {
if (immutable_db_options_.fail_if_options_file_error) {
@@ -2553,13 +2557,12 @@ Status DBImpl::SetDBOptions(
"SetDBOptions() succeeded, but unable to persist options",
persist_options_status.ToString());
}
Warn(immutable_db_options_.info_log,
"Unable to persist options in SetDBOptions() -- %s",
persist_options_status.ToString().c_str());
ROCKS_LOG_WARN(immutable_db_options_.info_log,
"Unable to persist options in SetDBOptions() -- %s",
persist_options_status.ToString().c_str());
}
} else {
Log(InfoLogLevel::WARN_LEVEL, immutable_db_options_.info_log,
"SetDBOptions failed");
ROCKS_LOG_WARN(immutable_db_options_.info_log, "SetDBOptions failed");
}
LogFlush(immutable_db_options_.info_log);
return s;
@@ -2611,8 +2614,8 @@ Status DBImpl::ReFitLevel(ColumnFamilyData* cfd, int level, int target_level) {
// only allow one thread refitting
if (refitting_level_) {
Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
"[ReFitLevel] another thread is refitting");
ROCKS_LOG_INFO(immutable_db_options_.info_log,
"[ReFitLevel] another thread is refitting");
return Status::NotSupported("another thread is refitting");
}
refitting_level_ = true;
@@ -2639,9 +2642,9 @@ Status DBImpl::ReFitLevel(ColumnFamilyData* cfd, int level, int target_level) {
}
}
if (to_level != level) {
Log(InfoLogLevel::DEBUG_LEVEL, immutable_db_options_.info_log,
"[%s] Before refitting:\n%s", cfd->GetName().c_str(),
cfd->current()->DebugString().data());
ROCKS_LOG_DEBUG(immutable_db_options_.info_log,
"[%s] Before refitting:\n%s", cfd->GetName().c_str(),
cfd->current()->DebugString().data());
VersionEdit edit;
edit.SetColumnFamily(cfd->GetID());
@@ -2652,23 +2655,22 @@ Status DBImpl::ReFitLevel(ColumnFamilyData* cfd, int level, int target_level) {
f->smallest_seqno, f->largest_seqno,
f->marked_for_compaction);
}
Log(InfoLogLevel::DEBUG_LEVEL, immutable_db_options_.info_log,
"[%s] Apply version edit:\n%s", cfd->GetName().c_str(),
edit.DebugString().data());
ROCKS_LOG_DEBUG(immutable_db_options_.info_log,
"[%s] Apply version edit:\n%s", cfd->GetName().c_str(),
edit.DebugString().data());
status = versions_->LogAndApply(cfd, mutable_cf_options, &edit, &mutex_,
directories_.GetDbDir());
superversion_to_free.reset(InstallSuperVersionAndScheduleWork(
cfd, new_superversion.release(), mutable_cf_options));
Log(InfoLogLevel::DEBUG_LEVEL, immutable_db_options_.info_log,
"[%s] LogAndApply: %s\n", cfd->GetName().c_str(),
status.ToString().data());
ROCKS_LOG_DEBUG(immutable_db_options_.info_log, "[%s] LogAndApply: %s\n",
cfd->GetName().c_str(), status.ToString().data());
if (status.ok()) {
Log(InfoLogLevel::DEBUG_LEVEL, immutable_db_options_.info_log,
"[%s] After refitting:\n%s", cfd->GetName().c_str(),
cfd->current()->DebugString().data());
ROCKS_LOG_DEBUG(immutable_db_options_.info_log,
"[%s] After refitting:\n%s", cfd->GetName().c_str(),
cfd->current()->DebugString().data());
}
}
@@ -2850,7 +2852,8 @@ Status DBImpl::RunManualCompaction(ColumnFamilyData* cfd, int input_level,
TEST_SYNC_POINT_CALLBACK("DBImpl::RunManualCompaction:NotScheduled", &mutex_);
if (exclusive) {
while (bg_compaction_scheduled_ > 0) {
Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
ROCKS_LOG_INFO(
immutable_db_options_.info_log,
"[%s] Manual compaction waiting for all other scheduled background "
"compactions to finish",
cfd->GetName().c_str());
@@ -2858,8 +2861,8 @@ Status DBImpl::RunManualCompaction(ColumnFamilyData* cfd, int input_level,
}
}
Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
"[%s] Manual compaction starting", cfd->GetName().c_str());
ROCKS_LOG_INFO(immutable_db_options_.info_log,
"[%s] Manual compaction starting", cfd->GetName().c_str());
// We don't check bg_error_ here, because if we get the error in compaction,
// the compaction will set manual.status to bg_error_ and set manual.done to
@@ -3240,7 +3243,7 @@ Status DBImpl::BackgroundFlush(bool* made_progress, JobContext* job_context,
if (cfd != nullptr) {
const MutableCFOptions mutable_cf_options =
*cfd->GetLatestMutableCFOptions();
LogToBuffer(
ROCKS_LOG_BUFFER(
log_buffer,
"Calling FlushMemTableToOutputFile with column "
"family [%s], flush slots available %d, compaction slots allowed %d, "
@@ -3282,10 +3285,10 @@ void DBImpl::BackgroundCallFlush() {
default_cf_internal_stats_->BumpAndGetBackgroundErrorCount();
bg_cv_.SignalAll(); // In case a waiter can proceed despite the error
mutex_.Unlock();
Log(InfoLogLevel::ERROR_LEVEL, immutable_db_options_.info_log,
"Waiting after background flush error: %s"
"Accumulated background error counts: %" PRIu64,
s.ToString().c_str(), error_cnt);
ROCKS_LOG_ERROR(immutable_db_options_.info_log,
"Waiting after background flush error: %s"
"Accumulated background error counts: %" PRIu64,
s.ToString().c_str(), error_cnt);
log_buffer.FlushBufferToLog();
LogFlush(immutable_db_options_.info_log);
env_->SleepForMicroseconds(1000000);
@@ -3360,10 +3363,10 @@ void DBImpl::BackgroundCallCompaction(void* arg) {
bg_cv_.SignalAll(); // In case a waiter can proceed despite the error
mutex_.Unlock();
log_buffer.FlushBufferToLog();
Log(InfoLogLevel::ERROR_LEVEL, immutable_db_options_.info_log,
"Waiting after background compaction error: %s, "
"Accumulated background error counts: %" PRIu64,
s.ToString().c_str(), error_cnt);
ROCKS_LOG_ERROR(immutable_db_options_.info_log,
"Waiting after background compaction error: %s, "
"Accumulated background error counts: %" PRIu64,
s.ToString().c_str(), error_cnt);
LogFlush(immutable_db_options_.info_log);
env_->SleepForMicroseconds(1000000);
mutex_.Lock();
@@ -3464,22 +3467,23 @@ Status DBImpl::BackgroundCompaction(bool* made_progress,
if (!c) {
m->done = true;
m->manual_end = nullptr;
LogToBuffer(log_buffer,
"[%s] Manual compaction from level-%d from %s .. "
"%s; nothing to do\n",
m->cfd->GetName().c_str(), m->input_level,
(m->begin ? m->begin->DebugString().c_str() : "(begin)"),
(m->end ? m->end->DebugString().c_str() : "(end)"));
ROCKS_LOG_BUFFER(log_buffer,
"[%s] Manual compaction from level-%d from %s .. "
"%s; nothing to do\n",
m->cfd->GetName().c_str(), m->input_level,
(m->begin ? m->begin->DebugString().c_str() : "(begin)"),
(m->end ? m->end->DebugString().c_str() : "(end)"));
} else {
LogToBuffer(log_buffer,
"[%s] Manual compaction from level-%d to level-%d from %s .. "
"%s; will stop at %s\n",
m->cfd->GetName().c_str(), m->input_level, c->output_level(),
(m->begin ? m->begin->DebugString().c_str() : "(begin)"),
(m->end ? m->end->DebugString().c_str() : "(end)"),
((m->done || m->manual_end == nullptr)
? "(end)"
: m->manual_end->DebugString().c_str()));
ROCKS_LOG_BUFFER(
log_buffer,
"[%s] Manual compaction from level-%d to level-%d from %s .. "
"%s; will stop at %s\n",
m->cfd->GetName().c_str(), m->input_level, c->output_level(),
(m->begin ? m->begin->DebugString().c_str() : "(begin)"),
(m->end ? m->end->DebugString().c_str() : "(end)"),
((m->done || m->manual_end == nullptr)
? "(end)"
: m->manual_end->DebugString().c_str()));
}
} else if (!compaction_queue_.empty()) {
// cfd is referenced here
@@ -3543,7 +3547,7 @@ Status DBImpl::BackgroundCompaction(bool* made_progress,
if (!c) {
// Nothing to do
LogToBuffer(log_buffer, "Compaction nothing to do");
ROCKS_LOG_BUFFER(log_buffer, "Compaction nothing to do");
} else if (c->deletion_compaction()) {
// TODO(icanadi) Do we want to honor snapshots here? i.e. not delete old
// file if there is alive snapshot pointing to it
@@ -3562,9 +3566,9 @@ Status DBImpl::BackgroundCompaction(bool* made_progress,
&mutex_, directories_.GetDbDir());
InstallSuperVersionAndScheduleWorkWrapper(
c->column_family_data(), job_context, *c->mutable_cf_options());
LogToBuffer(log_buffer, "[%s] Deleted %d files\n",
c->column_family_data()->GetName().c_str(),
c->num_input_files(0));
ROCKS_LOG_BUFFER(log_buffer, "[%s] Deleted %d files\n",
c->column_family_data()->GetName().c_str(),
c->num_input_files(0));
*made_progress = true;
} else if (!trivial_move_disallowed && c->IsTrivialMove()) {
TEST_SYNC_POINT("DBImpl::BackgroundCompaction:TrivialMove");
@@ -3592,10 +3596,11 @@ Status DBImpl::BackgroundCompaction(bool* made_progress,
f->largest, f->smallest_seqno, f->largest_seqno,
f->marked_for_compaction);
LogToBuffer(log_buffer,
"[%s] Moving #%" PRIu64 " to level-%d %" PRIu64 " bytes\n",
c->column_family_data()->GetName().c_str(),
f->fd.GetNumber(), c->output_level(), f->fd.GetFileSize());
ROCKS_LOG_BUFFER(log_buffer, "[%s] Moving #%" PRIu64
" to level-%d %" PRIu64 " bytes\n",
c->column_family_data()->GetName().c_str(),
f->fd.GetNumber(), c->output_level(),
f->fd.GetFileSize());
++moved_files;
moved_bytes += f->fd.GetFileSize();
}
@@ -3618,7 +3623,7 @@ Status DBImpl::BackgroundCompaction(bool* made_progress,
<< "destination_level" << c->output_level() << "files" << moved_files
<< "total_files_size" << moved_bytes;
}
LogToBuffer(
ROCKS_LOG_BUFFER(
log_buffer,
"[%s] Moved #%d files to level-%d %" PRIu64 " bytes %s: %s\n",
c->column_family_data()->GetName().c_str(), moved_files,
@@ -3676,8 +3681,8 @@ Status DBImpl::BackgroundCompaction(bool* made_progress,
} else if (status.IsShutdownInProgress()) {
// Ignore compaction errors found during shutting down
} else {
Log(InfoLogLevel::WARN_LEVEL, immutable_db_options_.info_log,
"Compaction error: %s", status.ToString().c_str());
ROCKS_LOG_WARN(immutable_db_options_.info_log, "Compaction error: %s",
status.ToString().c_str());
if (immutable_db_options_.paranoid_checks && bg_error_.ok()) {
bg_error_ = status;
}
@@ -4271,13 +4276,13 @@ Status DBImpl::CreateColumnFamily(const ColumnFamilyOptions& cf_options,
}
*handle = new ColumnFamilyHandleImpl(cfd, this, &mutex_);
Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
"Created column family [%s] (ID %u)", column_family_name.c_str(),
(unsigned)cfd->GetID());
ROCKS_LOG_INFO(immutable_db_options_.info_log,
"Created column family [%s] (ID %u)",
column_family_name.c_str(), (unsigned)cfd->GetID());
} else {
Log(InfoLogLevel::ERROR_LEVEL, immutable_db_options_.info_log,
"Creating column family [%s] FAILED -- %s",
column_family_name.c_str(), s.ToString().c_str());
ROCKS_LOG_ERROR(immutable_db_options_.info_log,
"Creating column family [%s] FAILED -- %s",
column_family_name.c_str(), s.ToString().c_str());
}
} // InstrumentedMutexLock l(&mutex_)
@@ -4292,9 +4297,9 @@ Status DBImpl::CreateColumnFamily(const ColumnFamilyOptions& cf_options,
"options in CreateColumnFamily()",
persist_options_status.ToString().c_str());
}
Warn(immutable_db_options_.info_log,
"Unable to persist options in CreateColumnFamily() -- %s",
persist_options_status.ToString().c_str());
ROCKS_LOG_WARN(immutable_db_options_.info_log,
"Unable to persist options in CreateColumnFamily() -- %s",
persist_options_status.ToString().c_str());
}
}
return s;
@@ -4357,8 +4362,8 @@ Status DBImpl::DropColumnFamily(ColumnFamilyHandle* column_family) {
auto* mutable_cf_options = cfd->GetLatestMutableCFOptions();
max_total_in_memory_state_ -= mutable_cf_options->write_buffer_size *
mutable_cf_options->max_write_buffer_number;
Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
"Dropped column family with id %u\n", cfd->GetID());
ROCKS_LOG_INFO(immutable_db_options_.info_log,
"Dropped column family with id %u\n", cfd->GetID());
if (!options_persist_status.ok()) {
if (immutable_db_options_.fail_if_options_file_error) {
@@ -4367,14 +4372,14 @@ Status DBImpl::DropColumnFamily(ColumnFamilyHandle* column_family) {
"options in DropColumnFamily()",
options_persist_status.ToString().c_str());
}
Warn(immutable_db_options_.info_log,
"Unable to persist options in DropColumnFamily() -- %s",
options_persist_status.ToString().c_str());
ROCKS_LOG_WARN(immutable_db_options_.info_log,
"Unable to persist options in DropColumnFamily() -- %s",
options_persist_status.ToString().c_str());
}
} else {
Log(InfoLogLevel::ERROR_LEVEL, immutable_db_options_.info_log,
"Dropping column family with id %u FAILED -- %s\n", cfd->GetID(),
s.ToString().c_str());
ROCKS_LOG_ERROR(immutable_db_options_.info_log,
"Dropping column family with id %u FAILED -- %s\n",
cfd->GetID(), s.ToString().c_str());
}
return s;
@ -4742,7 +4747,8 @@ Status DBImpl::WriteImpl(const WriteOptions& write_options,
// thread is writing to another DB with the same write buffer, they may also
// be flushed. We may end up with flushing much more DBs than needed. It's
// suboptimal but still correct.
Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
ROCKS_LOG_INFO(
immutable_db_options_.info_log,
"Flushing column family with largest mem table size. Write buffer is "
"using %" PRIu64 " bytes out of a total of %" PRIu64 ".",
write_buffer_manager_->memory_usage(),
@ -5074,7 +5080,8 @@ void DBImpl::MaybeFlushColumnFamilies() {
// is still nothing that we can do.
return;
} else {
Log(InfoLogLevel::WARN_LEVEL, immutable_db_options_.info_log,
ROCKS_LOG_WARN(
immutable_db_options_.info_log,
"Unable to release oldest log due to uncommited transaction");
unable_to_flush_oldest_log_ = true;
}
@ -5088,10 +5095,11 @@ void DBImpl::MaybeFlushColumnFamilies() {
WriteContext context;
Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
"Flushing all column families with data in WAL number %" PRIu64
". Total log size is %" PRIu64 " while max_total_wal_size is %" PRIu64,
oldest_alive_log, total_log_size_, GetMaxTotalWalSize());
ROCKS_LOG_INFO(immutable_db_options_.info_log,
"Flushing all column families with data in WAL number %" PRIu64
". Total log size is %" PRIu64
" while max_total_wal_size is %" PRIu64,
oldest_alive_log, total_log_size_, GetMaxTotalWalSize());
// no need to refcount because drop is happening in write thread, so can't
// happen while we're in the write thread
for (auto cfd : *versions_->GetColumnFamilySet()) {
@ -5234,8 +5242,9 @@ Status DBImpl::SwitchMemtable(ColumnFamilyData* cfd, WriteContext* context) {
EnvOptions opt_env_opt =
env_->OptimizeForLogWrite(env_options_, db_options);
if (recycle_log_number) {
Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
"reusing log %" PRIu64 " from recycle list\n", recycle_log_number);
ROCKS_LOG_INFO(immutable_db_options_.info_log,
"reusing log %" PRIu64 " from recycle list\n",
recycle_log_number);
s = env_->ReuseWritableFile(
LogFileName(immutable_db_options_.wal_dir, new_log_number),
LogFileName(immutable_db_options_.wal_dir, recycle_log_number),
@ -5273,10 +5282,10 @@ Status DBImpl::SwitchMemtable(ColumnFamilyData* cfd, WriteContext* context) {
NotifyOnMemTableSealed(cfd, memtable_info);
#endif //ROCKSDB_LITE
}
Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
"[%s] New memtable created with log file: #%" PRIu64
". Immutable memtables: %d.\n",
cfd->GetName().c_str(), new_log_number, num_imm_unflushed);
ROCKS_LOG_INFO(immutable_db_options_.info_log,
"[%s] New memtable created with log file: #%" PRIu64
". Immutable memtables: %d.\n",
cfd->GetName().c_str(), new_log_number, num_imm_unflushed);
mutex_.Lock();
if (!s.ok()) {
// how do we fail if we're not creating new log?
@ -5630,8 +5639,8 @@ Status DBImpl::DeleteFile(std::string name) {
WalFileType log_type;
if (!ParseFileName(name, &number, &type, &log_type) ||
(type != kTableFile && type != kLogFile)) {
Log(InfoLogLevel::ERROR_LEVEL, immutable_db_options_.info_log,
"DeleteFile %s failed.\n", name.c_str());
ROCKS_LOG_ERROR(immutable_db_options_.info_log, "DeleteFile %s failed.\n",
name.c_str());
return Status::InvalidArgument("Invalid file name");
}
@ -5639,16 +5648,17 @@ Status DBImpl::DeleteFile(std::string name) {
if (type == kLogFile) {
// Only allow deleting archived log files
if (log_type != kArchivedLogFile) {
Log(InfoLogLevel::ERROR_LEVEL, immutable_db_options_.info_log,
"DeleteFile %s failed - not archived log.\n", name.c_str());
ROCKS_LOG_ERROR(immutable_db_options_.info_log,
"DeleteFile %s failed - not archived log.\n",
name.c_str());
return Status::NotSupported("Delete only supported for archived logs");
}
status =
env_->DeleteFile(immutable_db_options_.wal_dir + "/" + name.c_str());
if (!status.ok()) {
Log(InfoLogLevel::ERROR_LEVEL, immutable_db_options_.info_log,
"DeleteFile %s failed -- %s.\n", name.c_str(),
status.ToString().c_str());
ROCKS_LOG_ERROR(immutable_db_options_.info_log,
"DeleteFile %s failed -- %s.\n", name.c_str(),
status.ToString().c_str());
}
return status;
}
@ -5662,8 +5672,8 @@ Status DBImpl::DeleteFile(std::string name) {
InstrumentedMutexLock l(&mutex_);
status = versions_->GetMetadataForFile(number, &level, &metadata, &cfd);
if (!status.ok()) {
Log(InfoLogLevel::WARN_LEVEL, immutable_db_options_.info_log,
"DeleteFile %s failed. File not found\n", name.c_str());
ROCKS_LOG_WARN(immutable_db_options_.info_log,
"DeleteFile %s failed. File not found\n", name.c_str());
job_context.Clean();
return Status::InvalidArgument("File not found");
}
@ -5671,8 +5681,9 @@ Status DBImpl::DeleteFile(std::string name) {
// If the file is being compacted no need to delete.
if (metadata->being_compacted) {
Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
"DeleteFile %s Skipped. File about to be compacted\n", name.c_str());
ROCKS_LOG_INFO(immutable_db_options_.info_log,
"DeleteFile %s Skipped. File about to be compacted\n",
name.c_str());
job_context.Clean();
return Status::OK();
}
@ -5683,8 +5694,9 @@ Status DBImpl::DeleteFile(std::string name) {
auto* vstoreage = cfd->current()->storage_info();
for (int i = level + 1; i < cfd->NumberLevels(); i++) {
if (vstoreage->NumLevelFiles(i) != 0) {
Log(InfoLogLevel::WARN_LEVEL, immutable_db_options_.info_log,
"DeleteFile %s FAILED. File not in last level\n", name.c_str());
ROCKS_LOG_WARN(immutable_db_options_.info_log,
"DeleteFile %s FAILED. File not in last level\n",
name.c_str());
job_context.Clean();
return Status::InvalidArgument("File not in last level");
}
@ -5692,10 +5704,10 @@ Status DBImpl::DeleteFile(std::string name) {
// if level == 0, it has to be the oldest file
if (level == 0 &&
vstoreage->LevelFiles(0).back()->fd.GetNumber() != number) {
Log(InfoLogLevel::WARN_LEVEL, immutable_db_options_.info_log,
"DeleteFile %s failed ---"
" target file in level 0 must be the oldest.",
name.c_str());
ROCKS_LOG_WARN(immutable_db_options_.info_log,
"DeleteFile %s failed ---"
" target file in level 0 must be the oldest.",
name.c_str());
job_context.Clean();
return Status::InvalidArgument("File in level 0, but not oldest");
}
@ -6131,8 +6143,7 @@ Status DB::Open(const DBOptions& db_options, const std::string& dbname,
#endif // !ROCKSDB_LITE
if (s.ok()) {
Log(InfoLogLevel::INFO_LEVEL, impl->immutable_db_options_.info_log,
"DB pointer %p", impl);
ROCKS_LOG_INFO(impl->immutable_db_options_.info_log, "DB pointer %p", impl);
LogFlush(impl->immutable_db_options_.info_log);
if (!persist_options_status.ok()) {
if (db_options.fail_if_options_file_error) {
@ -6140,9 +6151,9 @@ Status DB::Open(const DBOptions& db_options, const std::string& dbname,
"DB::Open() failed --- Unable to persist Options file",
persist_options_status.ToString());
}
Warn(impl->immutable_db_options_.info_log,
"Unable to persist options in DB::Open() -- %s",
persist_options_status.ToString().c_str());
ROCKS_LOG_WARN(impl->immutable_db_options_.info_log,
"Unable to persist options in DB::Open() -- %s",
persist_options_status.ToString().c_str());
}
}
if (!s.ok()) {
@ -6304,7 +6315,8 @@ void DeleteOptionsFilesHelper(const std::map<uint64_t, std::string>& filenames,
for (auto iter = std::next(filenames.begin(), num_files_to_keep);
iter != filenames.end(); ++iter) {
if (!env->DeleteFile(iter->second).ok()) {
Warn(info_log, "Unable to delete options file %s", iter->second.c_str());
ROCKS_LOG_WARN(info_log, "Unable to delete options file %s",
iter->second.c_str());
}
}
}
@ -6401,10 +6413,10 @@ void DumpRocksDBBuildVersion(Logger * log) {
#if !defined(IOS_CROSS_COMPILE)
// if we compile with Xcode, we don't run build_detect_version, so we don't
// generate util/build_version.cc
Header(log, "RocksDB version: %d.%d.%d\n", ROCKSDB_MAJOR, ROCKSDB_MINOR,
ROCKSDB_PATCH);
Header(log, "Git sha %s", rocksdb_build_git_sha);
Header(log, "Compile date %s", rocksdb_build_compile_date);
ROCKS_LOG_HEADER(log, "RocksDB version: %d.%d.%d\n", ROCKSDB_MAJOR,
ROCKSDB_MINOR, ROCKSDB_PATCH);
ROCKS_LOG_HEADER(log, "Git sha %s", rocksdb_build_git_sha);
ROCKS_LOG_HEADER(log, "Compile date %s", rocksdb_build_compile_date);
#endif
}
@ -6446,9 +6458,9 @@ Status DBImpl::GetLatestSequenceForKey(SuperVersion* sv, const Slice& key,
if (!(s.ok() || s.IsNotFound() || s.IsMergeInProgress())) {
// unexpected error reading memtable.
Log(InfoLogLevel::ERROR_LEVEL, immutable_db_options_.info_log,
"Unexpected status returned from MemTable::Get: %s\n",
s.ToString().c_str());
ROCKS_LOG_ERROR(immutable_db_options_.info_log,
"Unexpected status returned from MemTable::Get: %s\n",
s.ToString().c_str());
return s;
}
@ -6465,9 +6477,9 @@ Status DBImpl::GetLatestSequenceForKey(SuperVersion* sv, const Slice& key,
if (!(s.ok() || s.IsNotFound() || s.IsMergeInProgress())) {
// unexpected error reading memtable.
Log(InfoLogLevel::ERROR_LEVEL, immutable_db_options_.info_log,
"Unexpected status returned from MemTableList::Get: %s\n",
s.ToString().c_str());
ROCKS_LOG_ERROR(immutable_db_options_.info_log,
"Unexpected status returned from MemTableList::Get: %s\n",
s.ToString().c_str());
return s;
}
@ -6484,7 +6496,8 @@ Status DBImpl::GetLatestSequenceForKey(SuperVersion* sv, const Slice& key,
if (!(s.ok() || s.IsNotFound() || s.IsMergeInProgress())) {
// unexpected error reading memtable.
Log(InfoLogLevel::ERROR_LEVEL, immutable_db_options_.info_log,
ROCKS_LOG_ERROR(
immutable_db_options_.info_log,
"Unexpected status returned from MemTableList::GetFromHistory: %s\n",
s.ToString().c_str());
@ -6507,9 +6520,9 @@ Status DBImpl::GetLatestSequenceForKey(SuperVersion* sv, const Slice& key,
if (!(s.ok() || s.IsNotFound() || s.IsMergeInProgress())) {
// unexpected error reading SST files
Log(InfoLogLevel::ERROR_LEVEL, immutable_db_options_.info_log,
"Unexpected status returned from Version::Get: %s\n",
s.ToString().c_str());
ROCKS_LOG_ERROR(immutable_db_options_.info_log,
"Unexpected status returned from Version::Get: %s\n",
s.ToString().c_str());
return s;
}
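
Every hunk above follows the same mechanical pattern: `Log(InfoLogLevel::X_LEVEL, info_log, ...)` becomes `ROCKS_LOG_X(info_log, ...)`, with the macro injecting the call site into the message. The macros themselves live in `util/logging.h`. Below is a minimal sketch of how such macros can work; the stand-in `Logger`/`Log()` types, the bracketed `[file:line]` prefix format, and the stringify trick are illustrative assumptions, not the literal implementation (which may, for instance, shorten `__FILE__` to a relative path):

```cpp
#include <cstdarg>
#include <cstdio>

// Stand-ins for rocksdb::Logger / rocksdb::Log(), for illustration only.
struct Logger {};
enum InfoLogLevel { DEBUG_LEVEL, INFO_LEVEL, WARN_LEVEL, ERROR_LEVEL };

void Log(InfoLogLevel /*level*/, Logger* /*logger*/, const char* format, ...) {
  va_list ap;
  va_start(ap, format);
  vfprintf(stderr, format, ap);  // a real Logger would route by level
  va_end(ap);
  fputc('\n', stderr);
}

// Stringify __LINE__ so it can be spliced into the format string at
// preprocessing time (two levels so the macro argument expands first).
#define ROCKS_LOG_STRINGIFY_HELPER(x) #x
#define ROCKS_LOG_STRINGIFY(x) ROCKS_LOG_STRINGIFY_HELPER(x)

// Prepend "[file:line] " to the caller's format; __FILE__ fills the %s.
#define ROCKS_LOG_PREPEND_FILE_LINE(FMT) \
  ("[%s:" ROCKS_LOG_STRINGIFY(__LINE__) "] " FMT)

#define ROCKS_LOG_INFO(LGR, FMT, ...)                                \
  Log(INFO_LEVEL, (LGR), ROCKS_LOG_PREPEND_FILE_LINE(FMT), __FILE__, \
      ##__VA_ARGS__)  // ##__VA_ARGS__ is a GNU extension, widely supported

int main() {
  Logger logger;
  // Prints something like: [sketch.cc:33] DB pointer 0x0
  ROCKS_LOG_INFO(&logger, "DB pointer %p", static_cast<void*>(nullptr));
  return 0;
}
```

Splicing the location in at preprocessing time keeps the runtime cost to one extra `%s` argument per call instead of formatting a separate prefix.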

@ -61,8 +61,8 @@ Status DBImpl::PromoteL0(ColumnFamilyHandle* column_family, int target_level) {
assert(column_family);
if (target_level < 1) {
Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
"PromoteL0 FAILED. Invalid target level %d\n", target_level);
ROCKS_LOG_INFO(immutable_db_options_.info_log,
"PromoteL0 FAILED. Invalid target level %d\n", target_level);
return Status::InvalidArgument("Invalid target level");
}
@ -75,8 +75,9 @@ Status DBImpl::PromoteL0(ColumnFamilyHandle* column_family, int target_level) {
const auto* vstorage = cfd->current()->storage_info();
if (target_level >= vstorage->num_levels()) {
Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
"PromoteL0 FAILED. Target level %d does not exist\n", target_level);
ROCKS_LOG_INFO(immutable_db_options_.info_log,
"PromoteL0 FAILED. Target level %d does not exist\n",
target_level);
job_context.Clean();
return Status::InvalidArgument("Target level does not exist");
}
@ -94,9 +95,9 @@ Status DBImpl::PromoteL0(ColumnFamilyHandle* column_family, int target_level) {
for (size_t i = 0; i < l0_files.size(); ++i) {
auto f = l0_files[i];
if (f->being_compacted) {
Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
"PromoteL0 FAILED. File %" PRIu64 " being compacted\n",
f->fd.GetNumber());
ROCKS_LOG_INFO(immutable_db_options_.info_log,
"PromoteL0 FAILED. File %" PRIu64 " being compacted\n",
f->fd.GetNumber());
job_context.Clean();
return Status::InvalidArgument("PromoteL0 called during L0 compaction");
}
@ -104,10 +105,10 @@ Status DBImpl::PromoteL0(ColumnFamilyHandle* column_family, int target_level) {
if (i == 0) continue;
auto prev_f = l0_files[i - 1];
if (icmp->Compare(prev_f->largest, f->smallest) >= 0) {
Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
"PromoteL0 FAILED. Files %" PRIu64 " and %" PRIu64
" have overlapping ranges\n",
prev_f->fd.GetNumber(), f->fd.GetNumber());
ROCKS_LOG_INFO(immutable_db_options_.info_log,
"PromoteL0 FAILED. Files %" PRIu64 " and %" PRIu64
" have overlapping ranges\n",
prev_f->fd.GetNumber(), f->fd.GetNumber());
job_context.Clean();
return Status::InvalidArgument("L0 has overlapping files");
}
@ -116,8 +117,8 @@ Status DBImpl::PromoteL0(ColumnFamilyHandle* column_family, int target_level) {
// Check that all levels up to target_level are empty.
for (int level = 1; level <= target_level; ++level) {
if (vstorage->NumLevelFiles(level) > 0) {
Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
"PromoteL0 FAILED. Level %d not empty\n", level);
ROCKS_LOG_INFO(immutable_db_options_.info_log,
"PromoteL0 FAILED. Level %d not empty\n", level);
job_context.Clean();
return Status::InvalidArgument(
"All levels up to target_level "

@ -20,8 +20,8 @@ namespace rocksdb {
DBImplReadOnly::DBImplReadOnly(const DBOptions& db_options,
const std::string& dbname)
: DBImpl(db_options, dbname) {
Log(INFO_LEVEL, immutable_db_options_.info_log,
"Opening the db in read only mode");
ROCKS_LOG_INFO(immutable_db_options_.info_log,
"Opening the db in read only mode");
LogFlush(immutable_db_options_.info_log);
}

@ -291,9 +291,8 @@ class DBIter: public Iterator {
inline bool DBIter::ParseKey(ParsedInternalKey* ikey) {
if (!ParseInternalKey(iter_->key(), ikey)) {
status_ = Status::Corruption("corrupted internal key in DBIter");
Log(InfoLogLevel::ERROR_LEVEL,
logger_, "corrupted internal key in DBIter: %s",
iter_->key().ToString(true).c_str());
ROCKS_LOG_ERROR(logger_, "corrupted internal key in DBIter: %s",
iter_->key().ToString(true).c_str());
return false;
} else {
return true;
@ -509,8 +508,7 @@ void DBIter::FindNextUserEntryInternal(bool skipping, bool prefix_check) {
// iter_ points to the next entry (or invalid)
void DBIter::MergeValuesNewToOld() {
if (!merge_operator_) {
Log(InfoLogLevel::ERROR_LEVEL,
logger_, "Options::merge_operator is null.");
ROCKS_LOG_ERROR(logger_, "Options::merge_operator is null.");
status_ = Status::InvalidArgument("merge_operator_ must be set.");
valid_ = false;
return;

@ -120,9 +120,9 @@ Status ExternalSstFileIngestionJob::Prepare(
}
Status s = env_->DeleteFile(f.internal_file_path);
if (!s.ok()) {
Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log,
"AddFile() clean up for file %s failed : %s",
f.internal_file_path.c_str(), s.ToString().c_str());
ROCKS_LOG_WARN(db_options_.info_log,
"AddFile() clean up for file %s failed : %s",
f.internal_file_path.c_str(), s.ToString().c_str());
}
}
}
@ -212,7 +212,8 @@ void ExternalSstFileIngestionJob::UpdateStats() {
if (f.picked_level == 0) {
total_l0_files += 1;
}
Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
ROCKS_LOG_INFO(
db_options_.info_log,
"[AddFile] External SST file %s was ingested in L%d with path %s "
"(global_seqno=%" PRIu64 ")\n",
f.external_file_path.c_str(), f.picked_level,
@ -233,9 +234,9 @@ void ExternalSstFileIngestionJob::Cleanup(const Status& status) {
for (IngestedFileInfo& f : files_to_ingest_) {
Status s = env_->DeleteFile(f.internal_file_path);
if (!s.ok()) {
Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log,
"AddFile() clean up for file %s failed : %s",
f.internal_file_path.c_str(), s.ToString().c_str());
ROCKS_LOG_WARN(db_options_.info_log,
"AddFile() clean up for file %s failed : %s",
f.internal_file_path.c_str(), s.ToString().c_str());
}
}
} else if (status.ok() && ingestion_options_.move_files) {
@ -243,7 +244,8 @@ void ExternalSstFileIngestionJob::Cleanup(const Status& status) {
for (IngestedFileInfo& f : files_to_ingest_) {
Status s = env_->DeleteFile(f.external_file_path);
if (!s.ok()) {
Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log,
ROCKS_LOG_WARN(
db_options_.info_log,
"%s was added to DB successfully but failed to remove original "
"file link : %s",
f.external_file_path.c_str(), s.ToString().c_str());

@ -158,8 +158,8 @@ Status FlushJob::Run(FileMetaData* file_meta) {
AutoThreadOperationStageUpdater stage_run(
ThreadStatus::STAGE_FLUSH_RUN);
if (mems_.empty()) {
LogToBuffer(log_buffer_, "[%s] Nothing in memtable to flush",
cfd_->GetName().c_str());
ROCKS_LOG_BUFFER(log_buffer_, "[%s] Nothing in memtable to flush",
cfd_->GetName().c_str());
return Status::OK();
}
@ -258,7 +258,8 @@ Status FlushJob::WriteLevel0Table() {
uint64_t total_num_entries = 0, total_num_deletes = 0;
size_t total_memory_usage = 0;
for (MemTable* m : mems_) {
Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
ROCKS_LOG_INFO(
db_options_.info_log,
"[%s] [JOB %d] Flushing memtable with next log file: %" PRIu64 "\n",
cfd_->GetName().c_str(), job_context_->job_id, m->GetNextLogNumber());
memtables.push_back(m->NewIterator(ro, &arena));
@ -286,9 +287,10 @@ Status FlushJob::WriteLevel0Table() {
&cfd_->internal_comparator(),
range_del_iters.empty() ? nullptr : &range_del_iters[0],
static_cast<int>(range_del_iters.size())));
Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
"[%s] [JOB %d] Level-0 flush table #%" PRIu64 ": started",
cfd_->GetName().c_str(), job_context_->job_id, meta_.fd.GetNumber());
ROCKS_LOG_INFO(db_options_.info_log,
"[%s] [JOB %d] Level-0 flush table #%" PRIu64 ": started",
cfd_->GetName().c_str(), job_context_->job_id,
meta_.fd.GetNumber());
TEST_SYNC_POINT_CALLBACK("FlushJob::WriteLevel0Table:output_compression",
&output_compression_);
@ -305,13 +307,14 @@ Status FlushJob::WriteLevel0Table() {
Env::IO_HIGH, &table_properties_, 0 /* level */);
LogFlush(db_options_.info_log);
}
Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
"[%s] [JOB %d] Level-0 flush table #%" PRIu64 ": %" PRIu64
" bytes %s"
"%s",
cfd_->GetName().c_str(), job_context_->job_id, meta_.fd.GetNumber(),
meta_.fd.GetFileSize(), s.ToString().c_str(),
meta_.marked_for_compaction ? " (needs compaction)" : "");
ROCKS_LOG_INFO(db_options_.info_log,
"[%s] [JOB %d] Level-0 flush table #%" PRIu64 ": %" PRIu64
" bytes %s"
"%s",
cfd_->GetName().c_str(), job_context_->job_id,
meta_.fd.GetNumber(), meta_.fd.GetFileSize(),
s.ToString().c_str(),
meta_.marked_for_compaction ? " (needs compaction)" : "");
if (output_file_directory_ != nullptr) {
output_file_directory_->Fsync();

@ -355,9 +355,9 @@ Status MemTableList::InstallMemtableFlushResults(
}
if (it == memlist.rbegin() || batch_file_number != m->file_number_) {
batch_file_number = m->file_number_;
LogToBuffer(log_buffer,
"[%s] Level-0 commit table #%" PRIu64 " started",
cfd->GetName().c_str(), m->file_number_);
ROCKS_LOG_BUFFER(log_buffer,
"[%s] Level-0 commit table #%" PRIu64 " started",
cfd->GetName().c_str(), m->file_number_);
edit_list.push_back(&m->edit_);
}
batch_count++;
@ -378,9 +378,9 @@ Status MemTableList::InstallMemtableFlushResults(
if (s.ok()) { // commit new state
while (batch_count-- > 0) {
MemTable* m = current_->memlist_.back();
LogToBuffer(log_buffer, "[%s] Level-0 commit table #%" PRIu64
": memtable #%" PRIu64 " done",
cfd->GetName().c_str(), m->file_number_, mem_id);
ROCKS_LOG_BUFFER(log_buffer, "[%s] Level-0 commit table #%" PRIu64
": memtable #%" PRIu64 " done",
cfd->GetName().c_str(), m->file_number_, mem_id);
assert(m->file_number_ > 0);
current_->Remove(m, to_delete);
++mem_id;
@ -389,9 +389,9 @@ Status MemTableList::InstallMemtableFlushResults(
for (auto it = current_->memlist_.rbegin(); batch_count-- > 0; it++) {
MemTable* m = *it;
// commit failed. setup state so that we can flush again.
LogToBuffer(log_buffer, "Level-0 commit table #%" PRIu64
": memtable #%" PRIu64 " failed",
m->file_number_, mem_id);
ROCKS_LOG_BUFFER(log_buffer, "Level-0 commit table #%" PRIu64
": memtable #%" PRIu64 " failed",
m->file_number_, mem_id);
m->flush_completed_ = false;
m->flush_in_progress_ = false;
m->edit_.Clear();
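
The `LogToBuffer` call sites (the flush and memtable-commit paths above run under the DB mutex, so their messages are buffered and emitted later) get the same treatment via `ROCKS_LOG_BUFFER`. A sketch under the same assumptions as the earlier block, with a simplified stand-in for the buffer type; the point is that `__FILE__`/`__LINE__` are captured where the macro expands, not where the buffer is finally flushed:

```cpp
#include <cstdarg>
#include <cstdio>
#include <string>
#include <vector>

#define ROCKS_LOG_STRINGIFY_HELPER(x) #x
#define ROCKS_LOG_STRINGIFY(x) ROCKS_LOG_STRINGIFY_HELPER(x)
#define ROCKS_LOG_PREPEND_FILE_LINE(FMT) \
  ("[%s:" ROCKS_LOG_STRINGIFY(__LINE__) "] " FMT)

// Simplified stand-in for rocksdb::LogBuffer: collects formatted lines so
// they can be emitted once the DB mutex is released.
struct LogBuffer {
  std::vector<std::string> lines;
};

void LogToBuffer(LogBuffer* buf, const char* format, ...) {
  char msg[512];
  va_list ap;
  va_start(ap, format);
  vsnprintf(msg, sizeof(msg), format, ap);
  va_end(ap);
  buf->lines.emplace_back(msg);
}

// Buffered twin of the eager macros: the call site is baked into the format
// string at expansion time.
#define ROCKS_LOG_BUFFER(BUF, FMT, ...) \
  LogToBuffer((BUF), ROCKS_LOG_PREPEND_FILE_LINE(FMT), __FILE__, ##__VA_ARGS__)

int main() {
  LogBuffer buffer;
  ROCKS_LOG_BUFFER(&buffer, "[%s] Nothing in memtable to flush", "default");
  for (const auto& line : buffer.lines) fprintf(stderr, "%s\n", line.c_str());
  return 0;
}
```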

@ -199,13 +199,13 @@ class Repairer {
for (size_t i = 0; i < tables_.size(); i++) {
bytes += tables_[i].meta.fd.GetFileSize();
}
Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log,
"**** Repaired rocksdb %s; "
"recovered %" ROCKSDB_PRIszt " files; %" PRIu64
"bytes. "
"Some data may have been lost. "
"****",
dbname_.c_str(), tables_.size(), bytes);
ROCKS_LOG_WARN(db_options_.info_log,
"**** Repaired rocksdb %s; "
"recovered %" ROCKSDB_PRIszt " files; %" PRIu64
"bytes. "
"Some data may have been lost. "
"****",
dbname_.c_str(), tables_.size(), bytes);
}
return status;
}
@ -291,9 +291,9 @@ class Repairer {
std::string logname = LogFileName(dbname_, logs_[i]);
Status status = ConvertLogToTable(logs_[i]);
if (!status.ok()) {
Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log,
"Log #%" PRIu64 ": ignoring conversion error: %s", logs_[i],
status.ToString().c_str());
ROCKS_LOG_WARN(db_options_.info_log,
"Log #%" PRIu64 ": ignoring conversion error: %s",
logs_[i], status.ToString().c_str());
}
ArchiveFile(logname);
}
@ -306,9 +306,8 @@ class Repairer {
uint64_t lognum;
virtual void Corruption(size_t bytes, const Status& s) override {
// We print error messages for corruption, but continue repairing.
Log(InfoLogLevel::ERROR_LEVEL, info_log,
"Log #%" PRIu64 ": dropping %d bytes; %s", lognum,
static_cast<int>(bytes), s.ToString().c_str());
ROCKS_LOG_ERROR(info_log, "Log #%" PRIu64 ": dropping %d bytes; %s",
lognum, static_cast<int>(bytes), s.ToString().c_str());
}
};
@ -357,8 +356,8 @@ class Repairer {
if (status.ok()) {
counter += WriteBatchInternal::Count(&batch);
} else {
Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log,
"Log #%" PRIu64 ": ignoring %s", log, status.ToString().c_str());
ROCKS_LOG_WARN(db_options_.info_log, "Log #%" PRIu64 ": ignoring %s",
log, status.ToString().c_str());
status = Status::OK(); // Keep going with rest of file
}
}
@ -386,9 +385,10 @@ class Repairer {
cfd->int_tbl_prop_collector_factories(), cfd->GetID(), cfd->GetName(),
{}, kMaxSequenceNumber, kNoCompression, CompressionOptions(), false,
nullptr /* internal_stats */, TableFileCreationReason::kRecovery);
Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
"Log #%" PRIu64 ": %d ops saved to Table #%" PRIu64 " %s", log,
counter, meta.fd.GetNumber(), status.ToString().c_str());
ROCKS_LOG_INFO(db_options_.info_log,
"Log #%" PRIu64 ": %d ops saved to Table #%" PRIu64 " %s",
log, counter, meta.fd.GetNumber(),
status.ToString().c_str());
if (status.ok()) {
if (meta.fd.GetFileSize() > 0) {
table_fds_.push_back(meta.fd);
@ -412,8 +412,8 @@ class Repairer {
char file_num_buf[kFormatFileNumberBufSize];
FormatFileNumber(t.meta.fd.GetNumber(), t.meta.fd.GetPathId(),
file_num_buf, sizeof(file_num_buf));
Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log,
"Table #%s: ignoring %s", file_num_buf, status.ToString().c_str());
ROCKS_LOG_WARN(db_options_.info_log, "Table #%s: ignoring %s",
file_num_buf, status.ToString().c_str());
ArchiveFile(fname);
} else {
tables_.push_back(t);
@ -438,7 +438,8 @@ class Repairer {
t->column_family_id = static_cast<uint32_t>(props->column_family_id);
if (t->column_family_id ==
TablePropertiesCollectorFactory::Context::kUnknownColumnFamily) {
Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log,
ROCKS_LOG_WARN(
db_options_.info_log,
"Table #%" PRIu64
": column family unknown (probably due to legacy format); "
"adding to default column family id 0.",
@ -456,7 +457,8 @@ class Repairer {
if (status.ok()) {
cfd = vset_.GetColumnFamilySet()->GetColumnFamily(t->column_family_id);
if (cfd->GetName() != props->column_family_name) {
Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log,
ROCKS_LOG_ERROR(
db_options_.info_log,
"Table #%" PRIu64
": inconsistent column family name '%s'; expected '%s' for column "
"family id %" PRIu32 ".",
@ -476,9 +478,9 @@ class Repairer {
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
Slice key = iter->key();
if (!ParseInternalKey(key, &parsed)) {
Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log,
"Table #%" PRIu64 ": unparsable key %s", t->meta.fd.GetNumber(),
EscapeString(key).c_str());
ROCKS_LOG_ERROR(db_options_.info_log,
"Table #%" PRIu64 ": unparsable key %s",
t->meta.fd.GetNumber(), EscapeString(key).c_str());
continue;
}
@ -501,9 +503,9 @@ class Repairer {
}
delete iter;
Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
"Table #%" PRIu64 ": %d entries %s", t->meta.fd.GetNumber(), counter,
status.ToString().c_str());
ROCKS_LOG_INFO(db_options_.info_log, "Table #%" PRIu64 ": %d entries %s",
t->meta.fd.GetNumber(), counter,
status.ToString().c_str());
}
return status;
}
@ -563,8 +565,8 @@ class Repairer {
new_file.append("/");
new_file.append((slash == nullptr) ? fname.c_str() : slash + 1);
Status s = env_->RenameFile(fname, new_file);
Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log, "Archiving %s: %s\n",
fname.c_str(), s.ToString().c_str());
ROCKS_LOG_INFO(db_options_.info_log, "Archiving %s: %s\n", fname.c_str(),
s.ToString().c_str());
}
};

@ -92,13 +92,10 @@ class TransactionLogIteratorImpl : public TransactionLogIterator {
Env* env;
Logger* info_log;
virtual void Corruption(size_t bytes, const Status& s) override {
Log(InfoLogLevel::ERROR_LEVEL, info_log,
"dropping %" ROCKSDB_PRIszt " bytes; %s", bytes,
s.ToString().c_str());
}
virtual void Info(const char* s) {
Log(InfoLogLevel::INFO_LEVEL, info_log, "%s", s);
ROCKS_LOG_ERROR(info_log, "dropping %" ROCKSDB_PRIszt " bytes; %s", bytes,
s.ToString().c_str());
}
virtual void Info(const char* s) { ROCKS_LOG_INFO(info_log, "%s", s); }
} reporter_;
SequenceNumber currentBatchSeq_; // sequence number at start of current batch

@ -1056,9 +1056,10 @@ bool Version::MaybeInitializeFileMetaData(FileMetaData* file_meta) {
Status s = GetTableProperties(&tp, file_meta);
file_meta->init_stats_from_file = true;
if (!s.ok()) {
Log(InfoLogLevel::ERROR_LEVEL, vset_->db_options_->info_log,
"Unable to load table properties for file %" PRIu64 " --- %s\n",
file_meta->fd.GetNumber(), s.ToString().c_str());
ROCKS_LOG_ERROR(vset_->db_options_->info_log,
"Unable to load table properties for file %" PRIu64
" --- %s\n",
file_meta->fd.GetNumber(), s.ToString().c_str());
return false;
}
if (tp.get() == nullptr) return false;
@ -2073,9 +2074,9 @@ void VersionStorageInfo::CalculateBaseBytes(const ImmutableCFOptions& ioptions,
// base_bytes_min. We set it be base_bytes_min.
base_level_size = base_bytes_min + 1U;
base_level_ = first_non_empty_level;
Warn(ioptions.info_log,
"More existing levels in DB than needed. "
"max_bytes_for_level_multiplier may not be guaranteed.");
ROCKS_LOG_WARN(ioptions.info_log,
"More existing levels in DB than needed. "
"max_bytes_for_level_multiplier may not be guaranteed.");
} else {
// Find base level (where L0 data is compacted to).
base_level_ = first_non_empty_level;
@ -2381,8 +2382,8 @@ Status VersionSet::LogAndApply(ColumnFamilyData* column_family_data,
// only one thread can be here at the same time
if (new_descriptor_log) {
// create manifest file
Log(InfoLogLevel::INFO_LEVEL, db_options_->info_log,
"Creating manifest %" PRIu64 "\n", pending_manifest_file_number_);
ROCKS_LOG_INFO(db_options_->info_log, "Creating manifest %" PRIu64 "\n",
pending_manifest_file_number_);
unique_ptr<WritableFile> descriptor_file;
EnvOptions opt_env_opts = env_->OptimizeForManifestWrite(env_options_);
s = NewWritableFile(
@ -2425,8 +2426,8 @@ Status VersionSet::LogAndApply(ColumnFamilyData* column_family_data,
s = SyncManifest(env_, db_options_, descriptor_log_->file());
}
if (!s.ok()) {
Log(InfoLogLevel::ERROR_LEVEL, db_options_->info_log,
"MANIFEST write: %s\n", s.ToString().c_str());
ROCKS_LOG_ERROR(db_options_->info_log, "MANIFEST write: %s\n",
s.ToString().c_str());
}
}
@ -2496,15 +2497,16 @@ Status VersionSet::LogAndApply(ColumnFamilyData* column_family_data,
for (auto& e : batch_edits) {
version_edits = version_edits + "\n" + e->DebugString(true);
}
Log(InfoLogLevel::ERROR_LEVEL, db_options_->info_log,
ROCKS_LOG_ERROR(
db_options_->info_log,
"[%s] Error in committing version edit to MANIFEST: %s",
column_family_data ? column_family_data->GetName().c_str() : "<null>",
version_edits.c_str());
delete v;
if (new_descriptor_log) {
Log(InfoLogLevel::INFO_LEVEL, db_options_->info_log,
"Deleting manifest %" PRIu64 " current manifest %" PRIu64 "\n",
manifest_file_number_, pending_manifest_file_number_);
ROCKS_LOG_INFO(db_options_->info_log, "Deleting manifest %" PRIu64
" current manifest %" PRIu64 "\n",
manifest_file_number_, pending_manifest_file_number_);
descriptor_log_.reset();
env_->DeleteFile(
DescriptorFileName(dbname_, pending_manifest_file_number_));
@ -2594,9 +2596,8 @@ Status VersionSet::Recover(
return Status::Corruption("CURRENT file corrupted");
}
Log(InfoLogLevel::INFO_LEVEL, db_options_->info_log,
"Recovering from manifest file: %s\n",
manifest_filename.c_str());
ROCKS_LOG_INFO(db_options_->info_log, "Recovering from manifest file: %s\n",
manifest_filename.c_str());
manifest_filename = dbname_ + "/" + manifest_filename;
unique_ptr<SequentialFileReader> manifest_file_reader;
@ -2734,7 +2735,8 @@ Status VersionSet::Recover(
if (cfd != nullptr) {
if (edit.has_log_number_) {
if (cfd->GetLogNumber() > edit.log_number_) {
Log(InfoLogLevel::WARN_LEVEL, db_options_->info_log,
ROCKS_LOG_WARN(
db_options_->info_log,
"MANIFEST corruption detected, but ignored - Log numbers in "
"records NOT monotonically increasing");
} else {
@ -2835,7 +2837,8 @@ Status VersionSet::Recover(
last_sequence_ = last_sequence;
prev_log_number_ = previous_log_number;
Log(InfoLogLevel::INFO_LEVEL, db_options_->info_log,
ROCKS_LOG_INFO(
db_options_->info_log,
"Recovered from manifest file:%s succeeded,"
"manifest_file_number is %lu, next_file_number is %lu, "
"last_sequence is %lu, log_number is %lu,"
@ -2850,9 +2853,9 @@ Status VersionSet::Recover(
if (cfd->IsDropped()) {
continue;
}
Log(InfoLogLevel::INFO_LEVEL, db_options_->info_log,
"Column family [%s] (ID %u), log number is %" PRIu64 "\n",
cfd->GetName().c_str(), cfd->GetID(), cfd->GetLogNumber());
ROCKS_LOG_INFO(db_options_->info_log,
"Column family [%s] (ID %u), log number is %" PRIu64 "\n",
cfd->GetName().c_str(), cfd->GetID(), cfd->GetLogNumber());
}
}
@ -3492,7 +3495,8 @@ bool VersionSet::VerifyCompactionFileConsistency(Compaction* c) {
Version* version = c->column_family_data()->current();
const VersionStorageInfo* vstorage = version->storage_info();
if (c->input_version() != version) {
Log(InfoLogLevel::INFO_LEVEL, db_options_->info_log,
ROCKS_LOG_INFO(
db_options_->info_log,
"[%s] compaction output being applied to a different base version from"
" input version",
c->column_family_data()->GetName().c_str());

@ -73,9 +73,8 @@ Status WalManager::GetSortedWalFiles(VectorLogPtr& files) {
uint64_t latest_archived_log_number = 0;
if (!files.empty()) {
latest_archived_log_number = files.back()->LogNumber();
Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
"Latest Archived log: %" PRIu64,
latest_archived_log_number);
ROCKS_LOG_INFO(db_options_.info_log, "Latest Archived log: %" PRIu64,
latest_archived_log_number);
}
files.reserve(files.size() + logs.size());
@ -87,8 +86,8 @@ Status WalManager::GetSortedWalFiles(VectorLogPtr& files) {
// same log in both db dir and archived dir. Simply
// ignore the one in db dir. Note that, if we read
// archived dir first, we would have missed the log file.
Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log,
"%s already moved to archive", log->PathName().c_str());
ROCKS_LOG_WARN(db_options_.info_log, "%s already moved to archive",
log->PathName().c_str());
}
}
@ -137,8 +136,8 @@ void WalManager::PurgeObsoleteWALFiles() {
int64_t current_time;
Status s = env_->GetCurrentTime(&current_time);
if (!s.ok()) {
Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log,
"Can't get current time: %s", s.ToString().c_str());
ROCKS_LOG_ERROR(db_options_.info_log, "Can't get current time: %s",
s.ToString().c_str());
assert(false);
return;
}
@ -157,8 +156,8 @@ void WalManager::PurgeObsoleteWALFiles() {
std::vector<std::string> files;
s = env_->GetChildren(archival_dir, &files);
if (!s.ok()) {
Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log,
"Can't get archive files: %s", s.ToString().c_str());
ROCKS_LOG_ERROR(db_options_.info_log, "Can't get archive files: %s",
s.ToString().c_str());
assert(false);
return;
}
@ -175,17 +174,16 @@ void WalManager::PurgeObsoleteWALFiles() {
uint64_t file_m_time;
s = env_->GetFileModificationTime(file_path, &file_m_time);
if (!s.ok()) {
Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log,
"Can't get file mod time: %s: %s",
file_path.c_str(), s.ToString().c_str());
ROCKS_LOG_WARN(db_options_.info_log,
"Can't get file mod time: %s: %s", file_path.c_str(),
s.ToString().c_str());
continue;
}
if (now_seconds - file_m_time > db_options_.wal_ttl_seconds) {
s = env_->DeleteFile(file_path);
if (!s.ok()) {
Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log,
"Can't delete file: %s: %s",
file_path.c_str(), s.ToString().c_str());
ROCKS_LOG_WARN(db_options_.info_log, "Can't delete file: %s: %s",
file_path.c_str(), s.ToString().c_str());
continue;
} else {
MutexLock l(&read_first_record_cache_mutex_);
@ -199,9 +197,9 @@ void WalManager::PurgeObsoleteWALFiles() {
uint64_t file_size;
s = env_->GetFileSize(file_path, &file_size);
if (!s.ok()) {
Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log,
"Unable to get file size: %s: %s",
file_path.c_str(), s.ToString().c_str());
ROCKS_LOG_ERROR(db_options_.info_log,
"Unable to get file size: %s: %s", file_path.c_str(),
s.ToString().c_str());
return;
} else {
if (file_size > 0) {
@ -210,9 +208,9 @@ void WalManager::PurgeObsoleteWALFiles() {
} else {
s = env_->DeleteFile(file_path);
if (!s.ok()) {
Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log,
"Unable to delete file: %s: %s",
file_path.c_str(), s.ToString().c_str());
ROCKS_LOG_WARN(db_options_.info_log,
"Unable to delete file: %s: %s", file_path.c_str(),
s.ToString().c_str());
continue;
} else {
MutexLock l(&read_first_record_cache_mutex_);
@ -239,9 +237,9 @@ void WalManager::PurgeObsoleteWALFiles() {
GetSortedWalsOfType(archival_dir, archived_logs, kArchivedLogFile);
if (files_del_num > archived_logs.size()) {
Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log,
"Trying to delete more archived log files than "
"exist. Deleting all");
ROCKS_LOG_WARN(db_options_.info_log,
"Trying to delete more archived log files than "
"exist. Deleting all");
files_del_num = archived_logs.size();
}
@ -249,9 +247,8 @@ void WalManager::PurgeObsoleteWALFiles() {
std::string const file_path = archived_logs[i]->PathName();
s = env_->DeleteFile(db_options_.wal_dir + "/" + file_path);
if (!s.ok()) {
Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log,
"Unable to delete file: %s: %s", file_path.c_str(),
s.ToString().c_str());
ROCKS_LOG_WARN(db_options_.info_log, "Unable to delete file: %s: %s",
file_path.c_str(), s.ToString().c_str());
continue;
} else {
MutexLock l(&read_first_record_cache_mutex_);
@ -267,9 +264,9 @@ void WalManager::ArchiveWALFile(const std::string& fname, uint64_t number) {
Status s = env_->RenameFile(fname, archived_log_name);
// The sync point below is used in (DBTest,TransactionLogIteratorRace)
TEST_SYNC_POINT("WalManager::PurgeObsoleteFiles:2");
Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
"Move log file %s to %s -- %s\n", fname.c_str(),
archived_log_name.c_str(), s.ToString().c_str());
ROCKS_LOG_INFO(db_options_.info_log, "Move log file %s to %s -- %s\n",
fname.c_str(), archived_log_name.c_str(),
s.ToString().c_str());
}
namespace {
@ -367,8 +364,8 @@ Status WalManager::ReadFirstRecord(const WalFileType type,
SequenceNumber* sequence) {
*sequence = 0;
if (type != kAliveLogFile && type != kArchivedLogFile) {
Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log,
"[WalManger] Unknown file type %s", ToString(type).c_str());
ROCKS_LOG_ERROR(db_options_.info_log, "[WalManger] Unknown file type %s",
ToString(type).c_str());
return Status::NotSupported(
"File Type Not Known " + ToString(type));
}
@ -423,10 +420,9 @@ Status WalManager::ReadFirstLine(const std::string& fname,
Status* status;
bool ignore_error; // true if db_options_.paranoid_checks==false
virtual void Corruption(size_t bytes, const Status& s) override {
Log(InfoLogLevel::WARN_LEVEL, info_log,
"[WalManager] %s%s: dropping %d bytes; %s",
(this->ignore_error ? "(ignoring error) " : ""), fname,
static_cast<int>(bytes), s.ToString().c_str());
ROCKS_LOG_WARN(info_log, "[WalManager] %s%s: dropping %d bytes; %s",
(this->ignore_error ? "(ignoring error) " : ""), fname,
static_cast<int>(bytes), s.ToString().c_str());
if (this->status->ok()) {
// only keep the first error
*this->status = s;

@ -338,7 +338,8 @@ BlockBasedTableBuilder::BlockBasedTableBuilder(
BlockBasedTableOptions sanitized_table_options(table_options);
if (sanitized_table_options.format_version == 0 &&
sanitized_table_options.checksum != kCRC32c) {
Log(InfoLogLevel::WARN_LEVEL, ioptions.info_log,
ROCKS_LOG_WARN(
ioptions.info_log,
"Silently converting format_version to 1 because checksum is "
"non-default");
// silently convert format_version to 1 to keep consistent with current
@ -489,8 +490,8 @@ void BlockBasedTableBuilder::WriteBlock(const Slice& raw_block_contents,
if (!compressed_ok) {
// The result of the compression was invalid. abort.
abort_compression = true;
Log(InfoLogLevel::ERROR_LEVEL, r->ioptions.info_log,
"Decompressed block did not match raw block");
ROCKS_LOG_ERROR(r->ioptions.info_log,
"Decompressed block did not match raw block");
r->status =
Status::Corruption("Decompressed block did not match raw block");
}

@ -430,9 +430,8 @@ bool IsFeatureSupported(const TableProperties& table_properties,
if (pos->second == kPropFalse) {
return false;
} else if (pos->second != kPropTrue) {
Log(InfoLogLevel::WARN_LEVEL, info_log,
"Property %s has invalidate value %s", user_prop_name.c_str(),
pos->second.c_str());
ROCKS_LOG_WARN(info_log, "Property %s has invalidate value %s",
user_prop_name.c_str(), pos->second.c_str());
}
}
return true;
@ -449,7 +448,8 @@ SequenceNumber GetGlobalSequenceNumber(const TableProperties& table_properties,
if (seqno_pos != props.end()) {
// This is not an external sst file, global_seqno is not supported.
assert(false);
Log(InfoLogLevel::ERROR_LEVEL, info_log,
ROCKS_LOG_ERROR(
info_log,
"A non-external sst file have global seqno property with value %s",
seqno_pos->second.c_str());
}
@ -461,7 +461,8 @@ SequenceNumber GetGlobalSequenceNumber(const TableProperties& table_properties,
if (seqno_pos != props.end() || version != 1) {
// This is a v1 external sst file, global_seqno is not supported.
assert(false);
Log(InfoLogLevel::ERROR_LEVEL, info_log,
ROCKS_LOG_ERROR(
info_log,
"An external sst file with version %u have global seqno property "
"with value %s",
version, seqno_pos->second.c_str());
@ -473,7 +474,8 @@ SequenceNumber GetGlobalSequenceNumber(const TableProperties& table_properties,
if (global_seqno > kMaxSequenceNumber) {
assert(false);
Log(InfoLogLevel::ERROR_LEVEL, info_log,
ROCKS_LOG_ERROR(
info_log,
"An external sst file with version %u have global seqno property "
"with value %llu, which is greater than kMaxSequenceNumber",
version, global_seqno);
@ -571,9 +573,9 @@ Status BlockBasedTable::Open(const ImmutableCFOptions& ioptions,
s = SeekToPropertiesBlock(meta_iter.get(), &found_properties_block);
if (!s.ok()) {
Log(InfoLogLevel::WARN_LEVEL, rep->ioptions.info_log,
"Error when seeking to properties block from file: %s",
s.ToString().c_str());
ROCKS_LOG_WARN(rep->ioptions.info_log,
"Error when seeking to properties block from file: %s",
s.ToString().c_str());
} else if (found_properties_block) {
s = meta_iter->status();
TableProperties* table_properties = nullptr;
@ -583,22 +585,24 @@ Status BlockBasedTable::Open(const ImmutableCFOptions& ioptions,
}
if (!s.ok()) {
Log(InfoLogLevel::WARN_LEVEL, rep->ioptions.info_log,
"Encountered error while reading data from properties "
"block %s", s.ToString().c_str());
ROCKS_LOG_WARN(rep->ioptions.info_log,
"Encountered error while reading data from properties "
"block %s",
s.ToString().c_str());
} else {
rep->table_properties.reset(table_properties);
}
} else {
Log(InfoLogLevel::ERROR_LEVEL, rep->ioptions.info_log,
"Cannot find Properties block from file.");
ROCKS_LOG_ERROR(rep->ioptions.info_log,
"Cannot find Properties block from file.");
}
// Read the compression dictionary meta block
bool found_compression_dict;
s = SeekToCompressionDictBlock(meta_iter.get(), &found_compression_dict);
if (!s.ok()) {
Log(InfoLogLevel::WARN_LEVEL, rep->ioptions.info_log,
ROCKS_LOG_WARN(
rep->ioptions.info_log,
"Error when seeking to compression dictionary block from file: %s",
s.ToString().c_str());
} else if (found_compression_dict) {
@ -613,7 +617,8 @@ Status BlockBasedTable::Open(const ImmutableCFOptions& ioptions,
rocksdb::kCompressionDictBlock,
compression_dict_block.get());
if (!s.ok()) {
Log(InfoLogLevel::WARN_LEVEL, rep->ioptions.info_log,
ROCKS_LOG_WARN(
rep->ioptions.info_log,
"Encountered error while reading data from compression dictionary "
"block %s",
s.ToString().c_str());
@ -627,7 +632,8 @@ Status BlockBasedTable::Open(const ImmutableCFOptions& ioptions,
s = SeekToRangeDelBlock(meta_iter.get(), &found_range_del_block,
&rep->range_del_handle);
if (!s.ok()) {
Log(InfoLogLevel::WARN_LEVEL, rep->ioptions.info_log,
ROCKS_LOG_WARN(
rep->ioptions.info_log,
"Error when seeking to range delete tombstones block from file: %s",
s.ToString().c_str());
} else {
@ -637,7 +643,8 @@ Status BlockBasedTable::Open(const ImmutableCFOptions& ioptions,
Slice() /* compression_dict */,
&rep->range_del_entry);
if (!s.ok()) {
Log(InfoLogLevel::WARN_LEVEL, rep->ioptions.info_log,
ROCKS_LOG_WARN(
rep->ioptions.info_log,
"Encountered error while reading data from range del block %s",
s.ToString().c_str());
}
@ -773,9 +780,10 @@ Status BlockBasedTable::ReadMetaBlock(Rep* rep,
0 /* read_amp_bytes_per_bit */);
if (!s.ok()) {
Log(InfoLogLevel::ERROR_LEVEL, rep->ioptions.info_log,
"Encountered error while reading data from properties"
" block %s", s.ToString().c_str());
ROCKS_LOG_ERROR(rep->ioptions.info_log,
"Encountered error while reading data from properties"
" block %s",
s.ToString().c_str());
return s;
}
@ -1641,10 +1649,10 @@ Status BlockBasedTable::CreateIndexReader(
const Footer& footer = rep_->footer;
if (index_type_on_file == BlockBasedTableOptions::kHashSearch &&
rep_->ioptions.prefix_extractor == nullptr) {
Log(InfoLogLevel::WARN_LEVEL, rep_->ioptions.info_log,
"BlockBasedTableOptions::kHashSearch requires "
"options.prefix_extractor to be set."
" Fall back to binary search index.");
ROCKS_LOG_WARN(rep_->ioptions.info_log,
"BlockBasedTableOptions::kHashSearch requires "
"options.prefix_extractor to be set."
" Fall back to binary search index.");
index_type_on_file = BlockBasedTableOptions::kBinarySearch;
}
@ -1668,9 +1676,9 @@ Status BlockBasedTable::CreateIndexReader(
if (!s.ok()) {
// we simply fall back to binary search in case there is any
// problem with prefix hash index loading.
Log(InfoLogLevel::WARN_LEVEL, rep_->ioptions.info_log,
"Unable to read the metaindex block."
" Fall back to binary search index.");
ROCKS_LOG_WARN(rep_->ioptions.info_log,
"Unable to read the metaindex block."
" Fall back to binary search index.");
return BinarySearchIndexReader::Create(
file, footer, footer.index_handle(), rep_->ioptions, comparator,
index_reader, rep_->persistent_cache_options);

@ -20,12 +20,12 @@
#include "util/compression.h"
#include "util/crc32c.h"
#include "util/file_reader_writer.h"
#include "util/logging.h"
#include "util/perf_context_imp.h"
#include "util/string_util.h"
#include "util/xxhash.h"
#include "util/statistics.h"
#include "util/stop_watch.h"
#include "util/string_util.h"
#include "util/xxhash.h"
namespace rocksdb {
@ -331,9 +331,9 @@ Status ReadBlockContents(RandomAccessFileReader* file, const Footer& footer,
// uncompressed page is not found
if (ioptions.info_log && !status.IsNotFound()) {
assert(!status.ok());
Log(InfoLogLevel::INFO_LEVEL, ioptions.info_log,
"Error reading from persistent cache. %s",
status.ToString().c_str());
ROCKS_LOG_INFO(ioptions.info_log,
"Error reading from persistent cache. %s",
status.ToString().c_str());
}
}
}
@ -354,8 +354,9 @@ Status ReadBlockContents(RandomAccessFileReader* file, const Footer& footer,
} else {
if (ioptions.info_log && !status.IsNotFound()) {
assert(!status.ok());
Log(InfoLogLevel::INFO_LEVEL, ioptions.info_log,
"Error reading from persistent cache. %s", status.ToString().c_str());
ROCKS_LOG_INFO(ioptions.info_log,
"Error reading from persistent cache. %s",
status.ToString().c_str());
}
// cache miss read from device
if (decompression_requested &&

@ -114,7 +114,7 @@ void LogPropertiesCollectionError(
std::string msg =
"Encountered error when calling TablePropertiesCollector::" +
method + "() with collector name: " + name;
Log(InfoLogLevel::ERROR_LEVEL, info_log, "%s", msg.c_str());
ROCKS_LOG_ERROR(info_log, "%s", msg.c_str());
}
bool NotifyCollectTableCollectorsOnAdd(
@ -227,8 +227,7 @@ Status ReadProperties(const Slice& handle_value, RandomAccessFileReader* file,
auto error_msg =
"Detect malformed value in properties meta-block:"
"\tkey: " + key + "\tval: " + raw_val.ToString();
Log(InfoLogLevel::ERROR_LEVEL, ioptions.info_log, "%s",
error_msg.c_str());
ROCKS_LOG_ERROR(ioptions.info_log, "%s", error_msg.c_str());
continue;
}
*(pos->second) = val;

@ -102,9 +102,8 @@ Slice PlainTableIndexBuilder::Finish() {
BucketizeIndexes(&hash_to_offsets, &entries_per_bucket);
keys_per_prefix_hist_.Add(num_keys_per_prefix_);
Log(InfoLogLevel::INFO_LEVEL, ioptions_.info_log,
"Number of Keys per prefix Histogram: %s",
keys_per_prefix_hist_.ToString().c_str());
ROCKS_LOG_INFO(ioptions_.info_log, "Number of Keys per prefix Histogram: %s",
keys_per_prefix_hist_.ToString().c_str());
// From the temp data structure, populate indexes.
return FillIndexes(hash_to_offsets, entries_per_bucket);
@ -158,9 +157,9 @@ void PlainTableIndexBuilder::BucketizeIndexes(
Slice PlainTableIndexBuilder::FillIndexes(
const std::vector<IndexRecord*>& hash_to_offsets,
const std::vector<uint32_t>& entries_per_bucket) {
Log(InfoLogLevel::DEBUG_LEVEL, ioptions_.info_log,
"Reserving %" PRIu32 " bytes for plain table's sub_index",
sub_index_size_);
ROCKS_LOG_DEBUG(ioptions_.info_log,
"Reserving %" PRIu32 " bytes for plain table's sub_index",
sub_index_size_);
auto total_allocate_size = GetTotalSize();
char* allocated = arena_->AllocateAligned(
total_allocate_size, huge_page_tlb_size_, ioptions_.info_log);
@ -203,9 +202,9 @@ Slice PlainTableIndexBuilder::FillIndexes(
}
assert(sub_index_offset == sub_index_size_);
Log(InfoLogLevel::DEBUG_LEVEL, ioptions_.info_log,
"hash table size: %d, suffix_map length %" ROCKSDB_PRIszt, index_size_,
sub_index_size_);
ROCKS_LOG_DEBUG(ioptions_.info_log,
"hash table size: %d, suffix_map length %" ROCKSDB_PRIszt,
index_size_, sub_index_size_);
return Slice(allocated, GetTotalSize());
}

@ -18,9 +18,10 @@
#ifndef OS_WIN
#include <sys/mman.h>
#endif
#include "port/port.h"
#include <algorithm>
#include "port/port.h"
#include "rocksdb/env.h"
#include "util/logging.h"
namespace rocksdb {
@ -152,8 +153,9 @@ char* Arena::AllocateAligned(size_t bytes, size_t huge_page_size,
char* addr = AllocateFromHugePage(reserved_size);
if (addr == nullptr) {
Warn(logger, "AllocateAligned fail to allocate huge TLB pages: %s",
strerror(errno));
ROCKS_LOG_WARN(logger,
"AllocateAligned fail to allocate huge TLB pages: %s",
strerror(errno));
// fall back to malloc
} else {
return addr;

@ -109,42 +109,47 @@ uint64_t MutableCFOptions::MaxFileSizeForLevel(int level) const {
void MutableCFOptions::Dump(Logger* log) const {
// Memtable related options
Log(log, " write_buffer_size: %" ROCKSDB_PRIszt,
write_buffer_size);
Log(log, " max_write_buffer_number: %d",
max_write_buffer_number);
Log(log, " arena_block_size: %" ROCKSDB_PRIszt,
arena_block_size);
Log(log, " memtable_prefix_bloom_ratio: %f",
memtable_prefix_bloom_size_ratio);
Log(log, " memtable_huge_page_size: %" ROCKSDB_PRIszt,
memtable_huge_page_size);
Log(log, " max_successive_merges: %" ROCKSDB_PRIszt,
max_successive_merges);
Log(log, " inplace_update_num_locks: %" ROCKSDB_PRIszt,
inplace_update_num_locks);
Log(log, " disable_auto_compactions: %d",
disable_auto_compactions);
Log(log, " soft_pending_compaction_bytes_limit: %" PRIu64,
soft_pending_compaction_bytes_limit);
Log(log, " hard_pending_compaction_bytes_limit: %" PRIu64,
hard_pending_compaction_bytes_limit);
Log(log, " level0_file_num_compaction_trigger: %d",
level0_file_num_compaction_trigger);
Log(log, " level0_slowdown_writes_trigger: %d",
level0_slowdown_writes_trigger);
Log(log, " level0_stop_writes_trigger: %d",
level0_stop_writes_trigger);
Log(log, " max_compaction_bytes: %" PRIu64,
max_compaction_bytes);
Log(log, " target_file_size_base: %" PRIu64,
target_file_size_base);
Log(log, " target_file_size_multiplier: %d",
target_file_size_multiplier);
Log(log, " max_bytes_for_level_base: %" PRIu64,
max_bytes_for_level_base);
Log(log, " max_bytes_for_level_multiplier: %f",
max_bytes_for_level_multiplier);
ROCKS_LOG_INFO(log,
" write_buffer_size: %" ROCKSDB_PRIszt,
write_buffer_size);
ROCKS_LOG_INFO(log, " max_write_buffer_number: %d",
max_write_buffer_number);
ROCKS_LOG_INFO(log,
" arena_block_size: %" ROCKSDB_PRIszt,
arena_block_size);
ROCKS_LOG_INFO(log, " memtable_prefix_bloom_ratio: %f",
memtable_prefix_bloom_size_ratio);
ROCKS_LOG_INFO(log,
" memtable_huge_page_size: %" ROCKSDB_PRIszt,
memtable_huge_page_size);
ROCKS_LOG_INFO(log,
" max_successive_merges: %" ROCKSDB_PRIszt,
max_successive_merges);
ROCKS_LOG_INFO(log,
" inplace_update_num_locks: %" ROCKSDB_PRIszt,
inplace_update_num_locks);
ROCKS_LOG_INFO(log, " disable_auto_compactions: %d",
disable_auto_compactions);
ROCKS_LOG_INFO(log, " soft_pending_compaction_bytes_limit: %" PRIu64,
soft_pending_compaction_bytes_limit);
ROCKS_LOG_INFO(log, " hard_pending_compaction_bytes_limit: %" PRIu64,
hard_pending_compaction_bytes_limit);
ROCKS_LOG_INFO(log, " level0_file_num_compaction_trigger: %d",
level0_file_num_compaction_trigger);
ROCKS_LOG_INFO(log, " level0_slowdown_writes_trigger: %d",
level0_slowdown_writes_trigger);
ROCKS_LOG_INFO(log, " level0_stop_writes_trigger: %d",
level0_stop_writes_trigger);
ROCKS_LOG_INFO(log, " max_compaction_bytes: %" PRIu64,
max_compaction_bytes);
ROCKS_LOG_INFO(log, " target_file_size_base: %" PRIu64,
target_file_size_base);
ROCKS_LOG_INFO(log, " target_file_size_multiplier: %d",
target_file_size_multiplier);
ROCKS_LOG_INFO(log, " max_bytes_for_level_base: %" PRIu64,
max_bytes_for_level_base);
ROCKS_LOG_INFO(log, " max_bytes_for_level_multiplier: %f",
max_bytes_for_level_multiplier);
std::string result;
char buf[10];
for (const auto m : max_bytes_for_level_multiplier_additional) {
@ -157,14 +162,16 @@ void MutableCFOptions::Dump(Logger* log) const {
result = "";
}
Log(log, "max_bytes_for_level_multiplier_additional: %s", result.c_str());
Log(log, " max_sequential_skip_in_iterations: %" PRIu64,
max_sequential_skip_in_iterations);
Log(log, " paranoid_file_checks: %d",
paranoid_file_checks);
Log(log, " report_bg_io_stats: %d", report_bg_io_stats);
Log(log, " compression: %d",
static_cast<int>(compression));
ROCKS_LOG_INFO(log, "max_bytes_for_level_multiplier_additional: %s",
result.c_str());
ROCKS_LOG_INFO(log, " max_sequential_skip_in_iterations: %" PRIu64,
max_sequential_skip_in_iterations);
ROCKS_LOG_INFO(log, " paranoid_file_checks: %d",
paranoid_file_checks);
ROCKS_LOG_INFO(log, " report_bg_io_stats: %d",
report_bg_io_stats);
ROCKS_LOG_INFO(log, " compression: %d",
static_cast<int>(compression));
}
} // namespace rocksdb

@ -16,6 +16,7 @@
#include "rocksdb/env.h"
#include "rocksdb/sst_file_manager.h"
#include "rocksdb/wal_filter.h"
#include "util/logging.h"
namespace rocksdb {
@ -88,120 +89,132 @@ ImmutableDBOptions::ImmutableDBOptions(const DBOptions& options)
}
void ImmutableDBOptions::Dump(Logger* log) const {
Header(log, " Options.error_if_exists: %d",
error_if_exists);
Header(log, " Options.create_if_missing: %d",
create_if_missing);
Header(log, " Options.paranoid_checks: %d",
paranoid_checks);
Header(log, " Options.env: %p", env);
Header(log, " Options.info_log: %p",
info_log.get());
Header(log, " Options.max_open_files: %d",
max_open_files);
Header(log, " Options.max_file_opening_threads: %d",
max_file_opening_threads);
Header(log, " Options.use_fsync: %d", use_fsync);
Header(log,
" Options.max_log_file_size: %" ROCKSDB_PRIszt,
max_log_file_size);
Header(log, " Options.max_manifest_file_size: %" PRIu64,
max_manifest_file_size);
Header(log,
" Options.log_file_time_to_roll: %" ROCKSDB_PRIszt,
log_file_time_to_roll);
Header(log,
" Options.keep_log_file_num: %" ROCKSDB_PRIszt,
keep_log_file_num);
Header(log,
" Options.recycle_log_file_num: %" ROCKSDB_PRIszt,
recycle_log_file_num);
Header(log, " Options.allow_fallocate: %d",
allow_fallocate);
Header(log, " Options.allow_mmap_reads: %d",
allow_mmap_reads);
Header(log, " Options.allow_mmap_writes: %d",
allow_mmap_writes);
Header(log, " Options.use_direct_reads: %d",
use_direct_reads);
Header(log, " Options.use_direct_writes: %d",
use_direct_writes);
Header(log, " Options.create_missing_column_families: %d",
create_missing_column_families);
Header(log, " Options.db_log_dir: %s",
db_log_dir.c_str());
Header(log, " Options.wal_dir: %s",
wal_dir.c_str());
Header(log, " Options.table_cache_numshardbits: %d",
table_cache_numshardbits);
Header(log, " Options.max_subcompactions: %" PRIu32,
max_subcompactions);
Header(log, " Options.max_background_flushes: %d",
max_background_flushes);
Header(log, " Options.WAL_ttl_seconds: %" PRIu64,
wal_ttl_seconds);
Header(log, " Options.WAL_size_limit_MB: %" PRIu64,
wal_size_limit_mb);
Header(log,
" Options.manifest_preallocation_size: %" ROCKSDB_PRIszt,
manifest_preallocation_size);
Header(log, " Options.is_fd_close_on_exec: %d",
is_fd_close_on_exec);
Header(log, " Options.stats_dump_period_sec: %u",
stats_dump_period_sec);
Header(log, " Options.advise_random_on_open: %d",
advise_random_on_open);
Header(log,
" Options.db_write_buffer_size: %" ROCKSDB_PRIszt,
db_write_buffer_size);
Header(log, " Options.access_hint_on_compaction_start: %d",
static_cast<int>(access_hint_on_compaction_start));
Header(log, " Options.new_table_reader_for_compaction_inputs: %d",
new_table_reader_for_compaction_inputs);
Header(log,
" Options.compaction_readahead_size: %" ROCKSDB_PRIszt,
compaction_readahead_size);
Header(log,
" Options.random_access_max_buffer_size: %" ROCKSDB_PRIszt,
random_access_max_buffer_size);
Header(log,
" Options.writable_file_max_buffer_size: %" ROCKSDB_PRIszt,
writable_file_max_buffer_size);
Header(log, " Options.use_adaptive_mutex: %d",
use_adaptive_mutex);
Header(log, " Options.rate_limiter: %p",
rate_limiter.get());
ROCKS_LOG_HEADER(log, " Options.error_if_exists: %d",
error_if_exists);
ROCKS_LOG_HEADER(log, " Options.create_if_missing: %d",
create_if_missing);
ROCKS_LOG_HEADER(log, " Options.paranoid_checks: %d",
paranoid_checks);
ROCKS_LOG_HEADER(log, " Options.env: %p",
env);
ROCKS_LOG_HEADER(log, " Options.info_log: %p",
info_log.get());
ROCKS_LOG_HEADER(log, " Options.max_open_files: %d",
max_open_files);
ROCKS_LOG_HEADER(log, " Options.max_file_opening_threads: %d",
max_file_opening_threads);
ROCKS_LOG_HEADER(log, " Options.use_fsync: %d",
use_fsync);
ROCKS_LOG_HEADER(
log, " Options.max_log_file_size: %" ROCKSDB_PRIszt,
max_log_file_size);
ROCKS_LOG_HEADER(log,
" Options.max_manifest_file_size: %" PRIu64,
max_manifest_file_size);
ROCKS_LOG_HEADER(
log, " Options.log_file_time_to_roll: %" ROCKSDB_PRIszt,
log_file_time_to_roll);
ROCKS_LOG_HEADER(
log, " Options.keep_log_file_num: %" ROCKSDB_PRIszt,
keep_log_file_num);
ROCKS_LOG_HEADER(
log, " Options.recycle_log_file_num: %" ROCKSDB_PRIszt,
recycle_log_file_num);
ROCKS_LOG_HEADER(log, " Options.allow_fallocate: %d",
allow_fallocate);
ROCKS_LOG_HEADER(log, " Options.allow_mmap_reads: %d",
allow_mmap_reads);
ROCKS_LOG_HEADER(log, " Options.allow_mmap_writes: %d",
allow_mmap_writes);
ROCKS_LOG_HEADER(log, " Options.use_direct_reads: %d",
use_direct_reads);
ROCKS_LOG_HEADER(log, " Options.use_direct_writes: %d",
use_direct_writes);
ROCKS_LOG_HEADER(log, " Options.create_missing_column_families: %d",
create_missing_column_families);
ROCKS_LOG_HEADER(log, " Options.db_log_dir: %s",
db_log_dir.c_str());
ROCKS_LOG_HEADER(log, " Options.wal_dir: %s",
wal_dir.c_str());
ROCKS_LOG_HEADER(log, " Options.table_cache_numshardbits: %d",
table_cache_numshardbits);
ROCKS_LOG_HEADER(log,
" Options.max_subcompactions: %" PRIu32,
max_subcompactions);
ROCKS_LOG_HEADER(log, " Options.max_background_flushes: %d",
max_background_flushes);
ROCKS_LOG_HEADER(log,
" Options.WAL_ttl_seconds: %" PRIu64,
wal_ttl_seconds);
ROCKS_LOG_HEADER(log,
" Options.WAL_size_limit_MB: %" PRIu64,
wal_size_limit_mb);
ROCKS_LOG_HEADER(
log, " Options.manifest_preallocation_size: %" ROCKSDB_PRIszt,
manifest_preallocation_size);
ROCKS_LOG_HEADER(log, " Options.is_fd_close_on_exec: %d",
is_fd_close_on_exec);
ROCKS_LOG_HEADER(log, " Options.stats_dump_period_sec: %u",
stats_dump_period_sec);
ROCKS_LOG_HEADER(log, " Options.advise_random_on_open: %d",
advise_random_on_open);
ROCKS_LOG_HEADER(
log, " Options.db_write_buffer_size: %" ROCKSDB_PRIszt,
db_write_buffer_size);
ROCKS_LOG_HEADER(log, " Options.access_hint_on_compaction_start: %d",
static_cast<int>(access_hint_on_compaction_start));
ROCKS_LOG_HEADER(log, " Options.new_table_reader_for_compaction_inputs: %d",
new_table_reader_for_compaction_inputs);
ROCKS_LOG_HEADER(
log, " Options.compaction_readahead_size: %" ROCKSDB_PRIszt,
compaction_readahead_size);
ROCKS_LOG_HEADER(
log, " Options.random_access_max_buffer_size: %" ROCKSDB_PRIszt,
random_access_max_buffer_size);
ROCKS_LOG_HEADER(
log, " Options.writable_file_max_buffer_size: %" ROCKSDB_PRIszt,
writable_file_max_buffer_size);
ROCKS_LOG_HEADER(log, " Options.use_adaptive_mutex: %d",
use_adaptive_mutex);
ROCKS_LOG_HEADER(log, " Options.rate_limiter: %p",
rate_limiter.get());
Header(
log, " Options.sst_file_manager.rate_bytes_per_sec: %" PRIi64,
sst_file_manager ? sst_file_manager->GetDeleteRateBytesPerSecond() : 0);
Header(log, " Options.bytes_per_sync: %" PRIu64,
bytes_per_sync);
Header(log, " Options.wal_bytes_per_sync: %" PRIu64,
wal_bytes_per_sync);
Header(log, " Options.wal_recovery_mode: %d",
wal_recovery_mode);
Header(log, " Options.enable_thread_tracking: %d",
enable_thread_tracking);
Header(log, " Options.allow_concurrent_memtable_write: %d",
allow_concurrent_memtable_write);
Header(log, " Options.enable_write_thread_adaptive_yield: %d",
enable_write_thread_adaptive_yield);
Header(log, " Options.write_thread_max_yield_usec: %" PRIu64,
write_thread_max_yield_usec);
Header(log, " Options.write_thread_slow_yield_usec: %" PRIu64,
write_thread_slow_yield_usec);
ROCKS_LOG_HEADER(log,
" Options.bytes_per_sync: %" PRIu64,
bytes_per_sync);
ROCKS_LOG_HEADER(log,
" Options.wal_bytes_per_sync: %" PRIu64,
wal_bytes_per_sync);
ROCKS_LOG_HEADER(log, " Options.wal_recovery_mode: %d",
wal_recovery_mode);
ROCKS_LOG_HEADER(log, " Options.enable_thread_tracking: %d",
enable_thread_tracking);
ROCKS_LOG_HEADER(log, " Options.allow_concurrent_memtable_write: %d",
allow_concurrent_memtable_write);
ROCKS_LOG_HEADER(log, " Options.enable_write_thread_adaptive_yield: %d",
enable_write_thread_adaptive_yield);
ROCKS_LOG_HEADER(log,
" Options.write_thread_max_yield_usec: %" PRIu64,
write_thread_max_yield_usec);
ROCKS_LOG_HEADER(log,
" Options.write_thread_slow_yield_usec: %" PRIu64,
write_thread_slow_yield_usec);
if (row_cache) {
Header(log, " Options.row_cache: %" PRIu64,
row_cache->GetCapacity());
ROCKS_LOG_HEADER(
log, " Options.row_cache: %" PRIu64,
row_cache->GetCapacity());
} else {
Header(log, " Options.row_cache: None");
ROCKS_LOG_HEADER(log,
" Options.row_cache: None");
}
#ifndef ROCKSDB_LITE
Header(log, " Options.wal_filter: %s",
wal_filter ? wal_filter->Name() : "None");
ROCKS_LOG_HEADER(log, " Options.wal_filter: %s",
wal_filter ? wal_filter->Name() : "None");
#endif // ROCKSDB_LITE
Header(log, " Options.avoid_flush_during_recovery: %d",
avoid_flush_during_recovery);
ROCKS_LOG_HEADER(log, " Options.avoid_flush_during_recovery: %d",
avoid_flush_during_recovery);
}
MutableDBOptions::MutableDBOptions()
@ -222,19 +235,19 @@ MutableDBOptions::MutableDBOptions(const DBOptions& options)
options.delete_obsolete_files_period_micros) {}
void MutableDBOptions::Dump(Logger* log) const {
Header(log, " Options.base_background_compactions: %d",
base_background_compactions);
Header(log, " Options.max_background_compactions: %d",
max_background_compactions);
Header(log, " Options.avoid_flush_during_shutdown: %d",
avoid_flush_during_shutdown);
Header(log, " Options.delayed_write_rate : %" PRIu64,
delayed_write_rate);
Header(log, " Options.max_total_wal_size: %" PRIu64,
max_total_wal_size);
Header(log,
" Options.delete_obsolete_files_period_micros: %" PRIu64,
delete_obsolete_files_period_micros);
ROCKS_LOG_HEADER(log, " Options.base_background_compactions: %d",
base_background_compactions);
ROCKS_LOG_HEADER(log, " Options.max_background_compactions: %d",
max_background_compactions);
ROCKS_LOG_HEADER(log, " Options.avoid_flush_during_shutdown: %d",
avoid_flush_during_shutdown);
ROCKS_LOG_HEADER(log, " Options.delayed_write_rate : %" PRIu64,
delayed_write_rate);
ROCKS_LOG_HEADER(log, " Options.max_total_wal_size: %" PRIu64,
max_total_wal_size);
ROCKS_LOG_HEADER(
log, " Options.delete_obsolete_files_period_micros: %" PRIu64,
delete_obsolete_files_period_micros);
}
} // namespace rocksdb

@ -12,8 +12,9 @@
#include "port/port.h"
#include "rocksdb/env.h"
#include "util/sst_file_manager_impl.h"
#include "util/logging.h"
#include "util/mutexlock.h"
#include "util/sst_file_manager_impl.h"
#include "util/sync_point.h"
namespace rocksdb {
@ -64,9 +65,8 @@ Status DeleteScheduler::DeleteFile(const std::string& file_path) {
std::string path_in_trash;
s = MoveToTrash(file_path, &path_in_trash);
if (!s.ok()) {
Log(InfoLogLevel::ERROR_LEVEL, info_log_,
"Failed to move %s to trash directory (%s)", file_path.c_str(),
trash_dir_.c_str());
ROCKS_LOG_ERROR(info_log_, "Failed to move %s to trash directory (%s)",
file_path.c_str(), trash_dir_.c_str());
s = env_->DeleteFile(file_path);
if (s.ok() && sst_file_manager_) {
sst_file_manager_->OnDeleteFile(file_path);
@ -191,9 +191,8 @@ Status DeleteScheduler::DeleteTrashFile(const std::string& path_in_trash,
if (!s.ok()) {
// Error while getting file size or while deleting
Log(InfoLogLevel::ERROR_LEVEL, info_log_,
"Failed to delete %s from trash -- %s", path_in_trash.c_str(),
s.ToString().c_str());
ROCKS_LOG_ERROR(info_log_, "Failed to delete %s from trash -- %s",
path_in_trash.c_str(), s.ToString().c_str());
*deleted_bytes = 0;
} else {
*deleted_bytes = file_size;

@ -57,23 +57,20 @@ class HdfsReadableFile : virtual public SequentialFile,
public:
HdfsReadableFile(hdfsFS fileSys, const std::string& fname)
: fileSys_(fileSys), filename_(fname), hfile_(nullptr) {
Log(InfoLogLevel::DEBUG_LEVEL, mylog,
"[hdfs] HdfsReadableFile opening file %s\n",
filename_.c_str());
ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsReadableFile opening file %s\n",
filename_.c_str());
hfile_ = hdfsOpenFile(fileSys_, filename_.c_str(), O_RDONLY, 0, 0, 0);
Log(InfoLogLevel::DEBUG_LEVEL, mylog,
"[hdfs] HdfsReadableFile opened file %s hfile_=0x%p\n",
filename_.c_str(), hfile_);
ROCKS_LOG_DEBUG(mylog,
"[hdfs] HdfsReadableFile opened file %s hfile_=0x%p\n",
filename_.c_str(), hfile_);
}
virtual ~HdfsReadableFile() {
Log(InfoLogLevel::DEBUG_LEVEL, mylog,
"[hdfs] HdfsReadableFile closing file %s\n",
filename_.c_str());
ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsReadableFile closing file %s\n",
filename_.c_str());
hdfsCloseFile(fileSys_, hfile_);
Log(InfoLogLevel::DEBUG_LEVEL, mylog,
"[hdfs] HdfsReadableFile closed file %s\n",
filename_.c_str());
ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsReadableFile closed file %s\n",
filename_.c_str());
hfile_ = nullptr;
}
@ -84,9 +81,8 @@ class HdfsReadableFile : virtual public SequentialFile,
// sequential access, read data at current offset in file
virtual Status Read(size_t n, Slice* result, char* scratch) {
Status s;
Log(InfoLogLevel::DEBUG_LEVEL, mylog,
"[hdfs] HdfsReadableFile reading %s %ld\n",
filename_.c_str(), n);
ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsReadableFile reading %s %ld\n",
filename_.c_str(), n);
char* buffer = scratch;
size_t total_bytes_read = 0;
@ -107,8 +103,8 @@ class HdfsReadableFile : virtual public SequentialFile,
}
assert(total_bytes_read <= n);
Log(InfoLogLevel::DEBUG_LEVEL, mylog,
"[hdfs] HdfsReadableFile read %s\n", filename_.c_str());
ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsReadableFile read %s\n",
filename_.c_str());
if (bytes_read < 0) {
s = IOError(filename_, errno);
@ -123,12 +119,12 @@ class HdfsReadableFile : virtual public SequentialFile,
virtual Status Read(uint64_t offset, size_t n, Slice* result,
char* scratch) const {
Status s;
Log(InfoLogLevel::DEBUG_LEVEL, mylog,
"[hdfs] HdfsReadableFile preading %s\n", filename_.c_str());
ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsReadableFile preading %s\n",
filename_.c_str());
ssize_t bytes_read = hdfsPread(fileSys_, hfile_, offset,
(void*)scratch, (tSize)n);
Log(InfoLogLevel::DEBUG_LEVEL, mylog,
"[hdfs] HdfsReadableFile pread %s\n", filename_.c_str());
ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsReadableFile pread %s\n",
filename_.c_str());
*result = Slice(scratch, (bytes_read < 0) ? 0 : bytes_read);
if (bytes_read < 0) {
// An error: return a non-ok status
@ -138,8 +134,8 @@ class HdfsReadableFile : virtual public SequentialFile,
}
virtual Status Skip(uint64_t n) {
Log(InfoLogLevel::DEBUG_LEVEL, mylog,
"[hdfs] HdfsReadableFile skip %s\n", filename_.c_str());
ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsReadableFile skip %s\n",
filename_.c_str());
// get current offset from file
tOffset current = hdfsTell(fileSys_, hfile_);
if (current < 0) {
@ -158,8 +154,8 @@ class HdfsReadableFile : virtual public SequentialFile,
// returns true if we are at the end of file, false otherwise
bool feof() {
Log(InfoLogLevel::DEBUG_LEVEL, mylog,
"[hdfs] HdfsReadableFile feof %s\n", filename_.c_str());
ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsReadableFile feof %s\n",
filename_.c_str());
if (hdfsTell(fileSys_, hfile_) == fileSize()) {
return true;
}
@ -168,8 +164,8 @@ class HdfsReadableFile : virtual public SequentialFile,
// the current size of the file
tOffset fileSize() {
Log(InfoLogLevel::DEBUG_LEVEL, mylog,
"[hdfs] HdfsReadableFile fileSize %s\n", filename_.c_str());
ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsReadableFile fileSize %s\n",
filename_.c_str());
hdfsFileInfo* pFileInfo = hdfsGetPathInfo(fileSys_, filename_.c_str());
tOffset size = 0L;
if (pFileInfo != nullptr) {
@ -192,20 +188,20 @@ class HdfsWritableFile: public WritableFile {
public:
HdfsWritableFile(hdfsFS fileSys, const std::string& fname)
: fileSys_(fileSys), filename_(fname) , hfile_(nullptr) {
Log(InfoLogLevel::DEBUG_LEVEL, mylog,
"[hdfs] HdfsWritableFile opening %s\n", filename_.c_str());
ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile opening %s\n",
filename_.c_str());
hfile_ = hdfsOpenFile(fileSys_, filename_.c_str(), O_WRONLY, 0, 0, 0);
Log(InfoLogLevel::DEBUG_LEVEL, mylog,
"[hdfs] HdfsWritableFile opened %s\n", filename_.c_str());
ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile opened %s\n",
filename_.c_str());
assert(hfile_ != nullptr);
}
virtual ~HdfsWritableFile() {
if (hfile_ != nullptr) {
Log(InfoLogLevel::DEBUG_LEVEL, mylog,
"[hdfs] HdfsWritableFile closing %s\n", filename_.c_str());
ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile closing %s\n",
filename_.c_str());
hdfsCloseFile(fileSys_, hfile_);
Log(InfoLogLevel::DEBUG_LEVEL, mylog,
"[hdfs] HdfsWritableFile closed %s\n", filename_.c_str());
ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile closed %s\n",
filename_.c_str());
hfile_ = nullptr;
}
}
@ -222,13 +218,13 @@ class HdfsWritableFile: public WritableFile {
}
virtual Status Append(const Slice& data) {
Log(InfoLogLevel::DEBUG_LEVEL, mylog,
"[hdfs] HdfsWritableFile Append %s\n", filename_.c_str());
ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile Append %s\n",
filename_.c_str());
const char* src = data.data();
size_t left = data.size();
size_t ret = hdfsWrite(fileSys_, hfile_, src, left);
Log(InfoLogLevel::DEBUG_LEVEL, mylog,
"[hdfs] HdfsWritableFile Appended %s\n", filename_.c_str());
ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile Appended %s\n",
filename_.c_str());
if (ret != left) {
return IOError(filename_, errno);
}
@ -241,16 +237,16 @@ class HdfsWritableFile: public WritableFile {
virtual Status Sync() {
Status s;
Log(InfoLogLevel::DEBUG_LEVEL, mylog,
"[hdfs] HdfsWritableFile Sync %s\n", filename_.c_str());
ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile Sync %s\n",
filename_.c_str());
if (hdfsFlush(fileSys_, hfile_) == -1) {
return IOError(filename_, errno);
}
if (hdfsHSync(fileSys_, hfile_) == -1) {
return IOError(filename_, errno);
}
Log(InfoLogLevel::DEBUG_LEVEL, mylog,
"[hdfs] HdfsWritableFile Synced %s\n", filename_.c_str());
ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile Synced %s\n",
filename_.c_str());
return Status::OK();
}
@ -263,13 +259,13 @@ class HdfsWritableFile: public WritableFile {
}
virtual Status Close() {
Log(InfoLogLevel::DEBUG_LEVEL, mylog,
"[hdfs] HdfsWritableFile closing %s\n", filename_.c_str());
ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile closing %s\n",
filename_.c_str());
if (hdfsCloseFile(fileSys_, hfile_) != 0) {
return IOError(filename_, errno);
}
Log(InfoLogLevel::DEBUG_LEVEL, mylog,
"[hdfs] HdfsWritableFile closed %s\n", filename_.c_str());
ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile closed %s\n",
filename_.c_str());
hfile_ = nullptr;
return Status::OK();
}
@ -284,15 +280,13 @@ class HdfsLogger : public Logger {
public:
HdfsLogger(HdfsWritableFile* f, uint64_t (*gettid)())
: file_(f), gettid_(gettid) {
Log(InfoLogLevel::DEBUG_LEVEL, mylog,
"[hdfs] HdfsLogger opened %s\n",
file_->getName().c_str());
ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsLogger opened %s\n",
file_->getName().c_str());
}
virtual ~HdfsLogger() {
Log(InfoLogLevel::DEBUG_LEVEL, mylog,
"[hdfs] HdfsLogger closed %s\n",
file_->getName().c_str());
ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsLogger closed %s\n",
file_->getName().c_str());
delete file_;
if (mylog != nullptr && mylog == this) {
mylog = nullptr;
@ -439,8 +433,7 @@ Status HdfsEnv::NewDirectory(const std::string& name,
result->reset(new HdfsDirectory(0));
return Status::OK();
default: // fail if the directory doesn't exist
Log(InfoLogLevel::FATAL_LEVEL,
mylog, "NewDirectory hdfsExists call failed");
ROCKS_LOG_FATAL(mylog, "NewDirectory hdfsExists call failed");
throw HdfsFatalException("hdfsExists call failed with error " +
ToString(value) + " on path " + name +
".\n");
@ -455,8 +448,7 @@ Status HdfsEnv::FileExists(const std::string& fname) {
case HDFS_DOESNT_EXIST:
return Status::NotFound();
default: // anything else should be an error
Log(InfoLogLevel::FATAL_LEVEL,
mylog, "FileExists hdfsExists call failed");
ROCKS_LOG_FATAL(mylog, "FileExists hdfsExists call failed");
return Status::IOError("hdfsExists call failed with error " +
ToString(value) + " on path " + fname + ".\n");
}
@ -483,8 +475,7 @@ Status HdfsEnv::GetChildren(const std::string& path,
}
} else {
// numEntries < 0 indicates error
Log(InfoLogLevel::FATAL_LEVEL, mylog,
"hdfsListDirectory call failed with error ");
ROCKS_LOG_FATAL(mylog, "hdfsListDirectory call failed with error ");
throw HdfsFatalException(
"hdfsListDirectory call failed negative error.\n");
}
@ -493,8 +484,7 @@ Status HdfsEnv::GetChildren(const std::string& path,
case HDFS_DOESNT_EXIST: // directory does not exist, exit
return Status::NotFound();
default: // anything else should be an error
Log(InfoLogLevel::FATAL_LEVEL, mylog,
"GetChildren hdfsExists call failed");
ROCKS_LOG_FATAL(mylog, "GetChildren hdfsExists call failed");
throw HdfsFatalException("hdfsExists call failed with error " +
ToString(value) + ".\n");
}
@ -524,8 +514,7 @@ Status HdfsEnv::CreateDirIfMissing(const std::string& name) {
case HDFS_DOESNT_EXIST:
return CreateDir(name);
default: // anything else should be an error
Log(InfoLogLevel::FATAL_LEVEL, mylog,
"CreateDirIfMissing hdfsExists call failed");
ROCKS_LOG_FATAL(mylog, "CreateDirIfMissing hdfsExists call failed");
throw HdfsFatalException("hdfsExists call failed with error " +
ToString(value) + ".\n");
}

@ -1075,15 +1075,16 @@ TEST_P(EnvPosixTestWithParam, LogBufferTest) {
std::fill_n(bytes9000, sizeof(bytes9000), '1');
bytes9000[sizeof(bytes9000) - 1] = '\0';
LogToBuffer(&log_buffer, "x%sx", bytes200);
LogToBuffer(&log_buffer, "x%sx", bytes600);
LogToBuffer(&log_buffer, "x%sx%sx%sx", bytes200, bytes200, bytes200);
LogToBuffer(&log_buffer, "x%sx%sx", bytes200, bytes600);
LogToBuffer(&log_buffer, "x%sx%sx", bytes600, bytes9000);
ROCKS_LOG_BUFFER(&log_buffer, "x%sx", bytes200);
ROCKS_LOG_BUFFER(&log_buffer, "x%sx", bytes600);
ROCKS_LOG_BUFFER(&log_buffer, "x%sx%sx%sx", bytes200, bytes200, bytes200);
ROCKS_LOG_BUFFER(&log_buffer, "x%sx%sx", bytes200, bytes600);
ROCKS_LOG_BUFFER(&log_buffer, "x%sx%sx", bytes600, bytes9000);
LogToBuffer(&log_buffer_debug, "x%sx", bytes200);
ROCKS_LOG_BUFFER(&log_buffer_debug, "x%sx", bytes200);
test_logger.SetInfoLogLevel(DEBUG_LEVEL);
LogToBuffer(&log_buffer_debug, "x%sx%sx%sx", bytes600, bytes9000, bytes200);
ROCKS_LOG_BUFFER(&log_buffer_debug, "x%sx%sx%sx", bytes600, bytes9000,
bytes200);
ASSERT_EQ(0, test_logger.log_count);
log_buffer.FlushBufferToLog();
@ -1124,7 +1125,7 @@ TEST_P(EnvPosixTestWithParam, LogBufferMaxSizeTest) {
TestLogger2 test_logger(max_log_size);
test_logger.SetInfoLogLevel(InfoLogLevel::INFO_LEVEL);
LogBuffer log_buffer(InfoLogLevel::INFO_LEVEL, &test_logger);
LogToBuffer(&log_buffer, max_log_size, "%s", bytes9000);
ROCKS_LOG_BUFFER_MAX_SZ(&log_buffer, max_log_size, "%s", bytes9000);
log_buffer.FlushBufferToLog();
}
}
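For completeness, a hedged sketch of the buffered variant exercised by the test above (the helper function, header path, and message text are assumptions, not part of this diff; `ROCKS_LOG_BUFFER` itself is defined in the `util/logging.h` hunk below). The point is that the `[file:line]` prefix names the buffering call site, not the flush site, because the format string is rewritten before the message is queued:

```cpp
#include "rocksdb/env.h"      // rocksdb::Logger
#include "util/log_buffer.h"  // rocksdb::LogBuffer (assumed header location)
#include "util/logging.h"     // ROCKS_LOG_BUFFER

using namespace rocksdb;

// Hypothetical helper, not part of this diff.
void BufferedLogExample(Logger* info_log) {
  LogBuffer log_buffer(InfoLogLevel::INFO_LEVEL, info_log);
  // The "[file:line] " prefix refers to this line, because the macro bakes
  // __FILE__ and __LINE__ into the format string at the call site.
  ROCKS_LOG_BUFFER(&log_buffer, "picked %d files for compaction", 4);
  log_buffer.FlushBufferToLog();  // buffered messages reach the logger now
}
```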

@ -14,6 +14,7 @@
#include <sstream>
#include <string>
#include "util/logging.h"
#include "util/string_util.h"
namespace rocksdb {

@ -16,6 +16,42 @@
#include <string>
#include "port/port.h"
// Helper macros that include information about file name and line number
#define STRINGIFY(x) #x
#define TOSTRING(x) STRINGIFY(x)
#define PREPEND_FILE_LINE(FMT) ("[" __FILE__ ":" TOSTRING(__LINE__) "] " FMT)
// Don't include file/line info in HEADER level
#define ROCKS_LOG_HEADER(LGR, FMT, ...) \
rocksdb::Log(InfoLogLevel::HEADER_LEVEL, LGR, FMT, ##__VA_ARGS__)
#define ROCKS_LOG_DEBUG(LGR, FMT, ...) \
rocksdb::Log(InfoLogLevel::DEBUG_LEVEL, LGR, PREPEND_FILE_LINE(FMT), \
##__VA_ARGS__)
#define ROCKS_LOG_INFO(LGR, FMT, ...) \
rocksdb::Log(InfoLogLevel::INFO_LEVEL, LGR, PREPEND_FILE_LINE(FMT), \
##__VA_ARGS__)
#define ROCKS_LOG_WARN(LGR, FMT, ...) \
rocksdb::Log(InfoLogLevel::WARN_LEVEL, LGR, PREPEND_FILE_LINE(FMT), \
##__VA_ARGS__)
#define ROCKS_LOG_ERROR(LGR, FMT, ...) \
rocksdb::Log(InfoLogLevel::ERROR_LEVEL, LGR, PREPEND_FILE_LINE(FMT), \
##__VA_ARGS__)
#define ROCKS_LOG_FATAL(LGR, FMT, ...) \
rocksdb::Log(InfoLogLevel::FATAL_LEVEL, LGR, PREPEND_FILE_LINE(FMT), \
##__VA_ARGS__)
#define ROCKS_LOG_BUFFER(LOG_BUF, FMT, ...) \
rocksdb::LogToBuffer(LOG_BUF, PREPEND_FILE_LINE(FMT), ##__VA_ARGS__)
#define ROCKS_LOG_BUFFER_MAX_SZ(LOG_BUF, MAX_LOG_SIZE, FMT, ...) \
rocksdb::LogToBuffer(LOG_BUF, MAX_LOG_SIZE, PREPEND_FILE_LINE(FMT), \
##__VA_ARGS__)
namespace rocksdb {
class Slice;
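For reference, a minimal usage sketch of the macros defined above (the function name, message text, and the file/line values in the comment are illustrative assumptions, not part of this diff). `PREPEND_FILE_LINE` relies on compile-time string-literal concatenation, so the call-site prefix is baked into the format string itself:

```cpp
#include "rocksdb/env.h"   // rocksdb::Logger, rocksdb::Log
#include "util/logging.h"  // ROCKS_LOG_* macros defined above

using rocksdb::InfoLogLevel;
using rocksdb::Logger;

// Hypothetical helper, not part of this diff.
void LogExample(Logger* info_log) {
  // Expands, roughly, to:
  //   rocksdb::Log(InfoLogLevel::INFO_LEVEL, info_log,
  //                "[util/example.cc:12] opened table file %s", fname);
  // so every emitted line carries its call site.
  ROCKS_LOG_INFO(info_log, "opened table file %s", "000123.sst");

  // HEADER level intentionally omits the [file:line] prefix, which keeps
  // the aligned Options dumps readable.
  ROCKS_LOG_HEADER(info_log, "Options.create_if_missing: %d", 1);
}
```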

@ -196,91 +196,108 @@ void DBOptions::Dump(Logger* log) const {
} // DBOptions::Dump
void ColumnFamilyOptions::Dump(Logger* log) const {
Header(log, " Options.comparator: %s", comparator->Name());
Header(log, " Options.merge_operator: %s",
merge_operator ? merge_operator->Name() : "None");
Header(log, " Options.compaction_filter: %s",
compaction_filter ? compaction_filter->Name() : "None");
Header(log, " Options.compaction_filter_factory: %s",
ROCKS_LOG_HEADER(log, " Options.comparator: %s",
comparator->Name());
ROCKS_LOG_HEADER(log, " Options.merge_operator: %s",
merge_operator ? merge_operator->Name() : "None");
ROCKS_LOG_HEADER(log, " Options.compaction_filter: %s",
compaction_filter ? compaction_filter->Name() : "None");
ROCKS_LOG_HEADER(
log, " Options.compaction_filter_factory: %s",
compaction_filter_factory ? compaction_filter_factory->Name() : "None");
Header(log, " Options.memtable_factory: %s", memtable_factory->Name());
Header(log, " Options.table_factory: %s", table_factory->Name());
Header(log, " table_factory options: %s",
table_factory->GetPrintableTableOptions().c_str());
Header(log, " Options.write_buffer_size: %" ROCKSDB_PRIszt,
write_buffer_size);
Header(log, " Options.max_write_buffer_number: %d", max_write_buffer_number);
if (!compression_per_level.empty()) {
for (unsigned int i = 0; i < compression_per_level.size(); i++) {
Header(log, " Options.compression[%d]: %s", i,
CompressionTypeToString(compression_per_level[i]).c_str());
}
ROCKS_LOG_HEADER(log, " Options.memtable_factory: %s",
memtable_factory->Name());
ROCKS_LOG_HEADER(log, " Options.table_factory: %s",
table_factory->Name());
ROCKS_LOG_HEADER(log, " table_factory options: %s",
table_factory->GetPrintableTableOptions().c_str());
ROCKS_LOG_HEADER(log, " Options.write_buffer_size: %" ROCKSDB_PRIszt,
write_buffer_size);
ROCKS_LOG_HEADER(log, " Options.max_write_buffer_number: %d",
max_write_buffer_number);
if (!compression_per_level.empty()) {
for (unsigned int i = 0; i < compression_per_level.size(); i++) {
ROCKS_LOG_HEADER(
log, " Options.compression[%d]: %s", i,
CompressionTypeToString(compression_per_level[i]).c_str());
}
} else {
Header(log, " Options.compression: %s",
CompressionTypeToString(compression).c_str());
ROCKS_LOG_HEADER(log, " Options.compression: %s",
CompressionTypeToString(compression).c_str());
}
Header(log, " Options.bottommost_compression: %s",
bottommost_compression == kDisableCompressionOption
? "Disabled"
: CompressionTypeToString(bottommost_compression).c_str());
Header(log, " Options.prefix_extractor: %s",
ROCKS_LOG_HEADER(
log, " Options.bottommost_compression: %s",
bottommost_compression == kDisableCompressionOption
? "Disabled"
: CompressionTypeToString(bottommost_compression).c_str());
ROCKS_LOG_HEADER(
log, " Options.prefix_extractor: %s",
prefix_extractor == nullptr ? "nullptr" : prefix_extractor->Name());
Header(log, " Options.memtable_insert_with_hint_prefix_extractor: %s",
memtable_insert_with_hint_prefix_extractor == nullptr
? "nullptr"
: memtable_insert_with_hint_prefix_extractor->Name());
Header(log, " Options.num_levels: %d", num_levels);
Header(log, " Options.min_write_buffer_number_to_merge: %d",
min_write_buffer_number_to_merge);
Header(log, " Options.max_write_buffer_number_to_maintain: %d",
max_write_buffer_number_to_maintain);
Header(log, " Options.compression_opts.window_bits: %d",
compression_opts.window_bits);
Header(log, " Options.compression_opts.level: %d",
compression_opts.level);
Header(log, " Options.compression_opts.strategy: %d",
compression_opts.strategy);
Header(log,
ROCKS_LOG_HEADER(log,
" Options.memtable_insert_with_hint_prefix_extractor: %s",
memtable_insert_with_hint_prefix_extractor == nullptr
? "nullptr"
: memtable_insert_with_hint_prefix_extractor->Name());
ROCKS_LOG_HEADER(log, " Options.num_levels: %d", num_levels);
ROCKS_LOG_HEADER(log, " Options.min_write_buffer_number_to_merge: %d",
min_write_buffer_number_to_merge);
ROCKS_LOG_HEADER(log, " Options.max_write_buffer_number_to_maintain: %d",
max_write_buffer_number_to_maintain);
ROCKS_LOG_HEADER(log, " Options.compression_opts.window_bits: %d",
compression_opts.window_bits);
ROCKS_LOG_HEADER(log, " Options.compression_opts.level: %d",
compression_opts.level);
ROCKS_LOG_HEADER(log, " Options.compression_opts.strategy: %d",
compression_opts.strategy);
ROCKS_LOG_HEADER(
log,
" Options.compression_opts.max_dict_bytes: %" ROCKSDB_PRIszt,
compression_opts.max_dict_bytes);
Header(log, " Options.level0_file_num_compaction_trigger: %d",
level0_file_num_compaction_trigger);
Header(log, " Options.level0_slowdown_writes_trigger: %d",
level0_slowdown_writes_trigger);
Header(log, " Options.level0_stop_writes_trigger: %d",
level0_stop_writes_trigger);
Header(log, " Options.target_file_size_base: %" PRIu64,
ROCKS_LOG_HEADER(log, " Options.level0_file_num_compaction_trigger: %d",
level0_file_num_compaction_trigger);
ROCKS_LOG_HEADER(log, " Options.level0_slowdown_writes_trigger: %d",
level0_slowdown_writes_trigger);
ROCKS_LOG_HEADER(log, " Options.level0_stop_writes_trigger: %d",
level0_stop_writes_trigger);
ROCKS_LOG_HEADER(
log, " Options.target_file_size_base: %" PRIu64,
target_file_size_base);
Header(log, " Options.target_file_size_multiplier: %d",
target_file_size_multiplier);
Header(log, " Options.max_bytes_for_level_base: %" PRIu64,
ROCKS_LOG_HEADER(log, " Options.target_file_size_multiplier: %d",
target_file_size_multiplier);
ROCKS_LOG_HEADER(
log, " Options.max_bytes_for_level_base: %" PRIu64,
max_bytes_for_level_base);
Header(log, "Options.level_compaction_dynamic_level_bytes: %d",
level_compaction_dynamic_level_bytes);
Header(log, " Options.max_bytes_for_level_multiplier: %f",
max_bytes_for_level_multiplier);
ROCKS_LOG_HEADER(log, "Options.level_compaction_dynamic_level_bytes: %d",
level_compaction_dynamic_level_bytes);
ROCKS_LOG_HEADER(log, " Options.max_bytes_for_level_multiplier: %f",
max_bytes_for_level_multiplier);
for (size_t i = 0; i < max_bytes_for_level_multiplier_additional.size();
i++) {
Header(log,
"Options.max_bytes_for_level_multiplier_addtl[%" ROCKSDB_PRIszt
"]: %d",
i, max_bytes_for_level_multiplier_additional[i]);
ROCKS_LOG_HEADER(
log, "Options.max_bytes_for_level_multiplier_addtl[%" ROCKSDB_PRIszt
"]: %d",
i, max_bytes_for_level_multiplier_additional[i]);
}
Header(log, " Options.max_sequential_skip_in_iterations: %" PRIu64,
ROCKS_LOG_HEADER(
log, " Options.max_sequential_skip_in_iterations: %" PRIu64,
max_sequential_skip_in_iterations);
Header(log, " Options.max_compaction_bytes: %" PRIu64,
max_compaction_bytes);
Header(log,
" Options.arena_block_size: %" ROCKSDB_PRIszt,
arena_block_size);
Header(log, " Options.soft_pending_compaction_bytes_limit: %" PRIu64,
soft_pending_compaction_bytes_limit);
Header(log, " Options.hard_pending_compaction_bytes_limit: %" PRIu64,
hard_pending_compaction_bytes_limit);
Header(log, " Options.rate_limit_delay_max_milliseconds: %u",
rate_limit_delay_max_milliseconds);
Header(log, " Options.disable_auto_compactions: %d",
disable_auto_compactions);
ROCKS_LOG_HEADER(
log, " Options.max_compaction_bytes: %" PRIu64,
max_compaction_bytes);
ROCKS_LOG_HEADER(
log,
" Options.arena_block_size: %" ROCKSDB_PRIszt,
arena_block_size);
ROCKS_LOG_HEADER(log,
" Options.soft_pending_compaction_bytes_limit: %" PRIu64,
soft_pending_compaction_bytes_limit);
ROCKS_LOG_HEADER(log,
" Options.hard_pending_compaction_bytes_limit: %" PRIu64,
hard_pending_compaction_bytes_limit);
ROCKS_LOG_HEADER(log, " Options.rate_limit_delay_max_milliseconds: %u",
rate_limit_delay_max_milliseconds);
ROCKS_LOG_HEADER(log, " Options.disable_auto_compactions: %d",
disable_auto_compactions);
const auto& it_compaction_style =
compaction_style_to_string.find(compaction_style);
@ -291,8 +308,9 @@ void ColumnFamilyOptions::Dump(Logger* log) const {
} else {
str_compaction_style = it_compaction_style->second;
}
Header(log, " Options.compaction_style: %s",
str_compaction_style.c_str());
ROCKS_LOG_HEADER(log,
" Options.compaction_style: %s",
str_compaction_style.c_str());
const auto& it_compaction_pri =
compaction_pri_to_string.find(compaction_pri);
@ -303,55 +321,69 @@ void ColumnFamilyOptions::Dump(Logger* log) const {
} else {
str_compaction_pri = it_compaction_pri->second;
}
Header(log, " Options.compaction_pri: %s",
str_compaction_pri.c_str());
Header(log, " Options.compaction_options_universal.size_ratio: %u",
compaction_options_universal.size_ratio);
Header(log, "Options.compaction_options_universal.min_merge_width: %u",
compaction_options_universal.min_merge_width);
Header(log, "Options.compaction_options_universal.max_merge_width: %u",
compaction_options_universal.max_merge_width);
Header(log, "Options.compaction_options_universal."
"max_size_amplification_percent: %u",
ROCKS_LOG_HEADER(log,
" Options.compaction_pri: %s",
str_compaction_pri.c_str());
ROCKS_LOG_HEADER(log,
" Options.compaction_options_universal.size_ratio: %u",
compaction_options_universal.size_ratio);
ROCKS_LOG_HEADER(log,
"Options.compaction_options_universal.min_merge_width: %u",
compaction_options_universal.min_merge_width);
ROCKS_LOG_HEADER(log,
"Options.compaction_options_universal.max_merge_width: %u",
compaction_options_universal.max_merge_width);
ROCKS_LOG_HEADER(
log,
"Options.compaction_options_universal."
"max_size_amplification_percent: %u",
compaction_options_universal.max_size_amplification_percent);
Header(log,
ROCKS_LOG_HEADER(
log,
"Options.compaction_options_universal.compression_size_percent: %d",
compaction_options_universal.compression_size_percent);
Header(log,
"Options.compaction_options_fifo.max_table_files_size: %" PRIu64,
ROCKS_LOG_HEADER(
log, "Options.compaction_options_fifo.max_table_files_size: %" PRIu64,
compaction_options_fifo.max_table_files_size);
std::string collector_names;
for (const auto& collector_factory : table_properties_collector_factories) {
collector_names.append(collector_factory->Name());
collector_names.append("; ");
}
Header(log, " Options.table_properties_collectors: %s",
ROCKS_LOG_HEADER(
log, " Options.table_properties_collectors: %s",
collector_names.c_str());
Header(log, " Options.inplace_update_support: %d",
inplace_update_support);
Header(log,
" Options.inplace_update_num_locks: %" ROCKSDB_PRIszt,
inplace_update_num_locks);
ROCKS_LOG_HEADER(log,
" Options.inplace_update_support: %d",
inplace_update_support);
ROCKS_LOG_HEADER(
log,
" Options.inplace_update_num_locks: %" ROCKSDB_PRIszt,
inplace_update_num_locks);
// TODO: easier config for bloom (maybe based on avg key/value size)
Header(log, " Options.memtable_prefix_bloom_size_ratio: %f",
memtable_prefix_bloom_size_ratio);
Header(log, " Options.memtable_huge_page_size: %" ROCKSDB_PRIszt,
memtable_huge_page_size);
Header(log, " Options.bloom_locality: %d",
bloom_locality);
Header(log,
" Options.max_successive_merges: %" ROCKSDB_PRIszt,
max_successive_merges);
Header(log, " Options.optimize_filters_for_hits: %d",
optimize_filters_for_hits);
Header(log, " Options.paranoid_file_checks: %d",
paranoid_file_checks);
Header(log, " Options.force_consistency_checks: %d",
force_consistency_checks);
Header(log, " Options.report_bg_io_stats: %d",
report_bg_io_stats);
ROCKS_LOG_HEADER(
log, " Options.memtable_prefix_bloom_size_ratio: %f",
memtable_prefix_bloom_size_ratio);
ROCKS_LOG_HEADER(log, " Options.memtable_huge_page_size: %" ROCKSDB_PRIszt,
memtable_huge_page_size);
ROCKS_LOG_HEADER(log,
" Options.bloom_locality: %d",
bloom_locality);
ROCKS_LOG_HEADER(
log,
" Options.max_successive_merges: %" ROCKSDB_PRIszt,
max_successive_merges);
ROCKS_LOG_HEADER(log,
" Options.optimize_filters_for_hits: %d",
optimize_filters_for_hits);
ROCKS_LOG_HEADER(log, " Options.paranoid_file_checks: %d",
paranoid_file_checks);
ROCKS_LOG_HEADER(log, " Options.force_consistency_checks: %d",
force_consistency_checks);
ROCKS_LOG_HEADER(log, " Options.report_bg_io_stats: %d",
report_bg_io_stats);
} // ColumnFamilyOptions::Dump
void Options::Dump(Logger* log) const {

@ -66,21 +66,24 @@ std::string BackupStatistics::ToString() const {
}
void BackupableDBOptions::Dump(Logger* logger) const {
Log(logger, " Options.backup_dir: %s", backup_dir.c_str());
Log(logger, " Options.backup_env: %p", backup_env);
Log(logger, " Options.share_table_files: %d",
static_cast<int>(share_table_files));
Log(logger, " Options.info_log: %p", info_log);
Log(logger, " Options.sync: %d", static_cast<int>(sync));
Log(logger, " Options.destroy_old_data: %d",
static_cast<int>(destroy_old_data));
Log(logger, " Options.backup_log_files: %d",
static_cast<int>(backup_log_files));
Log(logger, " Options.backup_rate_limit: %" PRIu64, backup_rate_limit);
Log(logger, " Options.restore_rate_limit: %" PRIu64,
restore_rate_limit);
Log(logger, "Options.max_background_operations: %d",
max_background_operations);
ROCKS_LOG_INFO(logger, " Options.backup_dir: %s",
backup_dir.c_str());
ROCKS_LOG_INFO(logger, " Options.backup_env: %p", backup_env);
ROCKS_LOG_INFO(logger, " Options.share_table_files: %d",
static_cast<int>(share_table_files));
ROCKS_LOG_INFO(logger, " Options.info_log: %p", info_log);
ROCKS_LOG_INFO(logger, " Options.sync: %d",
static_cast<int>(sync));
ROCKS_LOG_INFO(logger, " Options.destroy_old_data: %d",
static_cast<int>(destroy_old_data));
ROCKS_LOG_INFO(logger, " Options.backup_log_files: %d",
static_cast<int>(backup_log_files));
ROCKS_LOG_INFO(logger, " Options.backup_rate_limit: %" PRIu64,
backup_rate_limit);
ROCKS_LOG_INFO(logger, " Options.restore_rate_limit: %" PRIu64,
restore_rate_limit);
ROCKS_LOG_INFO(logger, "Options.max_background_operations: %d",
max_background_operations);
}
// -------- BackupEngineImpl class ---------
@ -520,7 +523,7 @@ Status BackupEngineImpl::Initialize() {
assert(!initialized_);
initialized_ = true;
if (read_only_) {
Log(options_.info_log, "Starting read_only backup engine");
ROCKS_LOG_INFO(options_.info_log, "Starting read_only backup engine");
}
options_.Dump(options_.info_log);
@ -568,15 +571,16 @@ Status BackupEngineImpl::Initialize() {
if (file == "." || file == "..") {
continue;
}
Log(options_.info_log, "Detected backup %s", file.c_str());
ROCKS_LOG_INFO(options_.info_log, "Detected backup %s", file.c_str());
BackupID backup_id = 0;
sscanf(file.c_str(), "%u", &backup_id);
if (backup_id == 0 || file != rocksdb::ToString(backup_id)) {
if (!read_only_) {
// invalid file name, delete that
auto s = backup_env_->DeleteFile(GetBackupMetaDir() + "/" + file);
Log(options_.info_log, "Unrecognized meta file %s, deleting -- %s",
file.c_str(), s.ToString().c_str());
ROCKS_LOG_INFO(options_.info_log,
"Unrecognized meta file %s, deleting -- %s",
file.c_str(), s.ToString().c_str());
}
continue;
}
@ -590,7 +594,8 @@ Status BackupEngineImpl::Initialize() {
latest_backup_id_ = 0;
if (options_.destroy_old_data) { // Destroy old data
assert(!read_only_);
Log(options_.info_log,
ROCKS_LOG_INFO(
options_.info_log,
"Backup Engine started with destroy_old_data == true, deleting all "
"backups");
auto s = PurgeOldBackups(0);
@ -615,8 +620,8 @@ Status BackupEngineImpl::Initialize() {
Status s =
backup.second->LoadFromFile(options_.backup_dir, abs_path_to_size);
if (s.IsCorruption()) {
Log(options_.info_log, "Backup %u corrupted -- %s", backup.first,
s.ToString().c_str());
ROCKS_LOG_INFO(options_.info_log, "Backup %u corrupted -- %s",
backup.first, s.ToString().c_str());
corrupt_backups_.insert(std::make_pair(
backup.first, std::make_pair(s, std::move(backup.second))));
} else if (!s.ok()) {
@ -625,8 +630,8 @@ Status BackupEngineImpl::Initialize() {
// fail, whereas corruption errors would not cause Open() failures.
return s;
} else {
Log(options_.info_log, "Loading backup %" PRIu32 " OK:\n%s",
backup.first, backup.second->GetInfoString().c_str());
ROCKS_LOG_INFO(options_.info_log, "Loading backup %" PRIu32 " OK:\n%s",
backup.first, backup.second->GetInfoString().c_str());
latest_backup_id_ = std::max(latest_backup_id_, backup.first);
}
}
@ -636,7 +641,7 @@ Status BackupEngineImpl::Initialize() {
}
}
Log(options_.info_log, "Latest backup is %u", latest_backup_id_);
ROCKS_LOG_INFO(options_.info_log, "Latest backup is %u", latest_backup_id_);
// set up threads perform copies from files_to_copy_or_create_ in the
// background
@ -655,7 +660,7 @@ Status BackupEngineImpl::Initialize() {
});
}
Log(options_.info_log, "Initialized BackupEngine");
ROCKS_LOG_INFO(options_.info_log, "Initialized BackupEngine");
return Status::OK();
}
@ -706,8 +711,9 @@ Status BackupEngineImpl::CreateNewBackupWithMetadata(
auto start_backup = backup_env_-> NowMicros();
Log(options_.info_log, "Started the backup process -- creating backup %u",
new_backup_id);
ROCKS_LOG_INFO(options_.info_log,
"Started the backup process -- creating backup %u",
new_backup_id);
auto private_tmp_dir = GetAbsolutePath(GetPrivateFileRel(new_backup_id, true));
s = backup_env_->FileExists(private_tmp_dir);
@ -790,14 +796,16 @@ Status BackupEngineImpl::CreateNewBackupWithMetadata(
manifest_fname.size(), 0 /* size_limit */, false /* shared_checksum */,
progress_callback, manifest_fname.substr(1) + "\n");
}
Log(options_.info_log, "begin add wal files for backup -- %" ROCKSDB_PRIszt,
live_wal_files.size());
ROCKS_LOG_INFO(options_.info_log,
"begin add wal files for backup -- %" ROCKSDB_PRIszt,
live_wal_files.size());
// Add a CopyOrCreateWorkItem to the channel for each WAL file
for (size_t i = 0; s.ok() && i < live_wal_files.size(); ++i) {
uint64_t size_bytes = live_wal_files[i]->SizeFileBytes();
if (live_wal_files[i]->Type() == kAliveLogFile) {
Log(options_.info_log, "add wal file for backup %s -- %" PRIu64,
live_wal_files[i]->PathName().c_str(), size_bytes);
ROCKS_LOG_INFO(options_.info_log,
"add wal file for backup %s -- %" PRIu64,
live_wal_files[i]->PathName().c_str(), size_bytes);
// we only care about live log files
// copy the file into backup_dir/files/<new backup>/
s = AddBackupFileWorkItem(live_dst_paths, backup_items_to_finish,
@ -807,7 +815,7 @@ Status BackupEngineImpl::CreateNewBackupWithMetadata(
size_bytes, size_bytes);
}
}
Log(options_.info_log, "add files for backup done, wait finish.");
ROCKS_LOG_INFO(options_.info_log, "add files for backup done, wait finish.");
Status item_status;
for (auto& item : backup_items_to_finish) {
item.result.wait();
@ -833,7 +841,8 @@ Status BackupEngineImpl::CreateNewBackupWithMetadata(
if (s.ok()) {
// move tmp private backup to real backup folder
Log(options_.info_log,
ROCKS_LOG_INFO(
options_.info_log,
"Moving tmp backup directory to the real one: %s -> %s\n",
GetAbsolutePath(GetPrivateFileRel(new_backup_id, true)).c_str(),
GetAbsolutePath(GetPrivateFileRel(new_backup_id, false)).c_str());
@ -876,9 +885,10 @@ Status BackupEngineImpl::CreateNewBackupWithMetadata(
if (!s.ok()) {
backup_statistics_.IncrementNumberFailBackup();
// clean all the files we might have created
Log(options_.info_log, "Backup failed -- %s", s.ToString().c_str());
Log(options_.info_log, "Backup Statistics %s\n",
backup_statistics_.ToString().c_str());
ROCKS_LOG_INFO(options_.info_log, "Backup failed -- %s",
s.ToString().c_str());
ROCKS_LOG_INFO(options_.info_log, "Backup Statistics %s\n",
backup_statistics_.ToString().c_str());
// delete files that we might have already written
DeleteBackup(new_backup_id);
GarbageCollect();
@ -888,27 +898,28 @@ Status BackupEngineImpl::CreateNewBackupWithMetadata(
// here we know that we succeeded and installed the new backup
// in the LATEST_BACKUP file
latest_backup_id_ = new_backup_id;
Log(options_.info_log, "Backup DONE. All is good");
ROCKS_LOG_INFO(options_.info_log, "Backup DONE. All is good");
// backup_speed is in byte/second
double backup_speed = new_backup->GetSize() / (1.048576 * backup_time);
Log(options_.info_log, "Backup number of files: %u",
new_backup->GetNumberFiles());
ROCKS_LOG_INFO(options_.info_log, "Backup number of files: %u",
new_backup->GetNumberFiles());
char human_size[16];
AppendHumanBytes(new_backup->GetSize(), human_size, sizeof(human_size));
Log(options_.info_log, "Backup size: %s", human_size);
Log(options_.info_log, "Backup time: %" PRIu64 " microseconds", backup_time);
Log(options_.info_log, "Backup speed: %.3f MB/s", backup_speed);
Log(options_.info_log, "Backup Statistics %s",
backup_statistics_.ToString().c_str());
ROCKS_LOG_INFO(options_.info_log, "Backup size: %s", human_size);
ROCKS_LOG_INFO(options_.info_log, "Backup time: %" PRIu64 " microseconds",
backup_time);
ROCKS_LOG_INFO(options_.info_log, "Backup speed: %.3f MB/s", backup_speed);
ROCKS_LOG_INFO(options_.info_log, "Backup Statistics %s",
backup_statistics_.ToString().c_str());
return s;
}
Status BackupEngineImpl::PurgeOldBackups(uint32_t num_backups_to_keep) {
assert(initialized_);
assert(!read_only_);
Log(options_.info_log, "Purging old backups, keeping %u",
num_backups_to_keep);
ROCKS_LOG_INFO(options_.info_log, "Purging old backups, keeping %u",
num_backups_to_keep);
std::vector<BackupID> to_delete;
auto itr = backups_.begin();
while ((backups_.size() - to_delete.size()) > num_backups_to_keep) {
@ -927,7 +938,7 @@ Status BackupEngineImpl::PurgeOldBackups(uint32_t num_backups_to_keep) {
Status BackupEngineImpl::DeleteBackup(BackupID backup_id) {
assert(initialized_);
assert(!read_only_);
Log(options_.info_log, "Deleting backup %u", backup_id);
ROCKS_LOG_INFO(options_.info_log, "Deleting backup %u", backup_id);
auto backup = backups_.find(backup_id);
if (backup != backups_.end()) {
auto s = backup->second->Delete();
@ -951,8 +962,8 @@ Status BackupEngineImpl::DeleteBackup(BackupID backup_id) {
for (auto& itr : backuped_file_infos_) {
if (itr.second->refs == 0) {
Status s = backup_env_->DeleteFile(GetAbsolutePath(itr.first));
Log(options_.info_log, "Deleting %s -- %s", itr.first.c_str(),
s.ToString().c_str());
ROCKS_LOG_INFO(options_.info_log, "Deleting %s -- %s", itr.first.c_str(),
s.ToString().c_str());
to_delete.push_back(itr.first);
}
}
@ -964,8 +975,8 @@ Status BackupEngineImpl::DeleteBackup(BackupID backup_id) {
// if they are not empty
std::string private_dir = GetPrivateFileRel(backup_id);
Status s = backup_env_->DeleteDir(GetAbsolutePath(private_dir));
Log(options_.info_log, "Deleting private dir %s -- %s",
private_dir.c_str(), s.ToString().c_str());
ROCKS_LOG_INFO(options_.info_log, "Deleting private dir %s -- %s",
private_dir.c_str(), s.ToString().c_str());
return Status::OK();
}
@ -1008,9 +1019,9 @@ Status BackupEngineImpl::RestoreDBFromBackup(
return Status::NotFound("Backup not found");
}
Log(options_.info_log, "Restoring backup id %u\n", backup_id);
Log(options_.info_log, "keep_log_files: %d\n",
static_cast<int>(restore_options.keep_log_files));
ROCKS_LOG_INFO(options_.info_log, "Restoring backup id %u\n", backup_id);
ROCKS_LOG_INFO(options_.info_log, "keep_log_files: %d\n",
static_cast<int>(restore_options.keep_log_files));
// just in case. Ignore errors
db_env_->CreateDirIfMissing(db_dir);
@ -1028,8 +1039,9 @@ Status BackupEngineImpl::RestoreDBFromBackup(
FileType type;
bool ok = ParseFileName(f, &number, &type);
if (ok && type == kLogFile) {
Log(options_.info_log, "Moving log file from archive/ to wal_dir: %s",
f.c_str());
ROCKS_LOG_INFO(options_.info_log,
"Moving log file from archive/ to wal_dir: %s",
f.c_str());
Status s =
db_env_->RenameFile(archive_dir + "/" + f, wal_dir + "/" + f);
if (!s.ok()) {
@ -1079,7 +1091,8 @@ Status BackupEngineImpl::RestoreDBFromBackup(
dst = ((type == kLogFile) ? wal_dir : db_dir) +
"/" + dst;
Log(options_.info_log, "Restoring %s to %s\n", file.c_str(), dst.c_str());
ROCKS_LOG_INFO(options_.info_log, "Restoring %s to %s\n", file.c_str(),
dst.c_str());
CopyOrCreateWorkItem copy_or_create_work_item(
GetAbsolutePath(file), dst, "" /* contents */, backup_env_, db_env_,
false, rate_limiter, 0 /* size_limit */);
@ -1106,7 +1119,8 @@ Status BackupEngineImpl::RestoreDBFromBackup(
}
}
Log(options_.info_log, "Restoring done -- %s\n", s.ToString().c_str());
ROCKS_LOG_INFO(options_.info_log, "Restoring done -- %s\n",
s.ToString().c_str());
return s;
}
@ -1127,7 +1141,7 @@ Status BackupEngineImpl::VerifyBackup(BackupID backup_id) {
return Status::NotFound();
}
Log(options_.info_log, "Verifying backup id %u\n", backup_id);
ROCKS_LOG_INFO(options_.info_log, "Verifying backup id %u\n", backup_id);
std::unordered_map<std::string, uint64_t> curr_abs_path_to_size;
for (const auto& rel_dir : {GetPrivateFileRel(backup_id), GetSharedFileRel(),
@ -1302,14 +1316,15 @@ Status BackupEngineImpl::AddBackupFileWorkItem(
} else if (shared && (same_path || file_exists)) {
need_to_copy = false;
if (shared_checksum) {
Log(options_.info_log,
"%s already present, with checksum %u and size %" PRIu64,
fname.c_str(), checksum_value, size_bytes);
ROCKS_LOG_INFO(options_.info_log,
"%s already present, with checksum %u and size %" PRIu64,
fname.c_str(), checksum_value, size_bytes);
} else if (backuped_file_infos_.find(dst_relative) ==
backuped_file_infos_.end() && !same_path) {
// file already exists, but it's not referenced by any backup. overwrite
// the file
Log(options_.info_log,
ROCKS_LOG_INFO(
options_.info_log,
"%s already present, but not referenced by any backup. We will "
"overwrite the file.",
fname.c_str());
@ -1317,8 +1332,8 @@ Status BackupEngineImpl::AddBackupFileWorkItem(
backup_env_->DeleteFile(dst_path);
} else {
// the file is present and referenced by a backup
Log(options_.info_log, "%s already present, calculate checksum",
fname.c_str());
ROCKS_LOG_INFO(options_.info_log,
"%s already present, calculate checksum", fname.c_str());
s = CalculateChecksum(src_dir + fname, db_env_, size_limit,
&checksum_value);
}
@ -1326,8 +1341,8 @@ Status BackupEngineImpl::AddBackupFileWorkItem(
live_dst_paths.insert(dst_path);
if (!contents.empty() || need_to_copy) {
Log(options_.info_log, "Copying %s to %s", fname.c_str(),
dst_path_tmp.c_str());
ROCKS_LOG_INFO(options_.info_log, "Copying %s to %s", fname.c_str(),
dst_path_tmp.c_str());
CopyOrCreateWorkItem copy_or_create_work_item(
src_dir.empty() ? "" : src_dir + fname, dst_path_tmp, contents, db_env_,
backup_env_, options_.sync, rate_limiter, size_limit,
@ -1433,7 +1448,7 @@ Status BackupEngineImpl::InsertPathnameToSizeBytes(
Status BackupEngineImpl::GarbageCollect() {
assert(!read_only_);
Log(options_.info_log, "Starting garbage collection");
ROCKS_LOG_INFO(options_.info_log, "Starting garbage collection");
if (options_.share_table_files) {
// delete obsolete shared files
@ -1459,8 +1474,8 @@ Status BackupEngineImpl::GarbageCollect() {
// this might be a directory, but DeleteFile will just fail in that
// case, so we're good
Status s = backup_env_->DeleteFile(GetAbsolutePath(rel_fname));
Log(options_.info_log, "Deleting %s -- %s", rel_fname.c_str(),
s.ToString().c_str());
ROCKS_LOG_INFO(options_.info_log, "Deleting %s -- %s",
rel_fname.c_str(), s.ToString().c_str());
backuped_file_infos_.erase(rel_fname);
}
}
@ -1491,13 +1506,14 @@ Status BackupEngineImpl::GarbageCollect() {
backup_env_->GetChildren(full_private_path, &subchildren);
for (auto& subchild : subchildren) {
Status s = backup_env_->DeleteFile(full_private_path + subchild);
Log(options_.info_log, "Deleting %s -- %s",
(full_private_path + subchild).c_str(), s.ToString().c_str());
ROCKS_LOG_INFO(options_.info_log, "Deleting %s -- %s",
(full_private_path + subchild).c_str(),
s.ToString().c_str());
}
// finally delete the private dir
Status s = backup_env_->DeleteDir(full_private_path);
Log(options_.info_log, "Deleting dir %s -- %s", full_private_path.c_str(),
s.ToString().c_str());
ROCKS_LOG_INFO(options_.info_log, "Deleting dir %s -- %s",
full_private_path.c_str(), s.ToString().c_str());
}
return Status::OK();

@ -122,7 +122,8 @@ Status CheckpointImpl::CreateCheckpoint(const std::string& checkpoint_dir) {
}
size_t wal_size = live_wal_files.size();
Log(db_options.info_log,
ROCKS_LOG_INFO(
db_options.info_log,
"Started the snapshot process -- creating snapshot in directory %s",
checkpoint_dir.c_str());
@ -161,7 +162,7 @@ Status CheckpointImpl::CreateCheckpoint(const std::string& checkpoint_dir) {
// * if it's kDescriptorFile, limit the size to manifest_file_size
// * always copy if cross-device link
if ((type == kTableFile) && same_fs) {
Log(db_options.info_log, "Hard Linking %s", src_fname.c_str());
ROCKS_LOG_INFO(db_options.info_log, "Hard Linking %s", src_fname.c_str());
s = db_->GetEnv()->LinkFile(db_->GetName() + src_fname,
full_private_path + src_fname);
if (s.IsNotSupported()) {
@ -170,7 +171,7 @@ Status CheckpointImpl::CreateCheckpoint(const std::string& checkpoint_dir) {
}
}
if ((type != kTableFile) || (!same_fs)) {
Log(db_options.info_log, "Copying %s", src_fname.c_str());
ROCKS_LOG_INFO(db_options.info_log, "Copying %s", src_fname.c_str());
s = CopyFile(db_->GetEnv(), db_->GetName() + src_fname,
full_private_path + src_fname,
(type == kDescriptorFile) ? manifest_file_size : 0,
@ -181,8 +182,8 @@ Status CheckpointImpl::CreateCheckpoint(const std::string& checkpoint_dir) {
s = CreateFile(db_->GetEnv(), full_private_path + current_fname,
manifest_fname.substr(1) + "\n");
}
Log(db_options.info_log, "Number of log files %" ROCKSDB_PRIszt,
live_wal_files.size());
ROCKS_LOG_INFO(db_options.info_log, "Number of log files %" ROCKSDB_PRIszt,
live_wal_files.size());
// Link WAL files. Copy exact size of last one because it is the only one
// that has changes after the last flush.
@ -191,8 +192,8 @@ Status CheckpointImpl::CreateCheckpoint(const std::string& checkpoint_dir) {
(live_wal_files[i]->StartSequence() >= sequence_number ||
live_wal_files[i]->LogNumber() >= min_log_num)) {
if (i + 1 == wal_size) {
Log(db_options.info_log, "Copying %s",
live_wal_files[i]->PathName().c_str());
ROCKS_LOG_INFO(db_options.info_log, "Copying %s",
live_wal_files[i]->PathName().c_str());
s = CopyFile(db_->GetEnv(),
db_options.wal_dir + live_wal_files[i]->PathName(),
full_private_path + live_wal_files[i]->PathName(),
@ -201,8 +202,8 @@ Status CheckpointImpl::CreateCheckpoint(const std::string& checkpoint_dir) {
}
if (same_fs) {
// we only care about live log files
Log(db_options.info_log, "Hard Linking %s",
live_wal_files[i]->PathName().c_str());
ROCKS_LOG_INFO(db_options.info_log, "Hard Linking %s",
live_wal_files[i]->PathName().c_str());
s = db_->GetEnv()->LinkFile(
db_options.wal_dir + live_wal_files[i]->PathName(),
full_private_path + live_wal_files[i]->PathName());
@ -212,8 +213,8 @@ Status CheckpointImpl::CreateCheckpoint(const std::string& checkpoint_dir) {
}
}
if (!same_fs) {
Log(db_options.info_log, "Copying %s",
live_wal_files[i]->PathName().c_str());
ROCKS_LOG_INFO(db_options.info_log, "Copying %s",
live_wal_files[i]->PathName().c_str());
s = CopyFile(db_->GetEnv(),
db_options.wal_dir + live_wal_files[i]->PathName(),
full_private_path + live_wal_files[i]->PathName(), 0,
@ -239,27 +240,28 @@ Status CheckpointImpl::CreateCheckpoint(const std::string& checkpoint_dir) {
if (!s.ok()) {
// clean all the files we might have created
Log(db_options.info_log, "Snapshot failed -- %s", s.ToString().c_str());
ROCKS_LOG_INFO(db_options.info_log, "Snapshot failed -- %s",
s.ToString().c_str());
// we have to delete the dir and all its children
std::vector<std::string> subchildren;
db_->GetEnv()->GetChildren(full_private_path, &subchildren);
for (auto& subchild : subchildren) {
std::string subchild_path = full_private_path + "/" + subchild;
Status s1 = db_->GetEnv()->DeleteFile(subchild_path);
Log(db_options.info_log, "Delete file %s -- %s", subchild_path.c_str(),
s1.ToString().c_str());
ROCKS_LOG_INFO(db_options.info_log, "Delete file %s -- %s",
subchild_path.c_str(), s1.ToString().c_str());
}
// finally delete the private dir
Status s1 = db_->GetEnv()->DeleteDir(full_private_path);
Log(db_options.info_log, "Delete dir %s -- %s", full_private_path.c_str(),
s1.ToString().c_str());
ROCKS_LOG_INFO(db_options.info_log, "Delete dir %s -- %s",
full_private_path.c_str(), s1.ToString().c_str());
return s;
}
// here we know that we succeeded and installed the new snapshot
Log(db_options.info_log, "Snapshot DONE. All is good");
Log(db_options.info_log, "Snapshot sequence number: %" PRIu64,
sequence_number);
ROCKS_LOG_INFO(db_options.info_log, "Snapshot DONE. All is good");
ROCKS_LOG_INFO(db_options.info_log, "Snapshot sequence number: %" PRIu64,
sequence_number);
return s;
}

@ -9,6 +9,7 @@
#include "rocksdb/merge_operator.h"
#include "rocksdb/slice.h"
#include "util/coding.h"
#include "util/logging.h"
#include "utilities/merge_operators.h"
using namespace rocksdb;
@ -51,10 +52,9 @@ class UInt64AddOperator : public AssociativeMergeOperator {
result = DecodeFixed64(value.data());
} else if (logger != nullptr) {
// If value is corrupted, treat it as 0
Log(InfoLogLevel::ERROR_LEVEL, logger,
"uint64 value corruption, size: %" ROCKSDB_PRIszt
" > %" ROCKSDB_PRIszt,
value.size(), sizeof(uint64_t));
ROCKS_LOG_ERROR(logger, "uint64 value corruption, size: %" ROCKSDB_PRIszt
" > %" ROCKSDB_PRIszt,
value.size(), sizeof(uint64_t));
}
return result;

@ -11,6 +11,7 @@
#include <vector>
#include "port/port.h"
#include "util/logging.h"
#include "util/stop_watch.h"
#include "util/sync_point.h"
#include "utilities/persistent_cache/block_cache_tier_file.h"
@ -109,7 +110,7 @@ Status BlockCacheTier::CleanupCacheFolder(const std::string& folder) {
return status;
}
} else {
Debug(opt_.log, "Skipping file %s", file.c_str());
ROCKS_LOG_DEBUG(opt_.log, "Skipping file %s", file.c_str());
}
}
return Status::OK();
@ -233,8 +234,8 @@ Status BlockCacheTier::InsertImpl(const Slice& key, const Slice& data) {
while (!cache_file_->Append(key, data, &lba)) {
if (!cache_file_->Eof()) {
Debug(opt_.log, "Error inserting to cache file %d",
cache_file_->cacheid());
ROCKS_LOG_DEBUG(opt_.log, "Error inserting to cache file %d",
cache_file_->cacheid());
stats_.write_latency_.Add(timer.ElapsedNanos() / 1000);
return Status::TryAgain();
}

@ -13,8 +13,9 @@
#include <memory>
#include <vector>
#include "util/crc32c.h"
#include "port/port.h"
#include "util/crc32c.h"
#include "util/logging.h"
namespace rocksdb {
@ -201,7 +202,7 @@ bool RandomAccessCacheFile::Open(const bool enable_direct_reads) {
bool RandomAccessCacheFile::OpenImpl(const bool enable_direct_reads) {
rwlock_.AssertHeld();
Debug(log_, "Opening cache file %s", Path().c_str());
ROCKS_LOG_DEBUG(log_, "Opening cache file %s", Path().c_str());
std::unique_ptr<RandomAccessFile> file;
Status status =
@ -282,19 +283,19 @@ bool WriteableCacheFile::Create(const bool enable_direct_writes,
enable_direct_reads_ = enable_direct_reads;
Debug(log_, "Creating new cache %s (max size is %d B)", Path().c_str(),
max_size_);
ROCKS_LOG_DEBUG(log_, "Creating new cache %s (max size is %d B)",
Path().c_str(), max_size_);
Status s = env_->FileExists(Path());
if (s.ok()) {
Warn(log_, "File %s already exists. %s", Path().c_str(),
s.ToString().c_str());
ROCKS_LOG_WARN(log_, "File %s already exists. %s", Path().c_str(),
s.ToString().c_str());
}
s = NewWritableCacheFile(env_, Path(), &file_);
if (!s.ok()) {
Warn(log_, "Unable to create file %s. %s", Path().c_str(),
s.ToString().c_str());
ROCKS_LOG_WARN(log_, "Unable to create file %s. %s", Path().c_str(),
s.ToString().c_str());
return false;
}
@ -317,7 +318,7 @@ bool WriteableCacheFile::Append(const Slice& key, const Slice& val, LBA* lba) {
if (!ExpandBuffer(rec_size)) {
// unable to expand the buffer
Debug(log_, "Error expanding buffers. size=%d", rec_size);
ROCKS_LOG_DEBUG(log_, "Error expanding buffers. size=%d", rec_size);
return false;
}
@ -360,7 +361,7 @@ bool WriteableCacheFile::ExpandBuffer(const size_t size) {
while (free < size) {
CacheWriteBuffer* const buf = alloc_->Allocate();
if (!buf) {
Debug(log_, "Unable to allocate buffers");
ROCKS_LOG_DEBUG(log_, "Unable to allocate buffers");
return false;
}

@ -233,8 +233,8 @@ class TtlMergeOperator : public MergeOperator {
MergeOperationOutput* merge_out) const override {
const uint32_t ts_len = DBWithTTLImpl::kTSLength;
if (merge_in.existing_value && merge_in.existing_value->size() < ts_len) {
Log(InfoLogLevel::ERROR_LEVEL, merge_in.logger,
"Error: Could not remove timestamp from existing value.");
ROCKS_LOG_ERROR(merge_in.logger,
"Error: Could not remove timestamp from existing value.");
return false;
}
@ -242,7 +242,8 @@ class TtlMergeOperator : public MergeOperator {
std::vector<Slice> operands_without_ts;
for (const auto& operand : merge_in.operand_list) {
if (operand.size() < ts_len) {
Log(InfoLogLevel::ERROR_LEVEL, merge_in.logger,
ROCKS_LOG_ERROR(
merge_in.logger,
"Error: Could not remove timestamp from operand value.");
return false;
}
@ -282,7 +283,8 @@ class TtlMergeOperator : public MergeOperator {
// Augment the *new_value with the ttl time-stamp
int64_t curtime;
if (!env_->GetCurrentTime(&curtime).ok()) {
Log(InfoLogLevel::ERROR_LEVEL, merge_in.logger,
ROCKS_LOG_ERROR(
merge_in.logger,
"Error: Could not get current time to be attached internally "
"to the new value.");
return false;
@ -303,8 +305,8 @@ class TtlMergeOperator : public MergeOperator {
for (const auto& operand : operand_list) {
if (operand.size() < ts_len) {
Log(InfoLogLevel::ERROR_LEVEL, logger,
"Error: Could not remove timestamp from value.");
ROCKS_LOG_ERROR(logger,
"Error: Could not remove timestamp from value.");
return false;
}
@ -322,7 +324,8 @@ class TtlMergeOperator : public MergeOperator {
// Augment the *new_value with the ttl time-stamp
int64_t curtime;
if (!env_->GetCurrentTime(&curtime).ok()) {
Log(InfoLogLevel::ERROR_LEVEL, logger,
ROCKS_LOG_ERROR(
logger,
"Error: Could not get current time to be attached internally "
"to the new value.");
return false;
