Fix the string format issue

Summary:

Mac and our dev server have totally different definitions of uint64_t, so fixing the warning on Mac actually made the code uncompilable on Linux.
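
For context, a minimal standalone sketch of the underlying problem and of the workaround this diff applies everywhere (the sketch is illustrative and not taken from the codebase): on 64-bit Linux/glibc, uint64_t is typically a typedef of unsigned long, while on Mac it is unsigned long long, so a bare %lu or %llu triggers -Wformat on one platform or the other. Casting to unsigned long and printing with %lu compiles cleanly on both; the PRIu64 line at the end is only an alternative for comparison, not something this change uses.

// Illustrative example only -- not part of this commit.
#include <cinttypes>
#include <cstdint>
#include <cstdio>

int main() {
  uint64_t number = 42;  // unsigned long on 64-bit glibc, unsigned long long on Mac

  // Pattern used throughout this diff: cast to unsigned long, print with %lu.
  std::printf("Delete #%lu\n", (unsigned long)number);

  // Alternative: let <cinttypes> pick the correct specifier for uint64_t.
  std::printf("Delete #%" PRIu64 "\n", number);
  return 0;
}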

Test Plan:

make clean && make -j32
Branch: main
Author: Kai Liu, 11 years ago
Parent: d88d8ecf80
Commit: 35460ccb53
Changed files:
  1. db/db_impl.cc (67 changed lines)
  2. db/memtablelist.cc (16 changed lines)
  3. db/transaction_log_impl.cc (8 changed lines)
  4. db/version_set.cc (136 changed lines)
  5. util/histogram.cc (13 changed lines)
  6. util/ldb_cmd.cc (17 changed lines)
  7. util/options.cc (28 changed lines)

@@ -563,9 +563,10 @@ void DBImpl::PurgeObsoleteFiles(DeletionState& state) {
table_cache_->Evict(number);
}
std::string fname = dbname_ + "/" + state.all_files[i];
Log(options_.info_log, "Delete type=%d #%lu -- %s",
int(type), number, fname.c_str());
Log(options_.info_log,
"Delete type=%d #%lu",
int(type),
(unsigned long)number);
Status st;
if (type == kLogFile && (options_.WAL_ttl_seconds > 0 ||
@@ -573,14 +574,15 @@ void DBImpl::PurgeObsoleteFiles(DeletionState& state) {
st = env_->RenameFile(fname,
ArchivedLogFileName(options_.wal_dir, number));
if (!st.ok()) {
Log(options_.info_log, "RenameFile logfile #%lu FAILED -- %s\n",
number, st.ToString().c_str());
Log(options_.info_log,
"RenameFile logfile #%lu FAILED",
(unsigned long)number);
}
} else {
st = env_->DeleteFile(fname);
if (!st.ok()) {
Log(options_.info_log, "Delete type=%d #%lu FAILED -- %s\n",
int(type), number, st.ToString().c_str());
Log(options_.info_log, "Delete type=%d #%lu FAILED\n",
int(type), (unsigned long)number);
}
}
}
@@ -887,8 +889,8 @@ Status DBImpl::RecoverLogFile(uint64_t log_number,
// large sequence numbers).
log::Reader reader(std::move(file), &reporter, true/*checksum*/,
0/*initial_offset*/);
Log(options_.info_log, "Recovering log #%llu",
(unsigned long long) log_number);
Log(options_.info_log, "Recovering log #%lu",
(unsigned long) log_number);
// Read all the records and add to a memtable
std::string scratch;
@@ -956,8 +958,8 @@ Status DBImpl::WriteLevel0TableForRecovery(MemTable* mem, VersionEdit* edit) {
const SequenceNumber newest_snapshot = snapshots_.GetNewest();
const SequenceNumber earliest_seqno_in_memtable =
mem->GetFirstSequenceNumber();
Log(options_.info_log, "Level-0 table #%llu: started",
(unsigned long long) meta.number);
Log(options_.info_log, "Level-0 table #%lu: started",
(unsigned long) meta.number);
Status s;
{
@@ -970,9 +972,9 @@ Status DBImpl::WriteLevel0TableForRecovery(MemTable* mem, VersionEdit* edit) {
mutex_.Lock();
}
Log(options_.info_log, "Level-0 table #%llu: %lld bytes %s",
(unsigned long long) meta.number,
(unsigned long long) meta.file_size,
Log(options_.info_log, "Level-0 table #%lu: %lu bytes %s",
(unsigned long) meta.number,
(unsigned long) meta.file_size,
s.ToString().c_str());
delete iter;
@@ -1008,8 +1010,8 @@ Status DBImpl::WriteLevel0Table(std::vector<MemTable*> &mems, VersionEdit* edit,
std::vector<Iterator*> list;
for (MemTable* m : mems) {
Log(options_.info_log,
"Flushing memtable with log file: %llu\n",
m->GetLogNumber());
"Flushing memtable with log file: %lu\n",
(unsigned long)m->GetLogNumber());
list.push_back(m->NewIterator());
}
Iterator* iter = NewMergingIterator(&internal_comparator_, &list[0],
@@ -1017,7 +1019,9 @@ Status DBImpl::WriteLevel0Table(std::vector<MemTable*> &mems, VersionEdit* edit,
const SequenceNumber newest_snapshot = snapshots_.GetNewest();
const SequenceNumber earliest_seqno_in_memtable =
mems[0]->GetFirstSequenceNumber();
Log(options_.info_log, "Level-0 flush table #%llu: started", meta.number);
Log(options_.info_log,
"Level-0 flush table #%lu: started",
(unsigned long)meta.number);
Version* base = versions_->current();
base->Ref(); // it is likely that we do not need this reference
@@ -1038,9 +1042,9 @@ Status DBImpl::WriteLevel0Table(std::vector<MemTable*> &mems, VersionEdit* edit,
}
base->Unref();
Log(options_.info_log, "Level-0 flush table #%llu: %lld bytes %s",
(unsigned long long) meta.number,
(unsigned long long) meta.file_size,
Log(options_.info_log, "Level-0 flush table #%lu: %lu bytes %s",
(unsigned long) meta.number,
(unsigned long) meta.file_size,
s.ToString().c_str());
delete iter;
@@ -1969,10 +1973,10 @@ Status DBImpl::FinishCompactionOutputFile(CompactionState* compact,
delete iter;
if (s.ok()) {
Log(options_.info_log,
"Generated table #%llu: %lld keys, %lld bytes",
(unsigned long long) output_number,
(unsigned long long) current_entries,
(unsigned long long) current_bytes);
"Generated table #%lu: %lu keys, %lu bytes",
(unsigned long) output_number,
(unsigned long) current_entries,
(unsigned long) current_bytes);
}
}
return s;
@@ -2037,8 +2041,9 @@ inline SequenceNumber DBImpl::findEarliestVisibleSnapshot(
assert(prev);
}
Log(options_.info_log,
"Looking for seqid %llu but maxseqid is %llu", in,
snapshots[snapshots.size()-1]);
"Looking for seqid %lu but maxseqid is %lu",
(unsigned long)in,
(unsigned long)snapshots[snapshots.size()-1]);
assert(0);
return 0;
}
@@ -2949,9 +2954,6 @@ Status DBImpl::MakeRoomForWrite(bool force) {
stall_level0_slowdown_ += delayed;
stall_level0_slowdown_count_++;
allow_delay = false; // Do not delay a single write more than once
//Log(options_.info_log,
// "delaying write %llu usecs for level0_slowdown_writes_trigger\n",
// (long long unsigned int)delayed);
mutex_.Lock();
delayed_writes_++;
} else if (!force &&
@@ -3014,9 +3016,6 @@ Status DBImpl::MakeRoomForWrite(bool force) {
(unsigned)options_.rate_limit_delay_max_milliseconds) {
allow_hard_rate_limit_delay = false;
}
// Log(options_.info_log,
// "delaying write %llu usecs for rate limits with max score %.2f\n",
// (long long unsigned int)delayed, score);
mutex_.Lock();
} else if (
allow_soft_rate_limit_delay &&
@@ -3068,8 +3067,8 @@ Status DBImpl::MakeRoomForWrite(bool force) {
internal_comparator_, mem_rep_factory_, NumberLevels(), options_);
mem_->Ref();
Log(options_.info_log,
"New memtable created with log file: #%llu\n",
logfile_number_);
"New memtable created with log file: #%lu\n",
(unsigned long)logfile_number_);
mem_->SetLogNumber(logfile_number_);
force = false; // Do not force another compaction if have room
MaybeScheduleFlushOrCompaction();

@@ -122,7 +122,9 @@ Status MemTableList::InstallMemtableFlushResults(
break;
}
Log(info_log, "Level-0 commit table #%llu started", m->file_number_);
Log(info_log,
"Level-0 commit table #%lu started",
(unsigned long)m->file_number_);
// this can release and reacquire the mutex.
s = vset->LogAndApply(&m->edit_, mu);
@@ -133,9 +135,9 @@ Status MemTableList::InstallMemtableFlushResults(
do {
if (s.ok()) { // commit new state
Log(info_log,
"Level-0 commit table #%llu: memtable #%llu done",
m->file_number_,
mem_id);
"Level-0 commit table #%lu: memtable #%lu done",
(unsigned long)m->file_number_,
(unsigned long)mem_id);
memlist_.remove(m);
assert(m->file_number_ > 0);
@@ -149,9 +151,9 @@ Status MemTableList::InstallMemtableFlushResults(
} else {
//commit failed. setup state so that we can flush again.
Log(info_log,
"Level-0 commit table #%llu: memtable #%llu failed",
m->file_number_,
mem_id);
"Level-0 commit table #%lu: memtable #%lu failed",
(unsigned long)m->file_number_,
(unsigned long)mem_id);
m->flush_completed_ = false;
m->flush_in_progress_ = false;
m->edit_.Clear();

@@ -205,10 +205,12 @@ bool TransactionLogIteratorImpl::IsBatchExpected(
if (batchSeq != expectedSeq) {
char buf[200];
snprintf(buf, sizeof(buf),
"Discontinuity in log records. Got seq=%llu, Expected seq=%llu, "
"Last flushed seq=%llu.Log iterator will reseek the correct "
"Discontinuity in log records. Got seq=%lu, Expected seq=%lu, "
"Last flushed seq=%lu.Log iterator will reseek the correct "
"batch.",
batchSeq, expectedSeq, dbimpl_->GetLatestSequenceNumber());
(unsigned long)batchSeq,
(unsigned long)expectedSeq,
(unsigned long)dbimpl_->GetLatestSequenceNumber());
reporter_.Info(buf);
return false;
}

@@ -1322,8 +1322,9 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu,
if (s.ok() && old_manifest_file_number < manifest_file_number_) {
// delete old manifest file
Log(options_->info_log,
"Deleting manifest %llu current manifest %llu\n",
old_manifest_file_number, manifest_file_number_);
"Deleting manifest %lu current manifest %lu\n",
(unsigned long)old_manifest_file_number,
(unsigned long)manifest_file_number_);
// we don't care about an error here, PurgeObsoleteFiles will take care
// of it later
env_->DeleteFile(DescriptorFileName(dbname_, old_manifest_file_number));
@@ -1348,8 +1349,8 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu,
prev_log_number_ = edit->prev_log_number_;
} else {
Log(options_->info_log, "Error in committing version %llu",
v->GetVersionNumber());
Log(options_->info_log, "Error in committing version %lu",
(unsigned long)v->GetVersionNumber());
delete v;
if (!new_manifest_file.empty()) {
descriptor_log_.reset();
@@ -1521,11 +1522,15 @@ Status VersionSet::Recover() {
prev_log_number_ = prev_log_number;
Log(options_->info_log, "Recovered from manifest file:%s succeeded,"
"manifest_file_number is %llu, next_file_number is %llu, "
"last_sequence is %llu, log_number is %llu,"
"prev_log_number is %llu\n",
current.c_str(), manifest_file_number_, next_file_number_,
last_sequence_, log_number_, prev_log_number_);
"manifest_file_number is %lu, next_file_number is %lu, "
"last_sequence is %lu, log_number is %lu,"
"prev_log_number is %lu\n",
current.c_str(),
(unsigned long)manifest_file_number_,
(unsigned long)next_file_number_,
(unsigned long)last_sequence_,
(unsigned long)log_number_,
(unsigned long)prev_log_number_);
}
return s;
@@ -1647,10 +1652,13 @@ Status VersionSet::DumpManifest(Options& options, std::string& dscname,
log_number_ = log_number;
prev_log_number_ = prev_log_number;
printf("manifest_file_number %llu next_file_number %llu last_sequence "
"%llu log_number %llu prev_log_number %llu\n",
manifest_file_number_, next_file_number_,
last_sequence, log_number, prev_log_number);
printf("manifest_file_number %lu next_file_number %lu last_sequence "
"%lu log_number %lu prev_log_number %lu\n",
(unsigned long)manifest_file_number_,
(unsigned long)next_file_number_,
(unsigned long)last_sequence,
(unsigned long)log_number,
(unsigned long)prev_log_number);
printf("%s \n", v->DebugString(hex).c_str());
}
@@ -1864,8 +1872,8 @@ const char* VersionSet::LevelDataSizeSummary(
int len = snprintf(scratch->buffer, sizeof(scratch->buffer), "files_size[");
for (int i = 0; i < NumberLevels(); i++) {
int sz = sizeof(scratch->buffer) - len;
int ret = snprintf(scratch->buffer + len, sz, "%llu ",
NumLevelBytes(i));
int ret = snprintf(scratch->buffer + len, sz, "%lu ",
(unsigned long)NumLevelBytes(i));
if (ret < 0 || ret >= sz)
break;
len += ret;
@@ -1881,9 +1889,11 @@ const char* VersionSet::LevelFileSummary(
FileMetaData* f = current_->files_[level][i];
int sz = sizeof(scratch->buffer) - len;
int ret = snprintf(scratch->buffer + len, sz,
"#%llu(seq=%llu,sz=%llu,%d) ",
f->number, f->smallest_seqno,
f->file_size, f->being_compacted);
"#%lu(seq=%lu,sz=%lu,%lu) ",
(unsigned long)f->number,
(unsigned long)f->smallest_seqno,
(unsigned long)f->file_size,
(unsigned long)f->being_compacted);
if (ret < 0 || ret >= sz)
break;
len += ret;
@@ -2221,16 +2231,20 @@ Compaction* VersionSet::PickCompactionUniversalSizeAmp(
start_index = loop; // Consider this as the first candidate.
break;
}
Log(options_->info_log, "Universal: skipping file %llu[%d] compacted %s",
f->number, loop, " cannot be a candidate to reduce size amp.\n");
Log(options_->info_log, "Universal: skipping file %lu[%d] compacted %s",
(unsigned long)f->number,
loop,
" cannot be a candidate to reduce size amp.\n");
f = nullptr;
}
if (f == nullptr) {
return nullptr; // no candidate files
}
Log(options_->info_log, "Universal: First candidate file %llu[%d] %s",
f->number, start_index, " to reduce size amp.\n");
Log(options_->info_log, "Universal: First candidate file %lu[%d] %s",
(unsigned long)f->number,
start_index,
" to reduce size amp.\n");
// keep adding up all the remaining files
for (unsigned int loop = start_index; loop < file_by_time.size() - 1;
@@ -2239,7 +2253,9 @@ Compaction* VersionSet::PickCompactionUniversalSizeAmp(
f = current_->files_[level][index];
if (f->being_compacted) {
Log(options_->info_log,
"Universal: Possible candidate file %llu[%d] %s.", f->number, loop,
"Universal: Possible candidate file %lu[%d] %s.",
(unsigned long)f->number,
loop,
" is already being compacted. No size amp reduction possible.\n");
return nullptr;
}
@@ -2257,15 +2273,17 @@ Compaction* VersionSet::PickCompactionUniversalSizeAmp(
// size amplification = percentage of additional size
if (candidate_size * 100 < ratio * earliest_file_size) {
Log(options_->info_log,
"Universal: size amp not needed. newer-files-total-size %llu "
"earliest-file-size %llu",
candidate_size, earliest_file_size);
"Universal: size amp not needed. newer-files-total-size %lu "
"earliest-file-size %lu",
(unsigned long)candidate_size,
(unsigned long)earliest_file_size);
return nullptr;
} else {
Log(options_->info_log,
"Universal: size amp needed. newer-files-total-size %llu "
"earliest-file-size %llu",
candidate_size, earliest_file_size);
"Universal: size amp needed. newer-files-total-size %lu "
"earliest-file-size %lu",
(unsigned long)candidate_size,
(unsigned long)earliest_file_size);
}
assert(start_index >= 0 && start_index < file_by_time.size() - 1);
@@ -2280,8 +2298,10 @@ Compaction* VersionSet::PickCompactionUniversalSizeAmp(
f = current_->files_[level][index];
c->inputs_[0].push_back(f);
Log(options_->info_log,
"Universal: size amp picking file %llu[%d] with size %llu",
f->number, index, f->file_size);
"Universal: size amp picking file %lu[%d] with size %lu",
(unsigned long)f->number,
index,
(unsigned long)f->file_size);
}
return c;
}
@@ -2327,8 +2347,8 @@ Compaction* VersionSet::PickCompactionUniversalReadAmp(
break;
}
Log(options_->info_log,
"Universal: file %llu[%d] being compacted, skipping",
f->number, loop);
"Universal: file %lu[%d] being compacted, skipping",
(unsigned long)f->number, loop);
f = nullptr;
}
@@ -2336,8 +2356,8 @@ Compaction* VersionSet::PickCompactionUniversalReadAmp(
// first candidate to be compacted.
uint64_t candidate_size = f != nullptr? f->file_size : 0;
if (f != nullptr) {
Log(options_->info_log, "Universal: Possible candidate file %llu[%d].",
f->number, loop);
Log(options_->info_log, "Universal: Possible candidate file %lu[%d].",
(unsigned long)f->number, loop);
}
// Check if the suceeding files need compaction.
@@ -2370,8 +2390,11 @@ Compaction* VersionSet::PickCompactionUniversalReadAmp(
int index = file_by_time[i];
FileMetaData* f = current_->files_[level][index];
Log(options_->info_log,
"Universal: Skipping file %llu[%d] with size %llu %d\n",
f->number, i, f->file_size, f->being_compacted);
"Universal: Skipping file %lu[%d] with size %lu %d\n",
(unsigned long)f->number,
i,
(unsigned long)f->file_size,
f->being_compacted);
}
}
}
@@ -2405,8 +2428,10 @@ Compaction* VersionSet::PickCompactionUniversalReadAmp(
int index = file_by_time[i];
FileMetaData* f = current_->files_[level][index];
c->inputs_[0].push_back(f);
Log(options_->info_log, "Universal: Picking file %llu[%d] with size %llu\n",
f->number, i, f->file_size);
Log(options_->info_log, "Universal: Picking file %lu[%d] with size %lu\n",
(unsigned long)f->number,
i,
(unsigned long)f->file_size);
}
return c;
}
@@ -2792,16 +2817,17 @@ void VersionSet::SetupOtherInputs(Compaction* c) {
if (expanded1.size() == c->inputs_[1].size() &&
!FilesInCompaction(expanded1)) {
Log(options_->info_log,
"Expanding@%d %d+%d (%llu+%llu bytes) to %d+%d (%llu+%llu bytes)\n",
level,
int(c->inputs_[0].size()),
int(c->inputs_[1].size()),
inputs0_size,
inputs1_size,
int(expanded0.size()),
int(expanded1.size()),
expanded0_size,
inputs1_size);
"Expanding@%lu %lu+%lu (%lu+%lu bytes) to %lu+%lu (%lu+%lu bytes)"
"\n",
(unsigned long)level,
(unsigned long)(c->inputs_[0].size()),
(unsigned long)(c->inputs_[1].size()),
(unsigned long)inputs0_size,
(unsigned long)inputs1_size,
(unsigned long)(expanded0.size()),
(unsigned long)(expanded1.size()),
(unsigned long)expanded0_size,
(unsigned long)inputs1_size);
smallest = new_start;
largest = new_limit;
c->inputs_[0] = expanded0;
@@ -3091,9 +3117,9 @@ static void InputSummary(std::vector<FileMetaData*>& files,
int write = 0;
for (unsigned int i = 0; i < files.size(); i++) {
int sz = len - write;
int ret = snprintf(output + write, sz, "%llu(%llu) ",
files.at(i)->number,
files.at(i)->file_size);
int ret = snprintf(output + write, sz, "%lu(%lu) ",
(unsigned long)files.at(i)->number,
(unsigned long)files.at(i)->file_size);
if (ret < 0 || ret >= sz)
break;
write += ret;
@@ -3102,8 +3128,10 @@
void Compaction::Summary(char* output, int len) {
int write = snprintf(output, len,
"Base version %llu Base level %d, seek compaction:%d, inputs:",
input_version_->GetVersionNumber(), level_, seek_compaction_);
"Base version %lu Base level %d, seek compaction:%d, inputs:",
(unsigned long)input_version_->GetVersionNumber(),
level_,
seek_compaction_);
if (write < 0 || write > len) {
return;
}

@@ -165,12 +165,13 @@ std::string HistogramImpl::ToString() const {
if (buckets_[b] <= 0.0) continue;
sum += buckets_[b];
snprintf(buf, sizeof(buf),
"[ %7llu, %7llu ) %8llu %7.3f%% %7.3f%% ",
((b == 0) ? 0 : bucketMapper.BucketLimit(b-1)), // left
bucketMapper.BucketLimit(b), // right
buckets_[b], // count
mult * buckets_[b], // percentage
mult * sum); // cumulative percentage
"[ %7lu, %7lu ) %8lu %7.3f%% %7.3f%% ",
// left
(unsigned long)((b == 0) ? 0 : bucketMapper.BucketLimit(b-1)),
(unsigned long)bucketMapper.BucketLimit(b), // right
buckets_[b], // count
(mult * buckets_[b]), // percentage
(mult * sum)); // cumulative percentage
r.append(buf);
// Add hash marks based on percentage; 20 marks for 100%.

@@ -508,7 +508,10 @@ void ManifestDumpCommand::DoCommand() {
while ((entry = readdir(d)) != nullptr) {
unsigned int match;
unsigned long long num;
if (sscanf(entry->d_name, "MANIFEST-%llu%n", &num, &match)
if (sscanf(entry->d_name,
"MANIFEST-%ln%ln",
(unsigned long*)&num,
(unsigned long*)&match)
&& match == strlen(entry->d_name)) {
if (!found) {
manifestfile = db_path_ + "/" + std::string(entry->d_name);
@@ -570,13 +573,15 @@ void PrintBucketCounts(const vector<uint64_t>& bucket_counts, int ttl_start,
int ttl_end, int bucket_size, int num_buckets) {
int time_point = ttl_start;
for(int i = 0; i < num_buckets - 1; i++, time_point += bucket_size) {
fprintf(stdout, "Keys in range %s to %s : %llu\n",
fprintf(stdout, "Keys in range %s to %s : %lu\n",
ReadableTime(time_point).c_str(),
ReadableTime(time_point + bucket_size).c_str(), bucket_counts[i]);
ReadableTime(time_point + bucket_size).c_str(),
(unsigned long)bucket_counts[i]);
}
fprintf(stdout, "Keys in range %s to %s : %llu\n",
fprintf(stdout, "Keys in range %s to %s : %lu\n",
ReadableTime(time_point).c_str(),
ReadableTime(ttl_end).c_str(), bucket_counts[num_buckets - 1]);
ReadableTime(ttl_end).c_str(),
(unsigned long)bucket_counts[num_buckets - 1]);
}
const string InternalDumpCommand::ARG_COUNT_ONLY = "count_only";
@@ -1424,7 +1429,7 @@ void ApproxSizeCommand::DoCommand() {
ranges[0] = Range(start_key_, end_key_);
uint64_t sizes[1];
db_->GetApproximateSizes(ranges, 1, sizes);
fprintf(stdout, "%llu\n", sizes[0]);
fprintf(stdout, "%lu\n", (unsigned long)sizes[0]);
/* Weird that GetApproximateSizes() returns void, although documentation
* says that it returns a Status object.
if (!st.ok()) {

@@ -161,8 +161,8 @@ Options::Dump(Logger* log) const
Log(log," Options.disableDataSync: %d", disableDataSync);
Log(log," Options.use_fsync: %d", use_fsync);
Log(log," Options.max_log_file_size: %ld", max_log_file_size);
Log(log,"Options.max_manifest_file_size: %llu",
max_manifest_file_size);
Log(log,"Options.max_manifest_file_size: %lu",
(unsigned long)max_manifest_file_size);
Log(log," Options.log_file_time_to_roll: %ld", log_file_time_to_roll);
Log(log," Options.keep_log_file_num: %ld", keep_log_file_num);
Log(log," Options.db_stats_log_interval: %d",
@@ -192,16 +192,16 @@ Options::Dump(Logger* log) const
target_file_size_base);
Log(log," Options.target_file_size_multiplier: %d",
target_file_size_multiplier);
Log(log," Options.max_bytes_for_level_base: %llu",
max_bytes_for_level_base);
Log(log," Options.max_bytes_for_level_base: %lu",
(unsigned long)max_bytes_for_level_base);
Log(log," Options.max_bytes_for_level_multiplier: %d",
max_bytes_for_level_multiplier);
for (int i = 0; i < num_levels; i++) {
Log(log,"Options.max_bytes_for_level_multiplier_addtl[%d]: %d",
i, max_bytes_for_level_multiplier_additional[i]);
}
Log(log," Options.max_sequential_skip_in_iterations: %llu",
max_sequential_skip_in_iterations);
Log(log," Options.max_sequential_skip_in_iterations: %lu",
(unsigned long)max_sequential_skip_in_iterations);
Log(log," Options.expanded_compaction_factor: %d",
expanded_compaction_factor);
Log(log," Options.source_compaction_factor: %d",
@@ -222,8 +222,8 @@ Options::Dump(Logger* log) const
table_cache_remove_scan_count_limit);
Log(log," Options.arena_block_size: %ld",
arena_block_size);
Log(log," Options.delete_obsolete_files_period_micros: %llu",
delete_obsolete_files_period_micros);
Log(log," Options.delete_obsolete_files_period_micros: %lu",
(unsigned long)delete_obsolete_files_period_micros);
Log(log," Options.max_background_compactions: %d",
max_background_compactions);
Log(log," Options.max_background_flushes: %d",
@@ -236,10 +236,10 @@ Options::Dump(Logger* log) const
rate_limit_delay_max_milliseconds);
Log(log," Options.disable_auto_compactions: %d",
disable_auto_compactions);
Log(log," Options.WAL_ttl_seconds: %llu",
WAL_ttl_seconds);
Log(log," Options.WAL_size_limit_MB: %llu",
WAL_size_limit_MB);
Log(log," Options.WAL_ttl_seconds: %lu",
(unsigned long)WAL_ttl_seconds);
Log(log," Options.WAL_size_limit_MB: %lu",
(unsigned long)WAL_size_limit_MB);
Log(log," Options.manifest_preallocation_size: %ld",
manifest_preallocation_size);
Log(log," Options.purge_redundant_kvs_while_flush: %d",
@@ -264,8 +264,8 @@ Options::Dump(Logger* log) const
access_hints[access_hint_on_compaction_start]);
Log(log," Options.use_adaptive_mutex: %d",
use_adaptive_mutex);
Log(log," Options.bytes_per_sync: %llu",
bytes_per_sync);
Log(log," Options.bytes_per_sync: %lu",
(unsigned long)bytes_per_sync);
Log(log," Options.filter_deletes: %d",
filter_deletes);
Log(log," Options.compaction_style: %d",
