Fix the string format issue

Summary:

Mac and our dev server have completely different definitions of uint64_t, so fixing the warning on Mac actually made the code uncompilable on Linux.
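
For context, here is a minimal sketch of the underlying portability problem (the variable name `number` and the values below are illustrative, not taken from the diff): on LP64 Linux uint64_t is typically defined as unsigned long, while on Mac it is unsigned long long, so neither "%lu" nor "%llu" is warning-free on both. This change standardizes on "%lu" plus an explicit cast:

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint64_t number = 4096;
      // "%llu" warns on Linux (uint64_t is unsigned long there);
      // "%lu" warns on Mac (uint64_t is unsigned long long there).
      // An explicit cast makes a single specifier valid on both platforms:
      std::printf("Delete type=%d #%lu\n", 2, (unsigned long)number);
      // A fully portable alternative would be the <cinttypes> macro:
      // std::printf("Delete type=%d #%" PRIu64 "\n", 2, number);
      return 0;
    }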

Test Plan:

make clean && make -j32
Branch: main
Author: Kai Liu, 11 years ago
Parent: d88d8ecf80
Commit: 35460ccb53
Changed files (7):
  1. db/db_impl.cc (67 changed lines)
  2. db/memtablelist.cc (16 changed lines)
  3. db/transaction_log_impl.cc (8 changed lines)
  4. db/version_set.cc (136 changed lines)
  5. util/histogram.cc (13 changed lines)
  6. util/ldb_cmd.cc (17 changed lines)
  7. util/options.cc (28 changed lines)

db/db_impl.cc
@@ -563,9 +563,10 @@ void DBImpl::PurgeObsoleteFiles(DeletionState& state) {
         table_cache_->Evict(number);
       }
       std::string fname = dbname_ + "/" + state.all_files[i];
-      Log(options_.info_log, "Delete type=%d #%lu -- %s",
-          int(type), number, fname.c_str());
+      Log(options_.info_log,
+          "Delete type=%d #%lu",
+          int(type),
+          (unsigned long)number);
       Status st;
       if (type == kLogFile && (options_.WAL_ttl_seconds > 0 ||
@@ -573,14 +574,15 @@ void DBImpl::PurgeObsoleteFiles(DeletionState& state) {
         st = env_->RenameFile(fname,
             ArchivedLogFileName(options_.wal_dir, number));
         if (!st.ok()) {
-          Log(options_.info_log, "RenameFile logfile #%lu FAILED -- %s\n",
-              number, st.ToString().c_str());
+          Log(options_.info_log,
+              "RenameFile logfile #%lu FAILED",
+              (unsigned long)number);
         }
       } else {
         st = env_->DeleteFile(fname);
         if (!st.ok()) {
-          Log(options_.info_log, "Delete type=%d #%lu FAILED -- %s\n",
-              int(type), number, st.ToString().c_str());
+          Log(options_.info_log, "Delete type=%d #%lu FAILED\n",
+              int(type), (unsigned long)number);
         }
       }
     }
@@ -887,8 +889,8 @@ Status DBImpl::RecoverLogFile(uint64_t log_number,
   // large sequence numbers).
   log::Reader reader(std::move(file), &reporter, true/*checksum*/,
                      0/*initial_offset*/);
-  Log(options_.info_log, "Recovering log #%llu",
-      (unsigned long long) log_number);
+  Log(options_.info_log, "Recovering log #%lu",
+      (unsigned long) log_number);
   // Read all the records and add to a memtable
   std::string scratch;
@@ -956,8 +958,8 @@ Status DBImpl::WriteLevel0TableForRecovery(MemTable* mem, VersionEdit* edit) {
   const SequenceNumber newest_snapshot = snapshots_.GetNewest();
   const SequenceNumber earliest_seqno_in_memtable =
     mem->GetFirstSequenceNumber();
-  Log(options_.info_log, "Level-0 table #%llu: started",
-      (unsigned long long) meta.number);
+  Log(options_.info_log, "Level-0 table #%lu: started",
+      (unsigned long) meta.number);
   Status s;
   {
@@ -970,9 +972,9 @@ Status DBImpl::WriteLevel0TableForRecovery(MemTable* mem, VersionEdit* edit) {
     mutex_.Lock();
   }
-  Log(options_.info_log, "Level-0 table #%llu: %lld bytes %s",
-      (unsigned long long) meta.number,
-      (unsigned long long) meta.file_size,
+  Log(options_.info_log, "Level-0 table #%lu: %lu bytes %s",
+      (unsigned long) meta.number,
+      (unsigned long) meta.file_size,
       s.ToString().c_str());
   delete iter;
@@ -1008,8 +1010,8 @@ Status DBImpl::WriteLevel0Table(std::vector<MemTable*> &mems, VersionEdit* edit,
   std::vector<Iterator*> list;
   for (MemTable* m : mems) {
     Log(options_.info_log,
-        "Flushing memtable with log file: %llu\n",
-        m->GetLogNumber());
+        "Flushing memtable with log file: %lu\n",
+        (unsigned long)m->GetLogNumber());
     list.push_back(m->NewIterator());
   }
   Iterator* iter = NewMergingIterator(&internal_comparator_, &list[0],
@@ -1017,7 +1019,9 @@ Status DBImpl::WriteLevel0Table(std::vector<MemTable*> &mems, VersionEdit* edit,
   const SequenceNumber newest_snapshot = snapshots_.GetNewest();
   const SequenceNumber earliest_seqno_in_memtable =
     mems[0]->GetFirstSequenceNumber();
-  Log(options_.info_log, "Level-0 flush table #%llu: started", meta.number);
+  Log(options_.info_log,
+      "Level-0 flush table #%lu: started",
+      (unsigned long)meta.number);
   Version* base = versions_->current();
   base->Ref();          // it is likely that we do not need this reference
@@ -1038,9 +1042,9 @@ Status DBImpl::WriteLevel0Table(std::vector<MemTable*> &mems, VersionEdit* edit,
   }
   base->Unref();
-  Log(options_.info_log, "Level-0 flush table #%llu: %lld bytes %s",
-      (unsigned long long) meta.number,
-      (unsigned long long) meta.file_size,
+  Log(options_.info_log, "Level-0 flush table #%lu: %lu bytes %s",
+      (unsigned long) meta.number,
+      (unsigned long) meta.file_size,
      s.ToString().c_str());
   delete iter;
@@ -1969,10 +1973,10 @@ Status DBImpl::FinishCompactionOutputFile(CompactionState* compact,
     delete iter;
     if (s.ok()) {
       Log(options_.info_log,
-          "Generated table #%llu: %lld keys, %lld bytes",
-          (unsigned long long) output_number,
-          (unsigned long long) current_entries,
-          (unsigned long long) current_bytes);
+          "Generated table #%lu: %lu keys, %lu bytes",
+          (unsigned long) output_number,
+          (unsigned long) current_entries,
+          (unsigned long) current_bytes);
     }
   }
   return s;
@@ -2037,8 +2041,9 @@ inline SequenceNumber DBImpl::findEarliestVisibleSnapshot(
     assert(prev);
   }
   Log(options_.info_log,
-      "Looking for seqid %llu but maxseqid is %llu", in,
-      snapshots[snapshots.size()-1]);
+      "Looking for seqid %lu but maxseqid is %lu",
+      (unsigned long)in,
+      (unsigned long)snapshots[snapshots.size()-1]);
   assert(0);
   return 0;
 }
@@ -2949,9 +2954,6 @@ Status DBImpl::MakeRoomForWrite(bool force) {
       stall_level0_slowdown_ += delayed;
       stall_level0_slowdown_count_++;
       allow_delay = false;  // Do not delay a single write more than once
-      //Log(options_.info_log,
-      //   "delaying write %llu usecs for level0_slowdown_writes_trigger\n",
-      //   (long long unsigned int)delayed);
       mutex_.Lock();
       delayed_writes_++;
     } else if (!force &&
@@ -3014,9 +3016,6 @@ Status DBImpl::MakeRoomForWrite(bool force) {
           (unsigned)options_.rate_limit_delay_max_milliseconds) {
         allow_hard_rate_limit_delay = false;
       }
-      // Log(options_.info_log,
-      //    "delaying write %llu usecs for rate limits with max score %.2f\n",
-      //    (long long unsigned int)delayed, score);
       mutex_.Lock();
     } else if (
         allow_soft_rate_limit_delay &&
@@ -3068,8 +3067,8 @@ Status DBImpl::MakeRoomForWrite(bool force) {
           internal_comparator_, mem_rep_factory_, NumberLevels(), options_);
       mem_->Ref();
       Log(options_.info_log,
-          "New memtable created with log file: #%llu\n",
-          logfile_number_);
+          "New memtable created with log file: #%lu\n",
+          (unsigned long)logfile_number_);
       mem_->SetLogNumber(logfile_number_);
       force = false;   // Do not force another compaction if have room
       MaybeScheduleFlushOrCompaction();

db/memtablelist.cc
@@ -122,7 +122,9 @@ Status MemTableList::InstallMemtableFlushResults(
       break;
     }
-    Log(info_log, "Level-0 commit table #%llu started", m->file_number_);
+    Log(info_log,
+        "Level-0 commit table #%lu started",
+        (unsigned long)m->file_number_);
     // this can release and reacquire the mutex.
     s = vset->LogAndApply(&m->edit_, mu);
@@ -133,9 +135,9 @@ Status MemTableList::InstallMemtableFlushResults(
     do {
       if (s.ok()) { // commit new state
         Log(info_log,
-            "Level-0 commit table #%llu: memtable #%llu done",
-            m->file_number_,
-            mem_id);
+            "Level-0 commit table #%lu: memtable #%lu done",
+            (unsigned long)m->file_number_,
+            (unsigned long)mem_id);
         memlist_.remove(m);
         assert(m->file_number_ > 0);
@@ -149,9 +151,9 @@ Status MemTableList::InstallMemtableFlushResults(
       } else {
         //commit failed. setup state so that we can flush again.
         Log(info_log,
-            "Level-0 commit table #%llu: memtable #%llu failed",
-            m->file_number_,
-            mem_id);
+            "Level-0 commit table #%lu: memtable #%lu failed",
+            (unsigned long)m->file_number_,
+            (unsigned long)mem_id);
         m->flush_completed_ = false;
         m->flush_in_progress_ = false;
         m->edit_.Clear();

db/transaction_log_impl.cc
@@ -205,10 +205,12 @@ bool TransactionLogIteratorImpl::IsBatchExpected(
   if (batchSeq != expectedSeq) {
     char buf[200];
     snprintf(buf, sizeof(buf),
-             "Discontinuity in log records. Got seq=%llu, Expected seq=%llu, "
-             "Last flushed seq=%llu.Log iterator will reseek the correct "
+             "Discontinuity in log records. Got seq=%lu, Expected seq=%lu, "
+             "Last flushed seq=%lu.Log iterator will reseek the correct "
              "batch.",
-             batchSeq, expectedSeq, dbimpl_->GetLatestSequenceNumber());
+             (unsigned long)batchSeq,
+             (unsigned long)expectedSeq,
+             (unsigned long)dbimpl_->GetLatestSequenceNumber());
     reporter_.Info(buf);
     return false;
   }

db/version_set.cc
@@ -1322,8 +1322,9 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu,
   if (s.ok() && old_manifest_file_number < manifest_file_number_) {
     // delete old manifest file
     Log(options_->info_log,
-        "Deleting manifest %llu current manifest %llu\n",
-        old_manifest_file_number, manifest_file_number_);
+        "Deleting manifest %lu current manifest %lu\n",
+        (unsigned long)old_manifest_file_number,
+        (unsigned long)manifest_file_number_);
     // we don't care about an error here, PurgeObsoleteFiles will take care
     // of it later
     env_->DeleteFile(DescriptorFileName(dbname_, old_manifest_file_number));
@@ -1348,8 +1349,8 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu,
     prev_log_number_ = edit->prev_log_number_;
   } else {
-    Log(options_->info_log, "Error in committing version %llu",
-        v->GetVersionNumber());
+    Log(options_->info_log, "Error in committing version %lu",
+        (unsigned long)v->GetVersionNumber());
     delete v;
     if (!new_manifest_file.empty()) {
       descriptor_log_.reset();
@@ -1521,11 +1522,15 @@ Status VersionSet::Recover() {
     prev_log_number_ = prev_log_number;
     Log(options_->info_log, "Recovered from manifest file:%s succeeded,"
-        "manifest_file_number is %llu, next_file_number is %llu, "
-        "last_sequence is %llu, log_number is %llu,"
-        "prev_log_number is %llu\n",
-        current.c_str(), manifest_file_number_, next_file_number_,
-        last_sequence_, log_number_, prev_log_number_);
+        "manifest_file_number is %lu, next_file_number is %lu, "
+        "last_sequence is %lu, log_number is %lu,"
+        "prev_log_number is %lu\n",
+        current.c_str(),
+        (unsigned long)manifest_file_number_,
+        (unsigned long)next_file_number_,
+        (unsigned long)last_sequence_,
+        (unsigned long)log_number_,
+        (unsigned long)prev_log_number_);
   }
   return s;
@@ -1647,10 +1652,13 @@ Status VersionSet::DumpManifest(Options& options, std::string& dscname,
     log_number_ = log_number;
     prev_log_number_ = prev_log_number;
-    printf("manifest_file_number %llu next_file_number %llu last_sequence "
-           "%llu log_number %llu prev_log_number %llu\n",
-           manifest_file_number_, next_file_number_,
-           last_sequence, log_number, prev_log_number);
+    printf("manifest_file_number %lu next_file_number %lu last_sequence "
+           "%lu log_number %lu prev_log_number %lu\n",
+           (unsigned long)manifest_file_number_,
+           (unsigned long)next_file_number_,
+           (unsigned long)last_sequence,
+           (unsigned long)log_number,
+           (unsigned long)prev_log_number);
     printf("%s \n", v->DebugString(hex).c_str());
   }
@@ -1864,8 +1872,8 @@ const char* VersionSet::LevelDataSizeSummary(
   int len = snprintf(scratch->buffer, sizeof(scratch->buffer), "files_size[");
   for (int i = 0; i < NumberLevels(); i++) {
     int sz = sizeof(scratch->buffer) - len;
-    int ret = snprintf(scratch->buffer + len, sz, "%llu ",
-                       NumLevelBytes(i));
+    int ret = snprintf(scratch->buffer + len, sz, "%lu ",
+                       (unsigned long)NumLevelBytes(i));
     if (ret < 0 || ret >= sz)
       break;
     len += ret;
@@ -1881,9 +1889,11 @@ const char* VersionSet::LevelFileSummary(
     FileMetaData* f = current_->files_[level][i];
     int sz = sizeof(scratch->buffer) - len;
     int ret = snprintf(scratch->buffer + len, sz,
-                       "#%llu(seq=%llu,sz=%llu,%d) ",
-                       f->number, f->smallest_seqno,
-                       f->file_size, f->being_compacted);
+                       "#%lu(seq=%lu,sz=%lu,%lu) ",
+                       (unsigned long)f->number,
+                       (unsigned long)f->smallest_seqno,
+                       (unsigned long)f->file_size,
+                       (unsigned long)f->being_compacted);
     if (ret < 0 || ret >= sz)
       break;
     len += ret;
@@ -2221,16 +2231,20 @@ Compaction* VersionSet::PickCompactionUniversalSizeAmp(
       start_index = loop;  // Consider this as the first candidate.
       break;
     }
-    Log(options_->info_log, "Universal: skipping file %llu[%d] compacted %s",
-        f->number, loop, " cannot be a candidate to reduce size amp.\n");
+    Log(options_->info_log, "Universal: skipping file %lu[%d] compacted %s",
+        (unsigned long)f->number,
+        loop,
+        " cannot be a candidate to reduce size amp.\n");
     f = nullptr;
   }
   if (f == nullptr) {
     return nullptr;  // no candidate files
   }
-  Log(options_->info_log, "Universal: First candidate file %llu[%d] %s",
-      f->number, start_index, " to reduce size amp.\n");
+  Log(options_->info_log, "Universal: First candidate file %lu[%d] %s",
+      (unsigned long)f->number,
+      start_index,
+      " to reduce size amp.\n");
   // keep adding up all the remaining files
   for (unsigned int loop = start_index; loop < file_by_time.size() - 1;
@@ -2239,7 +2253,9 @@ Compaction* VersionSet::PickCompactionUniversalSizeAmp(
     f = current_->files_[level][index];
     if (f->being_compacted) {
       Log(options_->info_log,
-          "Universal: Possible candidate file %llu[%d] %s.", f->number, loop,
+          "Universal: Possible candidate file %lu[%d] %s.",
+          (unsigned long)f->number,
+          loop,
           " is already being compacted. No size amp reduction possible.\n");
       return nullptr;
     }
@@ -2257,15 +2273,17 @@ Compaction* VersionSet::PickCompactionUniversalSizeAmp(
   // size amplification = percentage of additional size
   if (candidate_size * 100 < ratio * earliest_file_size) {
     Log(options_->info_log,
-        "Universal: size amp not needed. newer-files-total-size %llu "
-        "earliest-file-size %llu",
-        candidate_size, earliest_file_size);
+        "Universal: size amp not needed. newer-files-total-size %lu "
+        "earliest-file-size %lu",
+        (unsigned long)candidate_size,
+        (unsigned long)earliest_file_size);
     return nullptr;
   } else {
     Log(options_->info_log,
-        "Universal: size amp needed. newer-files-total-size %llu "
-        "earliest-file-size %llu",
-        candidate_size, earliest_file_size);
+        "Universal: size amp needed. newer-files-total-size %lu "
+        "earliest-file-size %lu",
+        (unsigned long)candidate_size,
+        (unsigned long)earliest_file_size);
   }
   assert(start_index >= 0 && start_index < file_by_time.size() - 1);
@@ -2280,8 +2298,10 @@ Compaction* VersionSet::PickCompactionUniversalSizeAmp(
     f = current_->files_[level][index];
     c->inputs_[0].push_back(f);
     Log(options_->info_log,
-        "Universal: size amp picking file %llu[%d] with size %llu",
-        f->number, index, f->file_size);
+        "Universal: size amp picking file %lu[%d] with size %lu",
+        (unsigned long)f->number,
+        index,
+        (unsigned long)f->file_size);
   }
   return c;
 }
@@ -2327,8 +2347,8 @@ Compaction* VersionSet::PickCompactionUniversalReadAmp(
       break;
     }
     Log(options_->info_log,
-        "Universal: file %llu[%d] being compacted, skipping",
-        f->number, loop);
+        "Universal: file %lu[%d] being compacted, skipping",
+        (unsigned long)f->number, loop);
     f = nullptr;
   }
@@ -2336,8 +2356,8 @@ Compaction* VersionSet::PickCompactionUniversalReadAmp(
   // first candidate to be compacted.
   uint64_t candidate_size = f != nullptr? f->file_size : 0;
   if (f != nullptr) {
-    Log(options_->info_log, "Universal: Possible candidate file %llu[%d].",
-        f->number, loop);
+    Log(options_->info_log, "Universal: Possible candidate file %lu[%d].",
+        (unsigned long)f->number, loop);
   }
   // Check if the suceeding files need compaction.
@@ -2370,8 +2390,11 @@ Compaction* VersionSet::PickCompactionUniversalReadAmp(
       int index = file_by_time[i];
       FileMetaData* f = current_->files_[level][index];
       Log(options_->info_log,
-          "Universal: Skipping file %llu[%d] with size %llu %d\n",
-          f->number, i, f->file_size, f->being_compacted);
+          "Universal: Skipping file %lu[%d] with size %lu %d\n",
+          (unsigned long)f->number,
+          i,
+          (unsigned long)f->file_size,
+          f->being_compacted);
     }
   }
 }
@@ -2405,8 +2428,10 @@ Compaction* VersionSet::PickCompactionUniversalReadAmp(
     int index = file_by_time[i];
     FileMetaData* f = current_->files_[level][index];
     c->inputs_[0].push_back(f);
-    Log(options_->info_log, "Universal: Picking file %llu[%d] with size %llu\n",
-        f->number, i, f->file_size);
+    Log(options_->info_log, "Universal: Picking file %lu[%d] with size %lu\n",
+        (unsigned long)f->number,
+        i,
+        (unsigned long)f->file_size);
   }
   return c;
 }
@@ -2792,16 +2817,17 @@ void VersionSet::SetupOtherInputs(Compaction* c) {
     if (expanded1.size() == c->inputs_[1].size() &&
        !FilesInCompaction(expanded1)) {
       Log(options_->info_log,
-          "Expanding@%d %d+%d (%llu+%llu bytes) to %d+%d (%llu+%llu bytes)\n",
-          level,
-          int(c->inputs_[0].size()),
-          int(c->inputs_[1].size()),
-          inputs0_size,
-          inputs1_size,
-          int(expanded0.size()),
-          int(expanded1.size()),
-          expanded0_size,
-          inputs1_size);
+          "Expanding@%lu %lu+%lu (%lu+%lu bytes) to %lu+%lu (%lu+%lu bytes)"
+          "\n",
+          (unsigned long)level,
+          (unsigned long)(c->inputs_[0].size()),
+          (unsigned long)(c->inputs_[1].size()),
+          (unsigned long)inputs0_size,
+          (unsigned long)inputs1_size,
+          (unsigned long)(expanded0.size()),
+          (unsigned long)(expanded1.size()),
+          (unsigned long)expanded0_size,
+          (unsigned long)inputs1_size);
       smallest = new_start;
       largest = new_limit;
       c->inputs_[0] = expanded0;
@@ -3091,9 +3117,9 @@ static void InputSummary(std::vector<FileMetaData*>& files,
   int write = 0;
   for (unsigned int i = 0; i < files.size(); i++) {
     int sz = len - write;
-    int ret = snprintf(output + write, sz, "%llu(%llu) ",
-                       files.at(i)->number,
-                       files.at(i)->file_size);
+    int ret = snprintf(output + write, sz, "%lu(%lu) ",
+                       (unsigned long)files.at(i)->number,
+                       (unsigned long)files.at(i)->file_size);
     if (ret < 0 || ret >= sz)
       break;
     write += ret;
@@ -3102,8 +3128,10 @@ static void InputSummary(std::vector<FileMetaData*>& files,
 void Compaction::Summary(char* output, int len) {
   int write = snprintf(output, len,
-      "Base version %llu Base level %d, seek compaction:%d, inputs:",
-      input_version_->GetVersionNumber(), level_, seek_compaction_);
+      "Base version %lu Base level %d, seek compaction:%d, inputs:",
+      (unsigned long)input_version_->GetVersionNumber(),
+      level_,
+      seek_compaction_);
   if (write < 0 || write > len) {
     return;
   }

util/histogram.cc
@@ -165,12 +165,13 @@ std::string HistogramImpl::ToString() const {
     if (buckets_[b] <= 0.0) continue;
     sum += buckets_[b];
     snprintf(buf, sizeof(buf),
-             "[ %7llu, %7llu ) %8llu %7.3f%% %7.3f%% ",
-             ((b == 0) ? 0 : bucketMapper.BucketLimit(b-1)),  // left
-             bucketMapper.BucketLimit(b),  // right
-             buckets_[b],  // count
-             mult * buckets_[b],  // percentage
-             mult * sum);  // cumulative percentage
+             "[ %7lu, %7lu ) %8lu %7.3f%% %7.3f%% ",
+             // left
+             (unsigned long)((b == 0) ? 0 : bucketMapper.BucketLimit(b-1)),
+             (unsigned long)bucketMapper.BucketLimit(b),  // right
+             buckets_[b],  // count
+             (mult * buckets_[b]),  // percentage
+             (mult * sum));  // cumulative percentage
     r.append(buf);
     // Add hash marks based on percentage; 20 marks for 100%.

util/ldb_cmd.cc
@@ -508,7 +508,10 @@ void ManifestDumpCommand::DoCommand() {
     while ((entry = readdir(d)) != nullptr) {
       unsigned int match;
       unsigned long long num;
-      if (sscanf(entry->d_name, "MANIFEST-%llu%n", &num, &match)
+      if (sscanf(entry->d_name,
+                 "MANIFEST-%ln%ln",
+                 (unsigned long*)&num,
+                 (unsigned long*)&match)
           && match == strlen(entry->d_name)) {
         if (!found) {
           manifestfile = db_path_ + "/" + std::string(entry->d_name);
@@ -570,13 +573,15 @@ void PrintBucketCounts(const vector<uint64_t>& bucket_counts, int ttl_start,
     int ttl_end, int bucket_size, int num_buckets) {
   int time_point = ttl_start;
   for(int i = 0; i < num_buckets - 1; i++, time_point += bucket_size) {
-    fprintf(stdout, "Keys in range %s to %s : %llu\n",
+    fprintf(stdout, "Keys in range %s to %s : %lu\n",
             ReadableTime(time_point).c_str(),
-            ReadableTime(time_point + bucket_size).c_str(), bucket_counts[i]);
+            ReadableTime(time_point + bucket_size).c_str(),
+            (unsigned long)bucket_counts[i]);
   }
-  fprintf(stdout, "Keys in range %s to %s : %llu\n",
+  fprintf(stdout, "Keys in range %s to %s : %lu\n",
           ReadableTime(time_point).c_str(),
-          ReadableTime(ttl_end).c_str(), bucket_counts[num_buckets - 1]);
+          ReadableTime(ttl_end).c_str(),
+          (unsigned long)bucket_counts[num_buckets - 1]);
 }
 const string InternalDumpCommand::ARG_COUNT_ONLY = "count_only";
@@ -1424,7 +1429,7 @@ void ApproxSizeCommand::DoCommand() {
   ranges[0] = Range(start_key_, end_key_);
   uint64_t sizes[1];
   db_->GetApproximateSizes(ranges, 1, sizes);
-  fprintf(stdout, "%llu\n", sizes[0]);
+  fprintf(stdout, "%lu\n", (unsigned long)sizes[0]);
   /* Weird that GetApproximateSizes() returns void, although documentation
    * says that it returns a Status object.
   if (!st.ok()) {

util/options.cc
@@ -161,8 +161,8 @@ Options::Dump(Logger* log) const
   Log(log," Options.disableDataSync: %d", disableDataSync);
   Log(log," Options.use_fsync: %d", use_fsync);
   Log(log," Options.max_log_file_size: %ld", max_log_file_size);
-  Log(log,"Options.max_manifest_file_size: %llu",
-      max_manifest_file_size);
+  Log(log,"Options.max_manifest_file_size: %lu",
+      (unsigned long)max_manifest_file_size);
   Log(log," Options.log_file_time_to_roll: %ld", log_file_time_to_roll);
   Log(log," Options.keep_log_file_num: %ld", keep_log_file_num);
   Log(log," Options.db_stats_log_interval: %d",
@@ -192,16 +192,16 @@ Options::Dump(Logger* log) const
       target_file_size_base);
   Log(log," Options.target_file_size_multiplier: %d",
       target_file_size_multiplier);
-  Log(log," Options.max_bytes_for_level_base: %llu",
-      max_bytes_for_level_base);
+  Log(log," Options.max_bytes_for_level_base: %lu",
+      (unsigned long)max_bytes_for_level_base);
   Log(log," Options.max_bytes_for_level_multiplier: %d",
       max_bytes_for_level_multiplier);
   for (int i = 0; i < num_levels; i++) {
     Log(log,"Options.max_bytes_for_level_multiplier_addtl[%d]: %d",
         i, max_bytes_for_level_multiplier_additional[i]);
   }
-  Log(log," Options.max_sequential_skip_in_iterations: %llu",
-      max_sequential_skip_in_iterations);
+  Log(log," Options.max_sequential_skip_in_iterations: %lu",
+      (unsigned long)max_sequential_skip_in_iterations);
   Log(log," Options.expanded_compaction_factor: %d",
       expanded_compaction_factor);
   Log(log," Options.source_compaction_factor: %d",
@@ -222,8 +222,8 @@ Options::Dump(Logger* log) const
       table_cache_remove_scan_count_limit);
   Log(log," Options.arena_block_size: %ld",
       arena_block_size);
-  Log(log," Options.delete_obsolete_files_period_micros: %llu",
-      delete_obsolete_files_period_micros);
+  Log(log," Options.delete_obsolete_files_period_micros: %lu",
+      (unsigned long)delete_obsolete_files_period_micros);
   Log(log," Options.max_background_compactions: %d",
       max_background_compactions);
   Log(log," Options.max_background_flushes: %d",
@@ -236,10 +236,10 @@ Options::Dump(Logger* log) const
       rate_limit_delay_max_milliseconds);
   Log(log," Options.disable_auto_compactions: %d",
       disable_auto_compactions);
-  Log(log," Options.WAL_ttl_seconds: %llu",
-      WAL_ttl_seconds);
-  Log(log," Options.WAL_size_limit_MB: %llu",
-      WAL_size_limit_MB);
+  Log(log," Options.WAL_ttl_seconds: %lu",
+      (unsigned long)WAL_ttl_seconds);
+  Log(log," Options.WAL_size_limit_MB: %lu",
+      (unsigned long)WAL_size_limit_MB);
   Log(log," Options.manifest_preallocation_size: %ld",
       manifest_preallocation_size);
   Log(log," Options.purge_redundant_kvs_while_flush: %d",
@@ -264,8 +264,8 @@ Options::Dump(Logger* log) const
       access_hints[access_hint_on_compaction_start]);
   Log(log," Options.use_adaptive_mutex: %d",
       use_adaptive_mutex);
-  Log(log," Options.bytes_per_sync: %llu",
-      bytes_per_sync);
+  Log(log," Options.bytes_per_sync: %lu",
+      (unsigned long)bytes_per_sync);
   Log(log," Options.filter_deletes: %d",
       filter_deletes);
   Log(log," Options.compaction_style: %d",
