Fixed sign-comparison in rocksdb code-base and fixed Makefile

Summary: Makefile had options to ignore sign-comparison and unused-parameter warnings, which should not be there. Removed them and fixed the specific errors in the code-base.
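For illustration only (a hedged sketch, not code from this commit): the typical -Wsign-compare pattern fixed throughout this diff is a signed loop index compared against an unsigned size, and the usual fix is to make the index unsigned (or add a cast, as in db_bench.cc):

    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<int> v = {1, 2, 3};

      // Before: 'int i' compared against 'v.size()' (an unsigned size_t)
      // triggers -Wsign-compare, which -Werror turns into a build error:
      //   for (int i = 0; i < v.size(); i++) { ... }

      // After: an unsigned index matches the unsigned size, so the
      // comparison is clean (the style used in most hunks below).
      for (unsigned int i = 0; i < v.size(); i++) {
        std::printf("%d\n", v[i]);
      }
      return 0;
    }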

Test Plan: make

Reviewers: chip, dhruba

Reviewed By: dhruba

CC: leveldb

Differential Revision: https://reviews.facebook.net/D9531
Branch: main
Author: Mayank Agarwal, 12 years ago
Parent: 72d14eafd3
Commit: 487168cdcf
9 changed files:
  1. Makefile (2 changed lines)
  2. db/db_bench.cc (8 changed lines)
  3. db/db_impl.cc (5 changed lines)
  4. db/log_format.h (2 changed lines)
  5. db/version_set.cc (9 changed lines)
  6. db/version_set.h (2 changed lines)
  7. include/leveldb/options.h (2 changed lines)
  8. util/histogram.cc (6 changed lines)
  9. util/options.cc (2 changed lines)

Makefile
@@ -18,7 +18,7 @@ $(shell ./build_detect_platform build_config.mk)
 # this file is generated by the previous line to set build flags and sources
 include build_config.mk
-WARNING_FLAGS = -Wall -Werror -Wno-unused-parameter -Wno-sign-compare
+WARNING_FLAGS = -Wall -Werror
 CFLAGS += -g $(WARNING_FLAGS) -I. -I./include $(PLATFORM_CCFLAGS) $(OPT)
 CXXFLAGS += -g $(WARNING_FLAGS) -I. -I./include $(PLATFORM_CXXFLAGS) $(OPT) -std=gnu++0x
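With -Wno-unused-parameter also removed, unused parameters likewise become build errors under -Wall -Werror. A minimal sketch (assumed example, not code from this repository) of that warning and the common fix of leaving the parameter unnamed:

    #include <vector>

    // Before: the named but unused parameter would trip -Wunused-parameter:
    //   int NumEntries(const std::vector<int>& v, int options) {
    //     return static_cast<int>(v.size());
    //   }

    // After: keep the parameter for the interface, drop the unused name.
    int NumEntries(const std::vector<int>& v, int /*options*/) {
      return static_cast<int>(v.size());
    }

    int main() {
      std::vector<int> v = {4, 5, 6};
      return NumEntries(v, 0) == 3 ? 0 : 1;
    }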

db/db_bench.cc
@@ -183,7 +183,7 @@ static bool FLAGS_use_snapshot = false;
 static bool FLAGS_get_approx = false;
 // The total number of levels
-static unsigned int FLAGS_num_levels = 7;
+static int FLAGS_num_levels = 7;
 // Target level-0 file size for compaction
 static int FLAGS_target_file_size_base = 2 * 1048576;
@@ -289,7 +289,7 @@ class RandomGenerator {
 // large enough to serve all typical value sizes we want to write.
 Random rnd(301);
 std::string piece;
-while (data_.size() < std::max(1048576, FLAGS_value_size)) {
+while (data_.size() < (unsigned)std::max(1048576, FLAGS_value_size)) {
 // Add a short fragment that is as compressible as specified
 // by FLAGS_compression_ratio.
 test::CompressibleString(&rnd, FLAGS_compression_ratio, 100, &piece);
@@ -298,7 +298,7 @@ class RandomGenerator {
 pos_ = 0;
 }
-Slice Generate(int len) {
+Slice Generate(unsigned int len) {
 if (pos_ + len > data_.size()) {
 pos_ = 0;
 assert(len < data_.size());
@@ -1084,7 +1084,7 @@ unique_ptr<char []> GenerateKeyFromInt(int v)
 for (int i = 0; i < FLAGS_min_level_to_compress; i++) {
 options.compression_per_level[i] = kNoCompression;
 }
-for (unsigned int i = FLAGS_min_level_to_compress;
+for (int i = FLAGS_min_level_to_compress;
 i < FLAGS_num_levels; i++) {
 options.compression_per_level[i] = FLAGS_compression_type;
 }

db/db_impl.cc
@@ -413,7 +413,7 @@ void DBImpl::PurgeObsoleteFiles(DeletionState& state) {
 options_.db_log_dir.empty()) {
 std::sort(old_log_files.begin(), old_log_files.end());
 size_t end = old_log_file_count - options_.keep_log_file_num;
-for (int i = 0; i <= end; i++) {
+for (unsigned int i = 0; i <= end; i++) {
 std::string& to_delete = old_log_files.at(i);
 // Log(options_.info_log, "Delete type=%d %s\n",
 // int(kInfoLogFile), to_delete.c_str());
@@ -2158,7 +2158,8 @@ Status DBImpl::MakeRoomForWrite(bool force) {
 stall_leveln_slowdown_[max_level] += delayed;
 // Make sure the following value doesn't round to zero.
 rate_limit_delay_millis += std::max((delayed / 1000), (uint64_t) 1);
-if (rate_limit_delay_millis >= options_.rate_limit_delay_milliseconds) {
+if (rate_limit_delay_millis >=
+    (unsigned)options_.rate_limit_delay_milliseconds) {
 allow_rate_limit_delay = false;
 }
 // Log(options_.info_log,
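Two details in the hunk above, sketched below with hypothetical values (not code from the commit, and assuming, as the division by 1000 suggests, that delayed is tracked in microseconds): integer division would round a sub-millisecond stall to zero, so std::max clamps the charge to at least 1; and comparing the uint64_t accumulator against a plain int option is exactly the signedness mismatch -Wsign-compare flags, hence the (unsigned) cast here and the unsigned field in options.h.

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
      uint64_t delayed = 700;                    // hypothetical sub-millisecond stall
      uint64_t rate_limit_delay_millis = 0;
      int rate_limit_delay_milliseconds = 1000;  // hypothetical option value

      // 700 / 1000 == 0 under integer division; std::max keeps the charge >= 1.
      rate_limit_delay_millis += std::max((delayed / 1000), (uint64_t) 1);

      // Without the cast, uint64_t >= int is a signed/unsigned comparison and
      // -Wsign-compare (promoted to an error by -Werror) rejects it.
      if (rate_limit_delay_millis >= (unsigned)rate_limit_delay_milliseconds) {
        std::printf("stop applying further rate-limit delays\n");
      }
      return 0;
    }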

db/log_format.h
@@ -24,7 +24,7 @@ enum RecordType {
 };
 static const int kMaxRecordType = kLastType;
-static const int kBlockSize = 32768;
+static const unsigned int kBlockSize = 32768;
 // Header is checksum (4 bytes), type (1 byte), length (2 bytes).
 static const int kHeaderSize = 4 + 1 + 2;

db/version_set.cc
@@ -559,7 +559,7 @@ void Version::ExtendOverlappingInputs(
 const Slice& user_begin,
 const Slice& user_end,
 std::vector<FileMetaData*>* inputs,
-int midIndex) {
+unsigned int midIndex) {
 const Comparator* user_cmp = vset_->icmp_.user_comparator();
 #ifndef NDEBUG
@@ -736,13 +736,16 @@ class VersionSet::Builder {
 #endif
 }
-void CheckConsistencyForDeletes(VersionEdit* edit, int number, int level) {
+void CheckConsistencyForDeletes(
+    VersionEdit* edit,
+    unsigned int number,
+    int level) {
 #ifndef NDEBUG
 // a file to be deleted better exist in the previous version
 bool found = false;
 for (int l = 0; !found && l < edit->number_levels_; l++) {
 const std::vector<FileMetaData*>& base_files = base_->files_[l];
-for (int i = 0; i < base_files.size(); i++) {
+for (unsigned int i = 0; i < base_files.size(); i++) {
 FileMetaData* f = base_files[i];
 if (f->number == number) {
 found = true;

db/version_set.h
@@ -106,7 +106,7 @@ class Version {
 const Slice& begin, // nullptr means before all keys
 const Slice& end, // nullptr means after all keys
 std::vector<FileMetaData*>* inputs,
-int index); // start extending from this index
+unsigned int index); // start extending from this index
 // Returns true iff some file in the specified level overlaps
 // some part of [*smallest_user_key,*largest_user_key].

include/leveldb/options.h
@@ -325,7 +325,7 @@ struct Options {
 double rate_limit;
 // Max time a put will be stalled when rate_limit is enforced
-int rate_limit_delay_milliseconds;
+unsigned int rate_limit_delay_milliseconds;
 // manifest file is rolled over on reaching this limit.
 // The older manifest file be deleted.

util/histogram.cc
@@ -95,7 +95,7 @@ void HistogramImpl::Merge(const HistogramImpl& other) {
 num_ += other.num_;
 sum_ += other.sum_;
 sum_squares_ += other.sum_squares_;
-for (int b = 0; b < bucketMapper.BucketCount(); b++) {
+for (unsigned int b = 0; b < bucketMapper.BucketCount(); b++) {
 buckets_[b] += other.buckets_[b];
 }
 }
@@ -107,7 +107,7 @@ double HistogramImpl::Median() const {
 double HistogramImpl::Percentile(double p) const {
 double threshold = num_ * (p / 100.0);
 double sum = 0;
-for (int b = 0; b < bucketMapper.BucketCount(); b++) {
+for (unsigned int b = 0; b < bucketMapper.BucketCount(); b++) {
 sum += buckets_[b];
 if (sum >= threshold) {
 // Scale linearly within this bucket
@@ -158,7 +158,7 @@ std::string HistogramImpl::ToString() const {
 r.append("------------------------------------------------------\n");
 const double mult = 100.0 / num_;
 double sum = 0;
-for (int b = 0; b < bucketMapper.BucketCount(); b++) {
+for (unsigned int b = 0; b < bucketMapper.BucketCount(); b++) {
 if (buckets_[b] <= 0.0) continue;
 sum += buckets_[b];
 snprintf(buf, sizeof(buf),

util/options.cc
@@ -84,7 +84,7 @@ Options::Dump(Logger* log) const
 Log(log," Options.block_size: %zd", block_size);
 Log(log," Options.block_restart_interval: %d", block_restart_interval);
 if (!compression_per_level.empty()) {
-for (int i = 0; i < compression_per_level.size(); i++) {
+for (unsigned int i = 0; i < compression_per_level.size(); i++) {
 Log(log," Options.compression[%d]: %d",
 i, compression_per_level[i]);
 }
