Fixed sign-comparison in rocksdb code-base and fixed Makefile

Summary: Makefile had options to ignore sign-comparisons and unused-parameters, which should not be there. Also fixed the specific sign-comparison errors in the code-base.

Test Plan: make

Reviewers: chip, dhruba

Reviewed By: dhruba

CC: leveldb

Differential Revision: https://reviews.facebook.net/D9531
main
Mayank Agarwal 12 years ago
parent 72d14eafd3
commit 487168cdcf
  1. 2
      Makefile
  2. 8
      db/db_bench.cc
  3. 5
      db/db_impl.cc
  4. 2
      db/log_format.h
  5. 9
      db/version_set.cc
  6. 2
      db/version_set.h
  7. 2
      include/leveldb/options.h
  8. 6
      util/histogram.cc
  9. 2
      util/options.cc

@ -18,7 +18,7 @@ $(shell ./build_detect_platform build_config.mk)
# this file is generated by the previous line to set build flags and sources
include build_config.mk
WARNING_FLAGS = -Wall -Werror -Wno-unused-parameter -Wno-sign-compare
WARNING_FLAGS = -Wall -Werror
CFLAGS += -g $(WARNING_FLAGS) -I. -I./include $(PLATFORM_CCFLAGS) $(OPT)
CXXFLAGS += -g $(WARNING_FLAGS) -I. -I./include $(PLATFORM_CXXFLAGS) $(OPT) -std=gnu++0x

@ -183,7 +183,7 @@ static bool FLAGS_use_snapshot = false;
static bool FLAGS_get_approx = false;
// The total number of levels
static unsigned int FLAGS_num_levels = 7;
static int FLAGS_num_levels = 7;
// Target level-0 file size for compaction
static int FLAGS_target_file_size_base = 2 * 1048576;
@ -289,7 +289,7 @@ class RandomGenerator {
// large enough to serve all typical value sizes we want to write.
Random rnd(301);
std::string piece;
while (data_.size() < std::max(1048576, FLAGS_value_size)) {
while (data_.size() < (unsigned)std::max(1048576, FLAGS_value_size)) {
// Add a short fragment that is as compressible as specified
// by FLAGS_compression_ratio.
test::CompressibleString(&rnd, FLAGS_compression_ratio, 100, &piece);
@ -298,7 +298,7 @@ class RandomGenerator {
pos_ = 0;
}
Slice Generate(int len) {
Slice Generate(unsigned int len) {
if (pos_ + len > data_.size()) {
pos_ = 0;
assert(len < data_.size());
@ -1084,7 +1084,7 @@ unique_ptr<char []> GenerateKeyFromInt(int v)
for (int i = 0; i < FLAGS_min_level_to_compress; i++) {
options.compression_per_level[i] = kNoCompression;
}
for (unsigned int i = FLAGS_min_level_to_compress;
for (int i = FLAGS_min_level_to_compress;
i < FLAGS_num_levels; i++) {
options.compression_per_level[i] = FLAGS_compression_type;
}

@ -413,7 +413,7 @@ void DBImpl::PurgeObsoleteFiles(DeletionState& state) {
options_.db_log_dir.empty()) {
std::sort(old_log_files.begin(), old_log_files.end());
size_t end = old_log_file_count - options_.keep_log_file_num;
for (int i = 0; i <= end; i++) {
for (unsigned int i = 0; i <= end; i++) {
std::string& to_delete = old_log_files.at(i);
// Log(options_.info_log, "Delete type=%d %s\n",
// int(kInfoLogFile), to_delete.c_str());
@ -2158,7 +2158,8 @@ Status DBImpl::MakeRoomForWrite(bool force) {
stall_leveln_slowdown_[max_level] += delayed;
// Make sure the following value doesn't round to zero.
rate_limit_delay_millis += std::max((delayed / 1000), (uint64_t) 1);
if (rate_limit_delay_millis >= options_.rate_limit_delay_milliseconds) {
if (rate_limit_delay_millis >=
(unsigned)options_.rate_limit_delay_milliseconds) {
allow_rate_limit_delay = false;
}
// Log(options_.info_log,

@ -24,7 +24,7 @@ enum RecordType {
};
static const int kMaxRecordType = kLastType;
static const int kBlockSize = 32768;
static const unsigned int kBlockSize = 32768;
// Header is checksum (4 bytes), type (1 byte), length (2 bytes).
static const int kHeaderSize = 4 + 1 + 2;

@ -559,7 +559,7 @@ void Version::ExtendOverlappingInputs(
const Slice& user_begin,
const Slice& user_end,
std::vector<FileMetaData*>* inputs,
int midIndex) {
unsigned int midIndex) {
const Comparator* user_cmp = vset_->icmp_.user_comparator();
#ifndef NDEBUG
@ -736,13 +736,16 @@ class VersionSet::Builder {
#endif
}
void CheckConsistencyForDeletes(VersionEdit* edit, int number, int level) {
void CheckConsistencyForDeletes(
VersionEdit* edit,
unsigned int number,
int level) {
#ifndef NDEBUG
// a file to be deleted better exist in the previous version
bool found = false;
for (int l = 0; !found && l < edit->number_levels_; l++) {
const std::vector<FileMetaData*>& base_files = base_->files_[l];
for (int i = 0; i < base_files.size(); i++) {
for (unsigned int i = 0; i < base_files.size(); i++) {
FileMetaData* f = base_files[i];
if (f->number == number) {
found = true;

@ -106,7 +106,7 @@ class Version {
const Slice& begin, // nullptr means before all keys
const Slice& end, // nullptr means after all keys
std::vector<FileMetaData*>* inputs,
int index); // start extending from this index
unsigned int index); // start extending from this index
// Returns true iff some file in the specified level overlaps
// some part of [*smallest_user_key,*largest_user_key].

@ -325,7 +325,7 @@ struct Options {
double rate_limit;
// Max time a put will be stalled when rate_limit is enforced
int rate_limit_delay_milliseconds;
unsigned int rate_limit_delay_milliseconds;
// manifest file is rolled over on reaching this limit.
// The older manifest file be deleted.

@ -95,7 +95,7 @@ void HistogramImpl::Merge(const HistogramImpl& other) {
num_ += other.num_;
sum_ += other.sum_;
sum_squares_ += other.sum_squares_;
for (int b = 0; b < bucketMapper.BucketCount(); b++) {
for (unsigned int b = 0; b < bucketMapper.BucketCount(); b++) {
buckets_[b] += other.buckets_[b];
}
}
@ -107,7 +107,7 @@ double HistogramImpl::Median() const {
double HistogramImpl::Percentile(double p) const {
double threshold = num_ * (p / 100.0);
double sum = 0;
for (int b = 0; b < bucketMapper.BucketCount(); b++) {
for (unsigned int b = 0; b < bucketMapper.BucketCount(); b++) {
sum += buckets_[b];
if (sum >= threshold) {
// Scale linearly within this bucket
@ -158,7 +158,7 @@ std::string HistogramImpl::ToString() const {
r.append("------------------------------------------------------\n");
const double mult = 100.0 / num_;
double sum = 0;
for (int b = 0; b < bucketMapper.BucketCount(); b++) {
for (unsigned int b = 0; b < bucketMapper.BucketCount(); b++) {
if (buckets_[b] <= 0.0) continue;
sum += buckets_[b];
snprintf(buf, sizeof(buf),

@ -84,7 +84,7 @@ Options::Dump(Logger* log) const
Log(log," Options.block_size: %zd", block_size);
Log(log," Options.block_restart_interval: %d", block_restart_interval);
if (!compression_per_level.empty()) {
for (int i = 0; i < compression_per_level.size(); i++) {
for (unsigned int i = 0; i < compression_per_level.size(); i++) {
Log(log," Options.compression[%d]: %d",
i, compression_per_level[i]);
}

Loading…
Cancel
Save