Fix iOS build

Summary: We don't yet have a CI build for iOS, so our iOS compile gets broken sometimes. Most of the errors come from the assumption that size_t is 64-bit, while it's actually 32-bit on some (all?) iOS platforms. This diff fixes the compile.

Test Plan:
TARGET_OS=IOS make static_lib

Observe there are no warnings

Reviewers: sdong, anthony, IslamAbdelRahman, kradhakrishnan, yhchiang

Reviewed By: yhchiang

Subscribers: dhruba, leveldb

Differential Revision: https://reviews.facebook.net/D49029
main
Igor Canadi 9 years ago
parent 32c291e3c9
commit 4e07c99a9a
  1. 16
      db/compaction_job.cc
  2. 6
      db/version_set.cc
  3. 2
      util/env_posix.cc
  4. 3
      util/file_reader_writer.cc
  5. 3
      util/file_util.cc
  6. 8
      util/testutil.h

@@ -601,10 +601,10 @@ void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) {
if (measure_io_stats_) { if (measure_io_stats_) {
prev_perf_level = GetPerfLevel(); prev_perf_level = GetPerfLevel();
SetPerfLevel(PerfLevel::kEnableTime); SetPerfLevel(PerfLevel::kEnableTime);
prev_write_nanos = iostats_context.write_nanos; prev_write_nanos = IOSTATS(write_nanos);
prev_fsync_nanos = iostats_context.fsync_nanos; prev_fsync_nanos = IOSTATS(fsync_nanos);
prev_range_sync_nanos = iostats_context.range_sync_nanos; prev_range_sync_nanos = IOSTATS(range_sync_nanos);
prev_prepare_write_nanos = iostats_context.prepare_write_nanos; prev_prepare_write_nanos = IOSTATS(prepare_write_nanos);
} }
ColumnFamilyData* cfd = sub_compact->compaction->column_family_data(); ColumnFamilyData* cfd = sub_compact->compaction->column_family_data();
@@ -728,13 +728,13 @@ void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) {
if (measure_io_stats_) { if (measure_io_stats_) {
sub_compact->compaction_job_stats.file_write_nanos += sub_compact->compaction_job_stats.file_write_nanos +=
iostats_context.write_nanos - prev_write_nanos; IOSTATS(write_nanos) - prev_write_nanos;
sub_compact->compaction_job_stats.file_fsync_nanos += sub_compact->compaction_job_stats.file_fsync_nanos +=
iostats_context.fsync_nanos - prev_fsync_nanos; IOSTATS(fsync_nanos) - prev_fsync_nanos;
sub_compact->compaction_job_stats.file_range_sync_nanos += sub_compact->compaction_job_stats.file_range_sync_nanos +=
iostats_context.range_sync_nanos - prev_range_sync_nanos; IOSTATS(range_sync_nanos) - prev_range_sync_nanos;
sub_compact->compaction_job_stats.file_prepare_write_nanos += sub_compact->compaction_job_stats.file_prepare_write_nanos +=
iostats_context.prepare_write_nanos - prev_prepare_write_nanos; IOSTATS(prepare_write_nanos) - prev_prepare_write_nanos;
if (prev_perf_level != PerfLevel::kEnableTime) { if (prev_perf_level != PerfLevel::kEnableTime) {
SetPerfLevel(prev_perf_level); SetPerfLevel(prev_perf_level);
} }

@@ -1097,7 +1097,7 @@ void VersionStorageInfo::EstimateCompactionBytesNeeded(
// We keep doing it to Level 2, 3, etc, until the last level and return the // We keep doing it to Level 2, 3, etc, until the last level and return the
// accumulated bytes. // accumulated bytes.
size_t bytes_compact_to_next_level = 0; uint64_t bytes_compact_to_next_level = 0;
// Level 0 // Level 0
bool level0_compact_triggered = false; bool level0_compact_triggered = false;
if (static_cast<int>(files_[0].size()) > if (static_cast<int>(files_[0].size()) >
@@ -1113,7 +1113,7 @@ void VersionStorageInfo::EstimateCompactionBytesNeeded(
// Level 1 and up. // Level 1 and up.
for (int level = base_level(); level <= MaxInputLevel(); level++) { for (int level = base_level(); level <= MaxInputLevel(); level++) {
size_t level_size = 0; uint64_t level_size = 0;
for (auto* f : files_[level]) { for (auto* f : files_[level]) {
level_size += f->fd.GetFileSize(); level_size += f->fd.GetFileSize();
} }
@@ -1124,7 +1124,7 @@ void VersionStorageInfo::EstimateCompactionBytesNeeded(
// Add size added by previous compaction // Add size added by previous compaction
level_size += bytes_compact_to_next_level; level_size += bytes_compact_to_next_level;
bytes_compact_to_next_level = 0; bytes_compact_to_next_level = 0;
size_t level_target = MaxBytesForLevel(level); uint64_t level_target = MaxBytesForLevel(level);
if (level_size > level_target) { if (level_size > level_target) {
bytes_compact_to_next_level = level_size - level_target; bytes_compact_to_next_level = level_size - level_target;
// Simplify to assume the actual compaction fan-out ratio is always // Simplify to assume the actual compaction fan-out ratio is always

@@ -317,7 +317,7 @@ class PosixMmapReadableFile: public RandomAccessFile {
*result = Slice(); *result = Slice();
return IOError(filename_, EINVAL); return IOError(filename_, EINVAL);
} else if (offset + n > length_) { } else if (offset + n > length_) {
n = length_ - offset; n = static_cast<size_t>(length_ - offset);
} }
*result = Slice(reinterpret_cast<char*>(mmapped_region_) + offset, n); *result = Slice(reinterpret_cast<char*>(mmapped_region_) + offset, n);
return s; return s;

@@ -412,8 +412,7 @@ class ReadaheadRandomAccessFile : public RandomAccessFile {
// if offset between [buffer_offset_, buffer_offset_ + buffer_len> // if offset between [buffer_offset_, buffer_offset_ + buffer_len>
if (offset >= buffer_offset_ && offset < buffer_len_ + buffer_offset_) { if (offset >= buffer_offset_ && offset < buffer_len_ + buffer_offset_) {
uint64_t offset_in_buffer = offset - buffer_offset_; uint64_t offset_in_buffer = offset - buffer_offset_;
copied = std::min(static_cast<uint64_t>(buffer_len_) - offset_in_buffer, copied = std::min(buffer_len_ - static_cast<size_t>(offset_in_buffer), n);
static_cast<uint64_t>(n));
memcpy(scratch, buffer_.get() + offset_in_buffer, copied); memcpy(scratch, buffer_.get() + offset_in_buffer, copied);
if (copied == n) { if (copied == n) {
// fully cached // fully cached

@@ -49,8 +49,7 @@ Status CopyFile(Env* env, const std::string& source,
char buffer[4096]; char buffer[4096];
Slice slice; Slice slice;
while (size > 0) { while (size > 0) {
uint64_t bytes_to_read = size_t bytes_to_read = std::min(sizeof(buffer), static_cast<size_t>(size));
std::min(static_cast<uint64_t>(sizeof(buffer)), size);
if (s.ok()) { if (s.ok()) {
s = src_reader->Read(bytes_to_read, &slice, buffer); s = src_reader->Read(bytes_to_read, &slice, buffer);
} }

@@ -190,7 +190,7 @@ class StringSink: public WritableFile {
const std::string& contents() const { return contents_; } const std::string& contents() const { return contents_; }
virtual Status Truncate(uint64_t size) override { virtual Status Truncate(uint64_t size) override {
contents_.resize(size); contents_.resize(static_cast<size_t>(size));
return Status::OK(); return Status::OK();
} }
virtual Status Close() override { return Status::OK(); } virtual Status Close() override { return Status::OK(); }
@@ -243,13 +243,13 @@ class StringSource: public RandomAccessFile {
return Status::InvalidArgument("invalid Read offset"); return Status::InvalidArgument("invalid Read offset");
} }
if (offset + n > contents_.size()) { if (offset + n > contents_.size()) {
n = contents_.size() - offset; n = contents_.size() - static_cast<size_t>(offset);
} }
if (!mmap_) { if (!mmap_) {
memcpy(scratch, &contents_[offset], n); memcpy(scratch, &contents_[static_cast<size_t>(offset)], n);
*result = Slice(scratch, n); *result = Slice(scratch, n);
} else { } else {
*result = Slice(&contents_[offset], n); *result = Slice(&contents_[static_cast<size_t>(offset)], n);
} }
return Status::OK(); return Status::OK();
} }

Loading…
Cancel
Save