diff --git a/db/compaction_job.cc b/db/compaction_job.cc
index 8ad4b99a5..ea052b84f 100644
--- a/db/compaction_job.cc
+++ b/db/compaction_job.cc
@@ -601,10 +601,10 @@ void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) {
   if (measure_io_stats_) {
     prev_perf_level = GetPerfLevel();
     SetPerfLevel(PerfLevel::kEnableTime);
-    prev_write_nanos = iostats_context.write_nanos;
-    prev_fsync_nanos = iostats_context.fsync_nanos;
-    prev_range_sync_nanos = iostats_context.range_sync_nanos;
-    prev_prepare_write_nanos = iostats_context.prepare_write_nanos;
+    prev_write_nanos = IOSTATS(write_nanos);
+    prev_fsync_nanos = IOSTATS(fsync_nanos);
+    prev_range_sync_nanos = IOSTATS(range_sync_nanos);
+    prev_prepare_write_nanos = IOSTATS(prepare_write_nanos);
   }
 
   ColumnFamilyData* cfd = sub_compact->compaction->column_family_data();
@@ -728,13 +728,13 @@ void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) {
   if (measure_io_stats_) {
     sub_compact->compaction_job_stats.file_write_nanos +=
-        iostats_context.write_nanos - prev_write_nanos;
+        IOSTATS(write_nanos) - prev_write_nanos;
     sub_compact->compaction_job_stats.file_fsync_nanos +=
-        iostats_context.fsync_nanos - prev_fsync_nanos;
+        IOSTATS(fsync_nanos) - prev_fsync_nanos;
     sub_compact->compaction_job_stats.file_range_sync_nanos +=
-        iostats_context.range_sync_nanos - prev_range_sync_nanos;
+        IOSTATS(range_sync_nanos) - prev_range_sync_nanos;
     sub_compact->compaction_job_stats.file_prepare_write_nanos +=
-        iostats_context.prepare_write_nanos - prev_prepare_write_nanos;
+        IOSTATS(prepare_write_nanos) - prev_prepare_write_nanos;
     if (prev_perf_level != PerfLevel::kEnableTime) {
       SetPerfLevel(prev_perf_level);
     }
   }
diff --git a/db/version_set.cc b/db/version_set.cc
index e52ae1396..34e67aa0f 100644
--- a/db/version_set.cc
+++ b/db/version_set.cc
@@ -1097,7 +1097,7 @@ void VersionStorageInfo::EstimateCompactionBytesNeeded(
   // We keep doing it to Level 2, 3, etc, until the last level and return the
   // accumulated bytes.
 
-  size_t bytes_compact_to_next_level = 0;
+  uint64_t bytes_compact_to_next_level = 0;
   // Level 0
   bool level0_compact_triggered = false;
   if (static_cast<int>(files_[0].size()) >
@@ -1113,7 +1113,7 @@ void VersionStorageInfo::EstimateCompactionBytesNeeded(
 
   // Level 1 and up.
   for (int level = base_level(); level <= MaxInputLevel(); level++) {
-    size_t level_size = 0;
+    uint64_t level_size = 0;
     for (auto* f : files_[level]) {
       level_size += f->fd.GetFileSize();
     }
@@ -1124,7 +1124,7 @@ void VersionStorageInfo::EstimateCompactionBytesNeeded(
     // Add size added by previous compaction
     level_size += bytes_compact_to_next_level;
     bytes_compact_to_next_level = 0;
-    size_t level_target = MaxBytesForLevel(level);
+    uint64_t level_target = MaxBytesForLevel(level);
     if (level_size > level_target) {
       bytes_compact_to_next_level = level_size - level_target;
       // Simplify to assume the actual compaction fan-out ratio is always
diff --git a/util/env_posix.cc b/util/env_posix.cc
index 3c264e1cf..7d241ca63 100644
--- a/util/env_posix.cc
+++ b/util/env_posix.cc
@@ -317,7 +317,7 @@ class PosixMmapReadableFile: public RandomAccessFile {
       *result = Slice();
       return IOError(filename_, EINVAL);
     } else if (offset + n > length_) {
-      n = length_ - offset;
+      n = static_cast<size_t>(length_ - offset);
     }
     *result = Slice(reinterpret_cast<char*>(mmapped_region_) + offset, n);
     return s;
diff --git a/util/file_reader_writer.cc b/util/file_reader_writer.cc
index 1c262fde7..ff459262c 100644
--- a/util/file_reader_writer.cc
+++ b/util/file_reader_writer.cc
@@ -412,8 +412,7 @@ class ReadaheadRandomAccessFile : public RandomAccessFile {
     // if offset between [buffer_offset_, buffer_offset_ + buffer_len>
     if (offset >= buffer_offset_ && offset < buffer_len_ + buffer_offset_) {
       uint64_t offset_in_buffer = offset - buffer_offset_;
-      copied = std::min(static_cast<uint64_t>(buffer_len_) - offset_in_buffer,
-                        static_cast<uint64_t>(n));
+      copied = std::min(buffer_len_ - static_cast<size_t>(offset_in_buffer), n);
       memcpy(scratch, buffer_.get() + offset_in_buffer, copied);
       if (copied == n) {
         // fully cached
diff --git a/util/file_util.cc b/util/file_util.cc
index 1bcf3ed48..d4f7b4004 100644
--- a/util/file_util.cc
+++ b/util/file_util.cc
@@ -49,8 +49,7 @@ Status CopyFile(Env* env, const std::string& source,
   char buffer[4096];
   Slice slice;
   while (size > 0) {
-    uint64_t bytes_to_read =
-        std::min(static_cast<uint64_t>(sizeof(buffer)), size);
+    size_t bytes_to_read = std::min(sizeof(buffer), static_cast<size_t>(size));
     if (s.ok()) {
       s = src_reader->Read(bytes_to_read, &slice, buffer);
     }
diff --git a/util/testutil.h b/util/testutil.h
index 0bde21c31..0373532a8 100644
--- a/util/testutil.h
+++ b/util/testutil.h
@@ -190,7 +190,7 @@ class StringSink: public WritableFile {
   const std::string& contents() const { return contents_; }
 
   virtual Status Truncate(uint64_t size) override {
-    contents_.resize(size);
+    contents_.resize(static_cast<size_t>(size));
     return Status::OK();
   }
   virtual Status Close() override { return Status::OK(); }
@@ -243,13 +243,13 @@ class StringSource: public RandomAccessFile {
       return Status::InvalidArgument("invalid Read offset");
     }
     if (offset + n > contents_.size()) {
-      n = contents_.size() - offset;
+      n = contents_.size() - static_cast<size_t>(offset);
     }
     if (!mmap_) {
-      memcpy(scratch, &contents_[offset], n);
+      memcpy(scratch, &contents_[static_cast<size_t>(offset)], n);
       *result = Slice(scratch, n);
     } else {
-      *result = Slice(&contents_[offset], n);
+      *result = Slice(&contents_[static_cast<size_t>(offset)], n);
     }
     return Status::OK();
   }