diff --git a/CMakeLists.txt b/CMakeLists.txt
index ca570ccd2..a005d26d1 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -60,7 +60,7 @@ add_custom_target(GenerateBuildVersion DEPENDS ${BUILD_VERSION_CC})
 set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /Zi /nologo /EHsc /GS /Gd /GR /GF /fp:precise /Zc:wchar_t /Zc:forScope /errorReport:queue")
-set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /FC /d2Zi+ /W3 /WX /wd4127 /wd4800 /wd4996")
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /FC /d2Zi+ /W3 /WX /wd4127 /wd4800 /wd4996 /wd4351")
 
 # Used to run CI build and tests so we can run faster
 set(OPTIMIZE_DEBUG_DEFAULT 0) # Debug build is unoptimized by default use -DOPTDBG=1 to optimize
diff --git a/db/column_family.cc b/db/column_family.cc
index 0b74e4ebe..474fc91d9 100644
--- a/db/column_family.cc
+++ b/db/column_family.cc
@@ -481,7 +481,8 @@ std::unique_ptr<WriteControllerToken> SetupDelay(
     // because of hitting the max write buffer number.
     if (prev_compaction_neeed_bytes > 0 &&
         prev_compaction_neeed_bytes <= compaction_needed_bytes) {
-      write_rate /= kSlowdownRatio;
+      write_rate = static_cast<uint64_t>(static_cast<double>(write_rate) /
+                                         kSlowdownRatio);
       if (write_rate < kMinWriteRate) {
         write_rate = kMinWriteRate;
       }
@@ -489,7 +490,8 @@ std::unique_ptr<WriteControllerToken> SetupDelay(
       // We are speeding up by ratio of kSlowdownRatio when we have paid
       // compaction debt. But we'll never speed up to faster than the write rate
       // given by users.
-      write_rate *= kSlowdownRatio;
+      write_rate = static_cast<uint64_t>(static_cast<double>(write_rate) *
+                                         kSlowdownRatio);
       if (write_rate > max_write_rate) {
         write_rate = max_write_rate;
       }
diff --git a/db/db_impl.cc b/db/db_impl.cc
index 020baa7e1..add64668f 100644
--- a/db/db_impl.cc
+++ b/db/db_impl.cc
@@ -580,8 +580,8 @@ void DBImpl::FindObsoleteFiles(JobContext* job_context, bool force,
                         &files);  // Ignore errors
       for (std::string file : files) {
         // TODO(icanadi) clean up this mess to avoid having one-off "/" prefixes
-        job_context->full_scan_candidate_files.emplace_back("/" + file,
-                                                            path_id);
+        job_context->full_scan_candidate_files.emplace_back(
+            "/" + file, static_cast<uint32_t>(path_id));
       }
     }
diff --git a/db/db_test.cc b/db/db_test.cc
index f7900b544..8d37d1c44 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -8058,7 +8058,7 @@ TEST_F(DBTest, OptimizeFiltersForHits) {
   MoveFilesToLevel(7 /* level */, 1 /* column family index */);
 
   std::string value = Get(1, Key(0));
-  long prev_cache_filter_hits =
+  uint64_t prev_cache_filter_hits =
       TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT);
   value = Get(1, Key(0));
   ASSERT_EQ(prev_cache_filter_hits + 1,
@@ -8087,7 +8087,7 @@ TEST_F(DBTest, OptimizeFiltersForHits) {
 
   ReopenWithColumnFamilies({"default", "mypikachu"}, options);
 
-  long prev_cache_filter_misses =
+  uint64_t prev_cache_filter_misses =
       TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS);
   prev_cache_filter_hits = TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT);
   Get(1, Key(0));
@@ -9096,7 +9096,8 @@ TEST_F(DBTest, DelayedWriteRate) {
     dbfull()->TEST_WaitForFlushMemTable();
     estimated_sleep_time += size_memtable * 1000000u / cur_rate;
     // Slow down twice. One for memtable switch and one for flush finishes.
-    cur_rate /= kSlowdownRatio * kSlowdownRatio;
+    cur_rate = static_cast<uint64_t>(static_cast<double>(cur_rate) /
+                                     kSlowdownRatio / kSlowdownRatio);
   }
   // Estimate the total sleep time fall into the rough range.
   ASSERT_GT(env_->addon_time_.load(),
@@ -10324,7 +10325,7 @@ TEST_F(DBTest, PinnedDataIteratorRandomized) {
   Random rnd(301);
 
   int puts = 100000;
-  int key_pool = puts * 0.7;
+  int key_pool = static_cast<int>(puts * 0.7);
   int key_size = 100;
   int val_size = 1000;
   int seeks_percentage = 20;  // 20% of keys will be used to test seek()
@@ -10353,19 +10354,19 @@ TEST_F(DBTest, PinnedDataIteratorRandomized) {
 
     // Insert data to true_data map and to DB
     true_data[k] = v;
-    if (rnd.OneIn(100.0 / merge_percentage)) {
+    if (rnd.OneIn(static_cast<int>(100.0 / merge_percentage))) {
      ASSERT_OK(db_->Merge(WriteOptions(), k, v));
     } else {
      ASSERT_OK(Put(k, v));
     }
 
     // Pick random keys to be used to test Seek()
-    if (rnd.OneIn(100.0 / seeks_percentage)) {
+    if (rnd.OneIn(static_cast<int>(100.0 / seeks_percentage))) {
       random_keys.push_back(k);
     }
 
     // Delete some random keys
-    if (rnd.OneIn(100.0 / delete_percentage)) {
+    if (rnd.OneIn(static_cast<int>(100.0 / delete_percentage))) {
       deleted_keys.push_back(k);
       true_data.erase(k);
       ASSERT_OK(Delete(k));
diff --git a/db/repair.cc b/db/repair.cc
index 1805059a7..f4758d0cd 100644
--- a/db/repair.cc
+++ b/db/repair.cc
@@ -190,7 +190,8 @@ class Repairer {
           assert(path_id == 0);
           logs_.push_back(number);
         } else if (type == kTableFile) {
-          table_fds_.emplace_back(number, path_id, 0);
+          table_fds_.emplace_back(number, static_cast<uint32_t>(path_id),
+                                  0);
         } else {
           // Ignore other files
         }
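
Note (not part of the patch): every C++ change above replaces an implicit arithmetic conversion with an explicit static_cast. The sketch below shows the same pattern in isolation; it assumes the motivation is MSVC's implicit-conversion warnings (e.g. C4244 "possible loss of data"), which the /WX flag already present in CMAKE_CXX_FLAGS turns into build errors. The kSlowdownRatio value and the main() wrapper here are illustrative only.

// cast_sketch.cc -- illustrative only, not part of the diff above.
// Assumption: the explicit casts exist to silence MSVC implicit-conversion
// warnings (C4244-style), which /WX promotes to hard errors.
#include <cstdint>
#include <cstdio>

int main() {
  const double kSlowdownRatio = 2.0;  // hypothetical value, for illustration
  uint64_t write_rate = 16u << 20;    // pretend 16 MB/s delayed write rate

  // The old form, `write_rate /= kSlowdownRatio;`, converts the double result
  // back to uint64_t implicitly and warns under /W3. The patched form makes
  // both conversions explicit, so the same arithmetic compiles cleanly:
  write_rate = static_cast<uint64_t>(static_cast<double>(write_rate) /
                                     kSlowdownRatio);

  std::printf("%llu\n", static_cast<unsigned long long>(write_rate));
  return 0;
}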