Disable Visual Studio Warning C4351

The Windows build is currently broken because of warning C4351. Disable the warning for now, before figuring out the right way to fix it.
Branch: main
Author: Siying Dong (10 years ago), committed by sdong
Parent: fcafac053f
Commit: 22c0ed8a5f
Changed files:
  1. CMakeLists.txt (2 lines changed)
  2. db/column_family.cc (6 lines changed)
  3. db/db_impl.cc (4 lines changed)
  4. db/db_test.cc (15 lines changed)
  5. db/repair.cc (3 lines changed)
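For reference, C4351 is MSVC's level-1 warning "new behavior: elements of array '...' will be default initialized", emitted when an array member is value-initialized in a constructor's initializer list (very old Visual C++ versions left such arrays uninitialized, so the compiler flags the behavior change). A minimal sketch of the pattern that triggers it, using a hypothetical type that is not taken from the RocksDB sources:

    // cl /W1 /WX example.cpp  -- C4351 is a level-1 warning; /WX turns it into an error.
    struct Buffer {
      char data_[16];
      // Value-initializing the array member is what MSVC reports as C4351:
      // "new behavior: elements of array 'Buffer::data_' will be default initialized".
      Buffer() : data_() {}
    };

    int main() {
      Buffer b;
      return b.data_[0];
    }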

CMakeLists.txt
@@ -60,7 +60,7 @@ add_custom_target(GenerateBuildVersion DEPENDS ${BUILD_VERSION_CC})
 set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /Zi /nologo /EHsc /GS /Gd /GR /GF /fp:precise /Zc:wchar_t /Zc:forScope /errorReport:queue")
-set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /FC /d2Zi+ /W3 /WX /wd4127 /wd4800 /wd4996")
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /FC /d2Zi+ /W3 /WX /wd4127 /wd4800 /wd4996 /wd4351")
 # Used to run CI build and tests so we can run faster
 set(OPTIMIZE_DEBUG_DEFAULT 0) # Debug build is unoptimized by default use -DOPTDBG=1 to optimize
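MSVC's /wd<n> switch suppresses warning n for the whole build, so appending /wd4351 here silences C4351 in every translation unit; it is the neighbouring /WX flag that promotes the warning to a hard build error in the first place. If the warning ever needed to be silenced only around one constructor rather than globally, a scoped pragma would be the usual alternative (a sketch, not part of this commit; the type is hypothetical):

    #if defined(_MSC_VER)
    #pragma warning(push)
    #pragma warning(disable : 4351)  // new behavior: elements of array will be default initialized
    #endif

    struct Example {
      int counts_[4];
      Example() : counts_() {}  // would otherwise raise C4351 under /WX
    };

    #if defined(_MSC_VER)
    #pragma warning(pop)
    #endif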

db/column_family.cc
@@ -481,7 +481,8 @@ std::unique_ptr<WriteControllerToken> SetupDelay(
 // because of hitting the max write buffer number.
 if (prev_compaction_neeed_bytes > 0 &&
     prev_compaction_neeed_bytes <= compaction_needed_bytes) {
-  write_rate /= kSlowdownRatio;
+  write_rate = static_cast<uint64_t>(static_cast<double>(write_rate) /
+                                     kSlowdownRatio);
   if (write_rate < kMinWriteRate) {
     write_rate = kMinWriteRate;
   }
@@ -489,7 +490,8 @@ std::unique_ptr<WriteControllerToken> SetupDelay(
 // We are speeding up by ratio of kSlowdownRatio when we have paid
 // compaction debt. But we'll never speed up to faster than the write rate
 // given by users.
-write_rate *= kSlowdownRatio;
+write_rate = static_cast<uint64_t>(static_cast<double>(write_rate) *
+                                   kSlowdownRatio);
 if (write_rate > max_write_rate) {
   write_rate = max_write_rate;
 }
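The two replacements above do the slowdown/speedup arithmetic in double and then narrow back to uint64_t explicitly; dividing or multiplying a uint64_t by a double ratio in place performs an implicit double-to-integer conversion, which MSVC reports (e.g. C4244, "possible loss of data") and /WX turns into a build error. A minimal sketch of the same idiom with illustrative names (kRatio and rate are not the RocksDB identifiers):

    #include <cstdint>

    int main() {
      const double kRatio = 1.25;  // illustrative slowdown ratio
      uint64_t rate = 16u << 20;   // 16 MB/s

      // rate /= kRatio;           // implicit double -> uint64_t conversion, warns on MSVC
      rate = static_cast<uint64_t>(static_cast<double>(rate) / kRatio);

      return rate > 0 ? 0 : 1;
    }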

db/db_impl.cc
@@ -580,8 +580,8 @@ void DBImpl::FindObsoleteFiles(JobContext* job_context, bool force,
                    &files);  // Ignore errors
   for (std::string file : files) {
     // TODO(icanadi) clean up this mess to avoid having one-off "/" prefixes
-    job_context->full_scan_candidate_files.emplace_back("/" + file,
-                                                        path_id);
+    job_context->full_scan_candidate_files.emplace_back(
+        "/" + file, static_cast<uint32_t>(path_id));
   }
 }
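The emplace_back call stores the path id as a uint32_t, while path_id itself is a wider unsigned index (presumably a size_t loop variable; the hunk does not show its declaration, so that is an assumption). On 64-bit MSVC such implicit narrowing is reported as well (e.g. C4267, "conversion from 'size_t' to ..., possible loss of data"), so the cast spells it out. A small standalone sketch of the same situation, using a hypothetical UseId helper:

    #include <cstdint>
    #include <vector>

    static void UseId(uint32_t path_id) { (void)path_id; }  // hypothetical narrow-typed API

    int main() {
      std::vector<int> paths(3, 0);
      for (size_t path_id = 0; path_id < paths.size(); ++path_id) {
        // UseId(path_id);                      // 64-bit MSVC: C4267, error under /WX
        UseId(static_cast<uint32_t>(path_id));  // explicit narrowing, no warning
      }
      return 0;
    }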

db/db_test.cc
@@ -8058,7 +8058,7 @@ TEST_F(DBTest, OptimizeFiltersForHits) {
   MoveFilesToLevel(7 /* level */, 1 /* column family index */);
   std::string value = Get(1, Key(0));
-  long prev_cache_filter_hits =
+  uint64_t prev_cache_filter_hits =
       TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT);
   value = Get(1, Key(0));
   ASSERT_EQ(prev_cache_filter_hits + 1,
@@ -8087,7 +8087,7 @@ TEST_F(DBTest, OptimizeFiltersForHits) {
   ReopenWithColumnFamilies({"default", "mypikachu"}, options);
-  long prev_cache_filter_misses =
+  uint64_t prev_cache_filter_misses =
       TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS);
   prev_cache_filter_hits = TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT);
   Get(1, Key(0));
@@ -9096,7 +9096,8 @@ TEST_F(DBTest, DelayedWriteRate) {
     dbfull()->TEST_WaitForFlushMemTable();
     estimated_sleep_time += size_memtable * 1000000u / cur_rate;
     // Slow down twice. One for memtable switch and one for flush finishes.
-    cur_rate /= kSlowdownRatio * kSlowdownRatio;
+    cur_rate = static_cast<uint64_t>(static_cast<double>(cur_rate) /
+                                     kSlowdownRatio / kSlowdownRatio);
   }
   // Estimate the total sleep time fall into the rough range.
   ASSERT_GT(env_->addon_time_.load(),
@@ -10324,7 +10325,7 @@ TEST_F(DBTest, PinnedDataIteratorRandomized) {
   Random rnd(301);
   int puts = 100000;
-  int key_pool = puts * 0.7;
+  int key_pool = static_cast<int>(puts * 0.7);
   int key_size = 100;
   int val_size = 1000;
   int seeks_percentage = 20;  // 20% of keys will be used to test seek()
@@ -10353,19 +10354,19 @@ TEST_F(DBTest, PinnedDataIteratorRandomized) {
     // Insert data to true_data map and to DB
     true_data[k] = v;
-    if (rnd.OneIn(100.0 / merge_percentage)) {
+    if (rnd.OneIn(static_cast<int>(100.0 / merge_percentage))) {
       ASSERT_OK(db_->Merge(WriteOptions(), k, v));
     } else {
       ASSERT_OK(Put(k, v));
     }
     // Pick random keys to be used to test Seek()
-    if (rnd.OneIn(100.0 / seeks_percentage)) {
+    if (rnd.OneIn(static_cast<int>(100.0 / seeks_percentage))) {
      random_keys.push_back(k);
     }
     // Delete some random keys
-    if (rnd.OneIn(100.0 / delete_percentage)) {
+    if (rnd.OneIn(static_cast<int>(100.0 / delete_percentage))) {
       deleted_keys.push_back(k);
       true_data.erase(k);
       ASSERT_OK(Delete(k));

db/repair.cc
@@ -190,7 +190,8 @@ class Repairer {
       assert(path_id == 0);
       logs_.push_back(number);
     } else if (type == kTableFile) {
-      table_fds_.emplace_back(number, path_id, 0);
+      table_fds_.emplace_back(number, static_cast<uint32_t>(path_id),
+                              0);
     } else {
       // Ignore other files
     }
