Fix MSVC-related build issues (#7439)

Summary:
This PR addresses some build and functional issues on MSVC targets, as a step towards an eventual goal of having RocksDB build successfully for Windows on ARM64.

Addressed issues include:
- `BitsSetToOne` and `CountTrailingZeroBits` do not compile on non-x64 MSVC targets. A fallback implementation of `BitsSetToOne` for targets where the Intel intrinsics are not available is added, based on the C++20 `<bit>` popcount implementation in Microsoft's STL.
- The implementation of `FloorLog2` for MSVC targets (including x64) gives incorrect results. The unit test detects this easily, but CircleCI is currently configured to run only a specific set of tests for Windows CMake builds, so it appears to have gone unnoticed (a small illustration of the intended behavior follows this list).
- AsmVolatilePause does not use YieldProcessor on Windows ARM64 targets, even though it is available.
- When `CondVar::TimedWait` calls the Microsoft STL's `condition_variable::wait_for`, it can trigger a bug (only recently fixed, in the STL shipping with the upcoming VS 16.8) that deadlocks various tests that wait for a timer to execute: `Timer::Run` never gets a chance to execute before the test function acquires the mutex and blocks it.
- In c_test, `GetTempDir` assumes a POSIX-style temp path.
- `NormalizePath` does not eliminate consecutive POSIX-style path separators on Windows, causing test failures in, e.g., wal_manager_test.
- Various other test failures.
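
To illustrate the FloorLog2 item above: MSVC's `_BitScanReverse` family reports the bit index of the highest set bit, which is already floor(log2(v)), rather than a leading-zero count, so the old `63 - index` computation gave wrong results. Here is a minimal standalone sketch of the intended contract, assuming an MSVC x86/x64 toolchain; `FloorLog2U32` is a hypothetical name for illustration only, not RocksDB code:

    // Hypothetical illustration only -- not part of this change.
    // Assumes MSVC on x86/x64 so that <intrin.h> provides _BitScanReverse.
    #include <cassert>
    #include <cstdint>
    #include <intrin.h>

    inline int FloorLog2U32(uint32_t v) {
      assert(v > 0);
      unsigned long idx = 0;
      _BitScanReverse(&idx, v);      // idx = index of the highest set bit
      return static_cast<int>(idx);  // e.g. v=8 -> 3, v=0x80000000 -> 31
    }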

In a follow-up PR I hope to modify CircleCI's config.yml to invoke all RocksDB unit tests in Windows CMake builds with CTest, instead of the current use of `run_ci_db_test.ps1`, which requires individual tests to be specified and is missing many of the existing tests.

Notes from peterd: FloorLog2 is not yet used in production code (it's for something in progress). I also added a few more inexpensive platform-dependent tests to the Windows CircleCI runs, and included facebook/folly#1461 as requested.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/7439

Reviewed By: jay-zhuang

Differential Revision: D24021563

Pulled By: pdillinger

fbshipit-source-id: 0ec2027c0d6a494d8a0fe38d9667fc2f7e29f7e7
Branch: main
Author: Koby Kahane (committed by Facebook GitHub Bot)
Commit: 3e745053b7 (parent 2f2e6e1e2c)
Files changed:
  .circleci/config.yml                       |  2
  db/c_test.c                                |  4
  db/perf_context_test.cc                    |  7
  file/filename.cc                           |  4
  port/win/port_win.cc                       |  9
  port/win/port_win.h                        |  2
  third-party/folly/folly/chrono/Hardware.h  |  4
  util/filelock_test.cc                      |  2
  util/math.h                                | 66
  util/xxh3p.h                               |  4

.circleci/config.yml
@@ -276,7 +276,7 @@ jobs:
           name: "Test RocksDB"
           shell: powershell.exe
           command: |
-            build_tools\run_ci_db_test.ps1 -SuiteRun db_basic_test,db_test,db_test2,env_basic_test,env_test,db_merge_operand_test -Concurrency 16
+            build_tools\run_ci_db_test.ps1 -SuiteRun db_basic_test,db_test,db_test2,db_merge_operand_test,bloom_test,c_test,coding_test,crc32c_test,dynamic_bloom_test,env_basic_test,env_test,hash_test,random_test -Concurrency 16

   build-linux-java:
     machine:

db/c_test.c
@@ -58,7 +58,11 @@ static void StartPhase(const char* name) {
 static const char* GetTempDir(void) {
   const char* ret = getenv("TEST_TMPDIR");
   if (ret == NULL || ret[0] == '\0')
+#ifdef OS_WIN
+    ret = getenv("TEMP");
+#else
     ret = "/tmp";
+#endif
   return ret;
 }
 #ifdef _MSC_VER

db/perf_context_test.cc
@@ -817,6 +817,13 @@ TEST_F(PerfContextTest, PerfContextByLevelGetSet) {
 }

 TEST_F(PerfContextTest, CPUTimer) {
+  if (Env::Default()->NowCPUNanos() == 0) {
+    // TODO: This should be a GTEST_SKIP when the embedded gtest is updated
+    // to 1.10 or higher.
+    GTEST_SUCCESS_("Skipped on target without NowCPUNanos support");
+    return;
+  }
+
   DestroyDB(kDbName, Options());
   auto db = OpenDb();
   WriteOptions write_options;

file/filename.cc
@@ -461,8 +461,8 @@ Status GetInfoLogFiles(Env* env, const std::string& db_log_dir,
 std::string NormalizePath(const std::string& path) {
   std::string dst;
   for (auto c : path) {
-    if (!dst.empty() && c == kFilePathSeparator &&
-        dst.back() == kFilePathSeparator) {
+    if (!dst.empty() && (c == kFilePathSeparator || c == '/') &&
+        (dst.back() == kFilePathSeparator || dst.back() == '/')) {
       continue;
     }
     dst.push_back(c);
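
As a rough illustration of the change above (a standalone sketch, not the RocksDB function itself; `CollapseSeparators` is a hypothetical name), the loop now treats both the native separator and '/' as separators when collapsing runs:

    #include <string>

    // Mirrors the patched loop: consecutive separators of either kind collapse.
    inline std::string CollapseSeparators(const std::string& path,
                                          char native_sep = '\\') {
      std::string dst;
      for (char c : path) {
        if (!dst.empty() && (c == native_sep || c == '/') &&
            (dst.back() == native_sep || dst.back() == '/')) {
          continue;
        }
        dst.push_back(c);
      }
      return dst;
    }

    // e.g. CollapseSeparators("a//b") == "a/b" and
    //      CollapseSeparators("a\\/b") == "a\\b"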

port/win/port_win.cc
@@ -98,6 +98,15 @@ bool CondVar::TimedWait(uint64_t abs_time_us) {
   // Caller must ensure that mutex is held prior to calling this method
   std::unique_lock<std::mutex> lk(mu_->getLock(), std::adopt_lock);

+  // Work around https://github.com/microsoft/STL/issues/369
+#if defined(_MSC_VER) && \
+    (!defined(_MSVC_STL_UPDATE) || _MSVC_STL_UPDATE < 202008L)
+  if (relTimeUs == microseconds::zero()) {
+    lk.unlock();
+    lk.lock();
+  }
+#endif
+
 #ifndef NDEBUG
   mu_->locked_ = false;
 #endif

port/win/port_win.h
@@ -283,7 +283,7 @@ extern const size_t kPageSize;
 #endif

 static inline void AsmVolatilePause() {
-#if defined(_M_IX86) || defined(_M_X64)
+#if defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM64) || defined(_M_ARM)
   YieldProcessor();
 #endif
   // it would be nice to get "wfe" on ARM here

third-party/folly/folly/chrono/Hardware.h
@@ -10,7 +10,7 @@
 #include <chrono>
 #include <cstdint>

-#if _MSC_VER
+#if _MSC_VER && (defined(_M_IX86) || defined(_M_X64))
 extern "C" std::uint64_t __rdtsc();
 #pragma intrinsic(__rdtsc)
 #endif
@@ -18,7 +18,7 @@ extern "C" std::uint64_t __rdtsc();
 namespace folly {

 inline std::uint64_t hardware_timestamp() {
-#if _MSC_VER
+#if _MSC_VER && (defined(_M_IX86) || defined(_M_X64))
   return __rdtsc();
 #elif __GNUC__ && (__i386__ || FOLLY_X64)
   return __builtin_ia32_rdtsc();

util/filelock_test.cc
@@ -127,9 +127,11 @@ TEST_F(LockTest, LockBySameThread) {
   // re-acquire the lock on the same file. This should fail.
   Status s = LockFile(&lock2);
   ASSERT_TRUE(s.IsIOError());
+#ifndef OS_WIN
   // Validate that error message contains current thread ID.
   ASSERT_TRUE(s.ToString().find(ToString(Env::Default()->GetThreadID())) !=
               std::string::npos);
+#endif

   // check the file is locked
   ASSERT_TRUE( AssertFileIsLocked() );

util/math.h
@@ -23,13 +23,23 @@ inline int FloorLog2(T v) {
   assert(v > 0);
 #ifdef _MSC_VER
   static_assert(sizeof(T) <= sizeof(uint64_t), "type too big");
-  unsigned long lz = 0;
+  unsigned long idx = 0;
   if (sizeof(T) <= sizeof(uint32_t)) {
-    _BitScanReverse(&lz, static_cast<uint32_t>(v));
+    _BitScanReverse(&idx, static_cast<uint32_t>(v));
   } else {
-    _BitScanReverse64(&lz, static_cast<uint64_t>(v));
+#if defined(_M_X64) || defined(_M_ARM64)
+    _BitScanReverse64(&idx, static_cast<uint64_t>(v));
+#else
+    const auto vh = static_cast<uint32_t>(static_cast<uint64_t>(v) >> 32);
+    if (vh != 0) {
+      _BitScanReverse(&idx, static_cast<uint32_t>(vh));
+      idx += 32;
+    } else {
+      _BitScanReverse(&idx, static_cast<uint32_t>(v));
+    }
+#endif
   }
-  return 63 - static_cast<int>(lz);
+  return idx;
 #else
   static_assert(sizeof(T) <= sizeof(unsigned long long), "type too big");
   if (sizeof(T) <= sizeof(unsigned int)) {
@@ -56,7 +66,16 @@ inline int CountTrailingZeroBits(T v) {
   if (sizeof(T) <= sizeof(uint32_t)) {
     _BitScanForward(&tz, static_cast<uint32_t>(v));
   } else {
+#if defined(_M_X64) || defined(_M_ARM64)
     _BitScanForward64(&tz, static_cast<uint64_t>(v));
+#else
+    _BitScanForward(&tz, static_cast<uint32_t>(v));
+    if (tz == 0) {
+      _BitScanForward(&tz,
+                      static_cast<uint32_t>(static_cast<uint64_t>(v) >> 32));
+      tz += 32;
+    }
+#endif
   }
   return static_cast<int>(tz);
 #else
@@ -71,6 +90,29 @@ inline int CountTrailingZeroBits(T v) {
 #endif
 }

+#if defined(_MSC_VER) && !defined(_M_X64)
+namespace detail {
+
+template <typename T>
+int BitsSetToOneFallback(T v) {
+  const int kBits = static_cast<int>(sizeof(T)) * 8;
+  static_assert((kBits & (kBits - 1)) == 0, "must be power of two bits");
+  // we static_cast these bit patterns in order to truncate them to the correct
+  // size
+  v = static_cast<T>(v - ((v >> 1) & static_cast<T>(0x5555555555555555ull)));
+  v = static_cast<T>((v & static_cast<T>(0x3333333333333333ull)) +
+                     ((v >> 2) & static_cast<T>(0x3333333333333333ull)));
+  v = static_cast<T>((v + (v >> 4)) & static_cast<T>(0x0F0F0F0F0F0F0F0Full));
+  for (int shift_bits = 8; shift_bits < kBits; shift_bits <<= 1) {
+    v += static_cast<T>(v >> shift_bits);
+  }
+  // we want the bottom "slot" that's big enough to represent a value up to
+  // (and including) kBits.
+  return static_cast<int>(v & static_cast<T>(kBits | (kBits - 1)));
+}
+
+}  // namespace detail
+#endif
+
 // Number of bits set to 1. Also known as "population count".
 template <typename T>
 inline int BitsSetToOne(T v) {
@@ -82,11 +124,27 @@ inline int BitsSetToOne(T v) {
     constexpr auto mm = 8 * sizeof(uint32_t) - 1;
     // The bit mask is to neutralize sign extension on small signed types
     constexpr uint32_t m = (uint32_t{1} << ((8 * sizeof(T)) & mm)) - 1;
+#if defined(_M_X64) || defined(_M_IX86)
     return static_cast<int>(__popcnt(static_cast<uint32_t>(v) & m));
+#else
+    return static_cast<int>(detail::BitsSetToOneFallback(v) & m);
+#endif
   } else if (sizeof(T) == sizeof(uint32_t)) {
+#if defined(_M_X64) || defined(_M_IX86)
     return static_cast<int>(__popcnt(static_cast<uint32_t>(v)));
+#else
+    return detail::BitsSetToOneFallback(static_cast<uint32_t>(v));
+#endif
   } else {
+#ifdef _M_X64
     return static_cast<int>(__popcnt64(static_cast<uint64_t>(v)));
+#elif defined(_M_IX86)
+    return static_cast<int>(
+        __popcnt(static_cast<uint32_t>(static_cast<uint64_t>(v) >> 32)) +
+        __popcnt(static_cast<uint32_t>(v)));
+#else
+    return detail::BitsSetToOneFallback(static_cast<uint64_t>(v));
+#endif
   }
 #else
   static_assert(sizeof(T) <= sizeof(unsigned long long), "type too big");
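
The BitsSetToOneFallback added above uses the classic SWAR popcount. As a sanity check of the idea (a self-contained sketch specialized to uint32_t, not taken from the diff; `SwarPopcount32` and `NaivePopcount32` are illustrative names), the same steps can be compared against a naive bit loop:

    #include <cassert>
    #include <cstdint>

    inline int SwarPopcount32(uint32_t v) {
      v = v - ((v >> 1) & 0x55555555u);                  // 2-bit sums
      v = (v & 0x33333333u) + ((v >> 2) & 0x33333333u);  // 4-bit sums
      v = (v + (v >> 4)) & 0x0F0F0F0Fu;                  // 8-bit sums
      v += v >> 8;
      v += v >> 16;
      return static_cast<int>(v & 0x3Fu);                // result <= 32
    }

    inline int NaivePopcount32(uint32_t v) {
      int n = 0;
      for (; v != 0; v >>= 1) n += static_cast<int>(v & 1u);
      return n;
    }

    // e.g. assert(SwarPopcount32(0xF0F0F0F0u) == NaivePopcount32(0xF0F0F0F0u));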

util/xxh3p.h
@@ -253,7 +253,9 @@ XXH_FORCE_INLINE U64x2 XXH_vec_mule(U32x4 a, U32x4 b) {
 #if defined(XXH_NO_PREFETCH)
 #  define XXH_PREFETCH(ptr)  (void)(ptr)  /* disabled */
 #else
-#  if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86))  /* _mm_prefetch() is not defined outside of x86/x64 */
+#if defined(_MSC_VER) && \
+    (defined(_M_X64) || \
+     defined(_M_IX86)) /* _mm_prefetch() is not defined outside of x86/x64 */
 #    include <mmintrin.h>   /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
 #    define XXH_PREFETCH(ptr)  _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
 #  elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
