version : 2.1
orbs :
win : circleci/windows@2.4.0
commands :
install-cmake-on-macos :
steps :
- run :
name : Install cmake on macos
command : |
HOMEBREW_NO_AUTO_UPDATE=1 brew install cmake
install-jdk8-on-macos :
steps :
- run :
name : Install JDK 8 on macos
command : |
brew install --cask adoptopenjdk/openjdk/adoptopenjdk8
increase-max-open-files-on-macos :
steps :
- run :
name : Increase max open files
command : |
sudo sysctl -w kern.maxfiles=1048576
sudo sysctl -w kern.maxfilesperproc=1048576
sudo launchctl limit maxfiles 1048576
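# Illustrative only (not executed by CI): the raised limits can be confirmed with
#   sysctl kern.maxfiles kern.maxfilesperproc
#   launchctl limit maxfiles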
pre-steps :
steps :
- checkout
- run :
name : Setup Environment Variables
command : |
echo "export GTEST_THROW_ON_FAILURE=0" >> $BASH_ENV
echo "export GTEST_OUTPUT=\"xml:/tmp/test-results/\"" >> $BASH_ENV
echo "export SKIP_FORMAT_BUCK_CHECKS=1" >> $BASH_ENV
echo "export GTEST_COLOR=1" >> $BASH_ENV
echo "export CTEST_OUTPUT_ON_FAILURE=1" >> $BASH_ENV
echo "export CTEST_TEST_TIMEOUT=300" >> $BASH_ENV
echo "export ZLIB_DOWNLOAD_BASE=https://rocksdb-deps.s3.us-west-2.amazonaws.com/pkgs/zlib" >> $BASH_ENV
echo "export BZIP2_DOWNLOAD_BASE=https://rocksdb-deps.s3.us-west-2.amazonaws.com/pkgs/bzip2" >> $BASH_ENV
echo "export SNAPPY_DOWNLOAD_BASE=https://rocksdb-deps.s3.us-west-2.amazonaws.com/pkgs/snappy" >> $BASH_ENV
echo "export LZ4_DOWNLOAD_BASE=https://rocksdb-deps.s3.us-west-2.amazonaws.com/pkgs/lz4" >> $BASH_ENV
echo "export ZSTD_DOWNLOAD_BASE=https://rocksdb-deps.s3.us-west-2.amazonaws.com/pkgs/zstd" >> $BASH_ENV
pre-steps-macos :
steps :
- pre-steps
post-steps :
steps :
- store_test_results : # store test results if there are any
path : /tmp/test-results
- store_artifacts : # store LOG for debugging if there is one
path : LOG
- run : # on fail, compress Test Logs for diagnosing the issue
name : Compress Test Logs
command : tar -cvzf t.tar.gz t
when : on_fail
- store_artifacts : # on fail, store Test Logs for diagnosing the issue
path : t.tar.gz
destination : test_logs
when : on_fail
- run : # store core dumps if there are any
command : |
mkdir -p /tmp/core_dumps
cp core.* /tmp/core_dumps
when : on_fail
- store_artifacts :
path : /tmp/core_dumps
when : on_fail
upgrade-cmake :
steps :
- run :
name : Upgrade cmake
command : |
sudo apt remove --purge cmake
sudo snap install cmake --classic
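# Illustrative only: afterwards `cmake --version` should report the snap-installed cmake.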
install-gflags :
steps :
- run :
name : Install gflags
command : |
sudo apt-get update -y && sudo apt-get install -y libgflags-dev
install-gflags-on-macos :
steps :
- run :
name : Install gflags on macos
command : |
HOMEBREW_NO_AUTO_UPDATE=1 brew install gflags
setup-folly :
steps :
- run :
name : Checkout folly sources
command : |
make checkout_folly
build-folly :
steps :
- run :
name : Build folly and dependencies
command : |
make build_folly
build-for-benchmarks :
steps :
- pre-steps
- run :
name : "Linux build for benchmarks"
command : # sized for the resource-class rocksdb-benchmark-sys1
make V=1 J=8 -j8 release
perform-benchmarks :
steps :
- run :
name : "Test low-variance benchmarks"
command : ./tools/benchmark_ci.py --db_dir /tmp/rocksdb-benchmark-datadir --output_dir /tmp/benchmark-results --num_keys 10000000
environment :
LD_LIBRARY_PATH : /usr/local/lib
# How long to run parts of the test(s)
DURATION_RO : 400
DURATION_RW : 700
# Keep threads within physical capacity of server (much lower than default)
NUM_THREADS : 1
MAX_BACKGROUND_JOBS : 3
# Don't run a couple of "optional" initial tests
CI_TESTS_ONLY : "true"
# Reduce configured size of levels to ensure more levels in the leveled compaction LSM tree
WRITE_BUFFER_SIZE_MB : 16
TARGET_FILE_SIZE_BASE_MB : 16
MAX_BYTES_FOR_LEVEL_BASE_MB : 64
# The benchmark host has 32GB memory
# The following values are tailored to work with that
# Note, tests may not exercise the targeted issues if the memory is increased on new test hosts.
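# The variables above are presumably read by ./tools/benchmark_ci.py and the
# benchmark scripts it drives; see those scripts for the authoritative list.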
post-benchmarks :
steps :
- store_artifacts : # store the benchmark output
path : /tmp/benchmark-results
destination : test_logs
- run :
name : Send benchmark report to visualisation
command : |
set +e
set +o pipefail
./build_tools/benchmark_log_tool.py --tsvfile /tmp/benchmark-results/report.tsv --esdocument https://search-rocksdb-bench-k2izhptfeap2hjfxteolsgsynm.us-west-2.es.amazonaws.com/bench_test3_rix/_doc
true
executors :
windows-2xlarge :
machine :
image : 'windows-server-2019-vs2019:stable'
resource_class : windows.2xlarge
shell : bash.exe
linux-docker :
docker :
# The image configuration is in build_tools/ubuntu20_image/Dockerfile
# To update and build the image:
# $ cd build_tools/ubuntu20_image
# $ docker build -t zjay437/rocksdb:0.5 .
# $ docker push zjay437/rocksdb:0.5
# `zjay437` is the account name for zjay@meta.com, whose read/write token is shared internally. To log in:
# $ docker login --username zjay437
# Alternatively, feel free to host the image under your own Docker Hub account; Meta employees should already have an account and can log in with SSO.
# To avoid impacting existing CI runs, please bump the version every time a new image is created.
# To run the CI image environment locally:
# $ docker run --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -it zjay437/rocksdb:0.5 bash
# The option `--cap-add=SYS_PTRACE --security-opt seccomp=unconfined` is needed so that gdb can attach to an existing process
- image : zjay437/rocksdb:0.6
jobs :
build-macos :
macos :
xcode : 12.5.1
resource_class : large
environment :
ROCKSDB_DISABLE_JEMALLOC : 1 # jemalloc causes env_test to hang, so disable it for now
steps :
- increase-max-open-files-on-macos
- install-gflags-on-macos
- pre-steps-macos
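# "ulimit -S -n `ulimit -H -n`" in the build/test steps raises the soft open-file
# limit to the hard limit so that tests which open many files do not run out of
# file descriptors.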
- run : ulimit -S -n `ulimit -H -n` && OPT=-DCIRCLECI make V=1 J=32 -j32 all
- post-steps
build-macos-cmake :
macos :
xcode : 12.5.1
resource_class : large
parameters :
run_even_tests :
description : run even or odd tests; used to split the tests into 2 groups
type : boolean
default : true
steps :
- increase-max-open-files-on-macos
- install-cmake-on-macos
- install-gflags-on-macos
- pre-steps-macos
- run :
name : "cmake generate project file"
command : ulimit -S -n `ulimit -H -n` && mkdir build && cd build && cmake -DWITH_GFLAGS=1 ..
- run :
name : "Build tests"
command : cd build && make V=1 -j32
- when :
condition : << parameters.run_even_tests >>
steps :
- run :
name : "Run even tests"
command : ulimit -S -n `ulimit -H -n` && cd build && ctest -j32 -I 0,,2
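# ctest's -I <start>,<end>,<stride> option selects every <stride>-th test, with an
# empty <end> meaning "through the last test"; so "-I 0,,2" above runs the
# even-numbered tests and "-I 1,,2" below runs the odd-numbered ones.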
- when :
condition :
not : << parameters.run_even_tests >>
steps :
- run :
name : "Run odd tests"
command : ulimit -S -n `ulimit -H -n` && cd build && ctest -j32 -I 1,,2
- post-steps
build-linux :
executor : linux-docker
resource_class : 2xlarge
steps :
- pre-steps
- run : make V=1 J=32 -j32 check
- post-steps
build-linux-encrypted_env-no_compression :
executor : linux-docker
resource_class : 2xlarge
steps :
- pre-steps
- run : ENCRYPTED_ENV=1 ROCKSDB_DISABLE_SNAPPY=1 ROCKSDB_DISABLE_ZLIB=1 ROCKSDB_DISABLE_BZIP=1 ROCKSDB_DISABLE_LZ4=1 ROCKSDB_DISABLE_ZSTD=1 make V=1 J=32 -j32 check
- run : |
./sst_dump --help | grep -E -q 'Supported compression types : kNoCompression$' # Verify no compiled in compression
- post-steps
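# This job combines LIB_MODE=shared, an alternative ROCKSDB_NAMESPACE, and
# ASSERT_STATUS_CHECKED=1 so that all three configurations are exercised together.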
build-linux-shared_lib-alt_namespace-status_checked :
executor : linux-docker
resource_class : 2xlarge
steps :
- pre-steps
- run : ASSERT_STATUS_CHECKED=1 TEST_UINT128_COMPAT=1 ROCKSDB_MODIFY_NPHASH=1 LIB_MODE=shared OPT="-DROCKSDB_NAMESPACE=alternative_rocksdb_ns" make V=1 -j32 check
- post-steps
build-linux-release :
executor : linux-docker
resource_class : 2xlarge
steps :
- checkout # check out the code in the project directory
- run : make V=1 -j32 release
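# db_stress requires gflags, so whether "./db_stress --version" succeeds below
# distinguishes the gflags build from the gflags-free rebuild that follows the
# removal of libgflags-dev.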
- run : ./db_stress --version # ensure with gflags
- run : make clean
- run : apt-get remove -y libgflags-dev
- run : make V=1 -j32 release
- run : if ./db_stress --version; then false; else true; fi # ensure without gflags
- post-steps
build-linux-release-rtti :
executor : linux-docker
resource_class : xlarge
steps :
- checkout # check out the code in the project directory
- run : USE_RTTI=1 DEBUG_LEVEL=0 make V=1 -j16 static_lib tools db_bench
- run : ./db_stress --version # ensure with gflags
- run : make clean
- run : apt-get remove -y libgflags-dev
- run : USE_RTTI=1 DEBUG_LEVEL=0 make V=1 -j16 static_lib tools db_bench
- run : if ./db_stress --version; then false; else true; fi # ensure without gflags
build-linux-lite :
executor : linux-docker
resource_class : large
steps :
- pre-steps
- run : LITE=1 make V=1 J=8 -j8 check
- post-steps
build-linux-lite-release :
executor : linux-docker
resource_class : large
steps :
- checkout # check out the code in the project directory
- run : LITE=1 make V=1 -j8 release
- run : ./db_stress --version # ensure with gflags
- run : make clean
- run : apt-get remove -y libgflags-dev
- run : LITE=1 make V=1 -j8 release
- run : if ./db_stress --version; then false; else true; fi # ensure without gflags
- post-steps
build-linux-clang-no_test_run :
executor : linux-docker
resource_class : xlarge
steps :
- checkout # check out the code in the project directory
- run : CC=clang CXX=clang++ USE_CLANG=1 PORTABLE=1 make V=1 -j16 all
- post-steps
build-linux-clang10-asan :
executor : linux-docker
resource_class : 2xlarge
steps :
- pre-steps
- run : COMPILE_WITH_ASAN=1 CC=clang-10 CXX=clang++-10 ROCKSDB_DISABLE_ALIGNED_NEW=1 USE_CLANG=1 make V=1 -j32 check # aligned new doesn't work for a reason we haven't figured out
- post-steps
build-linux-clang10-mini-tsan :
executor : linux-docker
resource_class : 2xlarge+
steps :
- pre-steps
- run : COMPILE_WITH_TSAN=1 CC=clang-13 CXX=clang++-13 ROCKSDB_DISABLE_ALIGNED_NEW=1 USE_CLANG=1 make V=1 -j32 check
- post-steps
build-linux-clang10-ubsan :
executor : linux-docker
resource_class : 2xlarge
steps :
- pre-steps
- run : COMPILE_WITH_UBSAN=1 OPT="-fsanitize-blacklist=.circleci/ubsan_suppression_list.txt" CC=clang-10 CXX=clang++-10 ROCKSDB_DISABLE_ALIGNED_NEW=1 USE_CLANG=1 make V=1 -j32 ubsan_check # aligned new doesn't work for a reason we haven't figured out
- post-steps
build-linux-valgrind :
executor : linux-docker
resource_class : 2xlarge
steps :
- pre-steps
- run : PORTABLE=1 make V=1 -j32 valgrind_test
- post-steps
build-linux-clang10-clang-analyze :
executor : linux-docker
resource_class : 2xlarge
steps :
- pre-steps
- run : CC=clang-10 CXX=clang++-10 ROCKSDB_DISABLE_ALIGNED_NEW=1 CLANG_ANALYZER="/usr/bin/clang++-10" CLANG_SCAN_BUILD=scan-build-10 USE_CLANG=1 make V=1 -j32 analyze # aligned new doesn't work for a reason we haven't figured out. For an unknown reason, passing "clang++-10" as CLANG_ANALYZER doesn't work, so we need a full path.
- post-steps
- run :
name : "compress test report"
command : tar -cvzf scan_build_report.tar.gz scan_build_report
when : on_fail
- store_artifacts :
path : scan_build_report.tar.gz
destination : scan_build_report
when : on_fail
build-linux-runner :
machine : true
resource_class : facebook/rocksdb-benchmark-sys1
steps :
- pre-steps
- run :
name : "Checked Linux build (Runner)"
command : make V=1 J=8 -j8 check
environment :
LD_LIBRARY_PATH : /usr/local/lib
- post-steps
build-linux-cmake-with-folly :
executor : linux-docker
resource_class : 2xlarge
steps :
- pre-steps
- setup-folly
- build-folly
- run : (mkdir build && cd build && cmake -DUSE_FOLLY=1 -DWITH_GFLAGS=1 -DROCKSDB_BUILD_SHARED=0 .. && make V=1 -j20 && ctest -j20)
- post-steps
build-linux-cmake-with-folly-lite-no-test :
executor : linux-docker
resource_class : 2xlarge
steps :
- pre-steps
- setup-folly
- run : (mkdir build && cd build && cmake -DUSE_FOLLY_LITE=1 -DWITH_GFLAGS=1 .. && make V=1 -j20)
- post-steps
build-linux-cmake-with-benchmark :
executor : linux-docker
resource_class : 2xlarge
steps :
- pre-steps
- run : mkdir build && cd build && cmake -DWITH_GFLAGS=1 -DWITH_BENCHMARK=1 .. && make V=1 -j20 && ctest -j20
- post-steps
build-linux-unity-and-headers :
docker : # executor type
- image : gcc:latest
environment :
EXTRA_CXXFLAGS : -mno-avx512f # Avoids warnings-as-error in avx512fintrin.h, which would otherwise be used on newer hardware
resource_class : large
steps :
- checkout # check out the code in the project directory
- run : apt-get update -y && apt-get install -y libgflags-dev
- run : make V=1 -j8 unity_test
- run : make V=1 -j8 -k check-headers # could be moved to a different build
- post-steps
build-linux-gcc-7-with-folly :
executor : linux-docker
resource_class : 2xlarge
steps :
- pre-steps
- setup-folly
- build-folly
- run : USE_FOLLY=1 CC=gcc-7 CXX=g++-7 V=1 make -j32 check
- post-steps
build-linux-gcc-7-with-folly-lite-no-test :
executor : linux-docker
resource_class : 2xlarge
steps :
- pre-steps
- setup-folly
- run : USE_FOLLY_LITE=1 CC=gcc-7 CXX=g++-7 V=1 make -j32 all
- post-steps
build-linux-gcc-8-no_test_run :
executor : linux-docker
resource_class : 2xlarge
steps :
- pre-steps
- run : CC=gcc-8 CXX=g++-8 V=1 make -j32 all
- post-steps
build-linux-cmake-with-folly-coroutines :
executor : linux-docker
resource_class : 2xlarge
environment :
CC : gcc-10
CXX : g++-10
steps :
- pre-steps
- setup-folly
- build-folly
- run : (mkdir build && cd build && cmake -DUSE_COROUTINES=1 -DWITH_GFLAGS=1 -DROCKSDB_BUILD_SHARED=0 .. && make V=1 -j20 && ctest -j20)
- post-steps
build-linux-gcc-10-cxx20-no_test_run :
executor : linux-docker
resource_class : 2xlarge
steps :
- pre-steps
- run : CC=gcc-10 CXX=g++-10 V=1 ROCKSDB_CXX_STANDARD=c++20 make -j32 all
- post-steps
build-linux-gcc-11-no_test_run :
executor : linux-docker
resource_class : 2xlarge
steps :
- pre-steps
- run : CC=gcc-11 CXX=g++-11 V=1 make -j32 all microbench
- post-steps
build-linux-clang-13-no_test_run :
executor : linux-docker
resource_class : 2xlarge
steps :
- pre-steps
- run : CC=clang-13 CXX=clang++-13 USE_CLANG=1 make -j32 all microbench
- post-steps
# Ensure ASAN+UBSAN with folly, and full testsuite with clang 13
build-linux-clang-13-asan-ubsan-with-folly :
executor : linux-docker
resource_class : 2xlarge
steps :
- pre-steps
- setup-folly
- build-folly
- run : CC=clang-13 CXX=clang++-13 USE_CLANG=1 USE_FOLLY=1 COMPILE_WITH_UBSAN=1 COMPILE_WITH_ASAN=1 make -j32 check
- post-steps
# This job only makes sure the microbench tests are able to run; the benchmark results are not meaningful because the CI host keeps changing.
build-linux-run-microbench :
executor : linux-docker
resource_class : 2xlarge
steps :
- pre-steps
- run : DEBUG_LEVEL=0 make -j32 run_microbench
- post-steps
build-linux-mini-crashtest :
executor : linux-docker
resource_class : large
steps :
- pre-steps
- run : ulimit -S -n `ulimit -H -n` && make V=1 -j8 CRASH_TEST_EXT_ARGS='--duration=960 --max_key=2500000' blackbox_crash_test_with_atomic_flush
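# CRASH_TEST_EXT_ARGS is presumably forwarded to tools/db_crashtest.py as extra
# arguments, here capping the duration and key space for a mini crash test.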
- post-steps
build-linux-crashtest-tiered-storage-bb :
executor : linux-docker
resource_class : 2xlarge
steps :
- pre-steps
- run :
name : "run crashtest"
command : ulimit -S -n `ulimit -H -n` && make V=1 -j32 CRASH_TEST_EXT_ARGS=--duration=10800 blackbox_crash_test_with_tiered_storage
no_output_timeout : 100m
- post-steps
build-linux-crashtest-tiered-storage-wb :
executor : linux-docker
resource_class : 2xlarge
steps :
- pre-steps
- run :
name : "run crashtest"
command : ulimit -S -n `ulimit -H -n` && make V=1 -j32 CRASH_TEST_EXT_ARGS=--duration=10800 whitebox_crash_test_with_tiered_storage
no_output_timeout : 100m
- post-steps
build-windows :
executor : windows-2xlarge
parameters :
extra_cmake_opt :
default : ""
type : string
vs_year :
default : "2019"
type : string
cmake_generator :
default : "Visual Studio 16 2019"
type : string
environment :
THIRDPARTY_HOME : C:/Users/circleci/thirdparty
CMAKE_HOME : C:/Users/circleci/thirdparty/cmake-3.16.4-win64-x64
CMAKE_BIN : C:/Users/circleci/thirdparty/cmake-3.16.4-win64-x64/bin/cmake.exe
SNAPPY_HOME : C:/Users/circleci/thirdparty/snappy-1.1.7
SNAPPY_INCLUDE : C:/Users/circleci/thirdparty/snappy-1.1.7;C:/Users/circleci/thirdparty/snappy-1.1.7/build
SNAPPY_LIB_DEBUG : C:/Users/circleci/thirdparty/snappy-1.1.7/build/Debug/snappy.lib
VS_YEAR : <<parameters.vs_year>>
CMAKE_GENERATOR : <<parameters.cmake_generator>>
steps :
- checkout
- run :
name : "Setup VS"
command : |
if [[ "${VS_YEAR}" == "2019" ]]; then
echo "VS2019 already present."
elif [[ "${VS_YEAR}" == "2017" ]]; then
echo "Installing VS2017..."
powershell .circleci/vs2017_install.ps1
elif [[ "${VS_YEAR}" == "2015" ]]; then
echo "Installing VS2015..."
powershell .circleci/vs2015_install.ps1
fi
- store_artifacts :
path : \Users\circleci\AppData\Local\Temp\vslogs.zip
- run :
name : "Install thirdparty dependencies"
command : |
mkdir ${THIRDPARTY_HOME}
cd ${THIRDPARTY_HOME}
echo "Installing CMake..."
curl --fail --silent --show-error --output cmake-3.16.4-win64-x64.zip --location https://github.com/Kitware/CMake/releases/download/v3.16.4/cmake-3.16.4-win64-x64.zip
unzip -q cmake-3.16.4-win64-x64.zip
echo "Building Snappy dependency..."
curl --fail --silent --show-error --output snappy-1.1.7.zip --location https://github.com/google/snappy/archive/1.1.7.zip
unzip -q snappy-1.1.7.zip
cd snappy-1.1.7
mkdir build
cd build
${CMAKE_BIN} -G "${CMAKE_GENERATOR}" ..
msbuild.exe Snappy.sln -maxCpuCount -property:Configuration=Debug -property:Platform=x64
- run :
name : "Build RocksDB"
command : |
mkdir build
cd build
${CMAKE_BIN} -G "${CMAKE_GENERATOR}" -DCMAKE_BUILD_TYPE=Debug -DOPTDBG=1 -DPORTABLE=1 -DSNAPPY=1 -DJNI=1 << parameters.extra_cmake_opt >> ..
cd ..
echo "Building with VS version: ${CMAKE_GENERATOR}"
msbuild.exe build/rocksdb.sln -maxCpuCount -property:Configuration=Debug -property:Platform=x64
- run :
name : "Test RocksDB"
shell : powershell.exe
command : |
build_tools\run_ci_db_test.ps1 -SuiteRun arena_test,db_basic_test,db_test,db_test2,db_merge_operand_test,bloom_test,c_test,coding_test,crc32c_test,dynamic_bloom_test,env_basic_test,env_test,hash_test,random_test -Concurrency 16
build-linux-java :
executor : linux-docker
resource_class : large
steps :
- pre-steps
- run :
name : "Set Java Environment"
command : |
echo "JAVA_HOME=${JAVA_HOME}"
echo 'export PATH=$JAVA_HOME/bin:$PATH' >> $BASH_ENV
which java && java -version
which javac && javac -version
- run :
name : "Test RocksDBJava"
command : make V=1 J=8 -j8 jtest
- post-steps
build-linux-java-static :
executor : linux-docker
resource_class : large
steps :
- pre-steps
- run :
name : "Set Java Environment"
command : |
echo "JAVA_HOME=${JAVA_HOME}"
echo 'export PATH=$JAVA_HOME/bin:$PATH' >> $BASH_ENV
which java && java -version
which javac && javac -version
- run :
name : "Build RocksDBJava Static Library"
command : make V=1 J=8 -j8 rocksdbjavastatic
- post-steps
build-macos-java :
macos :
xcode : 12.5.1
resource_class : large
environment :
JAVA_HOME : /Library/Java/JavaVirtualMachines/adoptopenjdk-8.jdk/Contents/Home
ROCKSDB_DISABLE_JEMALLOC : 1 # jemalloc causes Java 8 to crash
steps :
- increase-max-open-files-on-macos
- install-gflags-on-macos
- install-jdk8-on-macos
- pre-steps-macos
- run :
name : "Set Java Environment"
command : |
echo "JAVA_HOME=${JAVA_HOME}"
echo 'export PATH=$JAVA_HOME/bin:$PATH' >> $BASH_ENV
which java && java -version
which javac && javac -version
- run :
name : "Test RocksDBJava"
command : make V=1 J=16 -j16 jtest
no_output_timeout : 20m
- post-steps
build-macos-java-static :
macos :
xcode : 12.5.1
resource_class : large
environment :
JAVA_HOME : /Library/Java/JavaVirtualMachines/adoptopenjdk-8.jdk/Contents/Home
steps :
- increase-max-open-files-on-macos
- install-gflags-on-macos
- install-cmake-on-macos
- install-jdk8-on-macos
- pre-steps-macos
- run :
name : "Set Java Environment"
command : |
echo "JAVA_HOME=${JAVA_HOME}"
echo 'export PATH=$JAVA_HOME/bin:$PATH' >> $BASH_ENV
which java && java -version
which javac && javac -version
- run :
name : "Build RocksDBJava x86 and ARM Static Libraries"
command : make V=1 J=16 -j16 rocksdbjavastaticosx
no_output_timeout : 20m
- post-steps
build-macos-java-static-universal :
macos :
xcode : 12.5.1
resource_class : large
environment :
JAVA_HOME : /Library/Java/JavaVirtualMachines/adoptopenjdk-8.jdk/Contents/Home
steps :
- increase-max-open-files-on-macos
- install-gflags-on-macos
- install-cmake-on-macos
- install-jdk8-on-macos
- pre-steps-macos
- run :
name : "Set Java Environment"
command : |
echo "JAVA_HOME=${JAVA_HOME}"
echo 'export PATH=$JAVA_HOME/bin:$PATH' >> $BASH_ENV
which java && java -version
which javac && javac -version
- run :
name : "Build RocksDBJava Universal Binary Static Library"
command : make V=1 J=16 -j16 rocksdbjavastaticosx_ub
no_output_timeout : 20m
- post-steps
build-examples :
executor : linux-docker
resource_class : large
steps :
- pre-steps
- run :
name : "Build examples"
command : |
make V=1 -j4 static_lib && cd examples && make V=1 -j4
- post-steps
build-cmake-mingw :
executor : linux-docker
resource_class : large
steps :
- pre-steps
- run : update-alternatives --set x86_64-w64-mingw32-g++ /usr/bin/x86_64-w64-mingw32-g++-posix
- run :
name : "Build cmake-mingw"
command : |
export PATH=$JAVA_HOME/bin:$PATH
echo "JAVA_HOME=${JAVA_HOME}"
which java && java -version
which javac && javac -version
mkdir build && cd build && cmake -DJNI=1 -DWITH_GFLAGS=OFF .. -DCMAKE_C_COMPILER=x86_64-w64-mingw32-gcc -DCMAKE_CXX_COMPILER=x86_64-w64-mingw32-g++ -DCMAKE_SYSTEM_NAME=Windows && make -j4 rocksdb rocksdbjni
- post-steps
build-linux-non-shm :
executor : linux-docker
resource_class : 2xlarge
environment :
TEST_TMPDIR : /tmp/rocksdb_test_tmp
steps :
- pre-steps
- run : make V=1 -j32 check
- post-steps
build-linux-arm-test-full :
machine :
image : ubuntu-2004:202111-02
resource_class : arm.large
steps :
- pre-steps
- install-gflags
- run : make V=1 J=4 -j4 check
- post-steps
build-linux-arm :
machine :
image : ubuntu-2004:202111-02
resource_class : arm.large
steps :
- pre-steps
- install-gflags
- run : ROCKSDBTESTS_PLATFORM_DEPENDENT=only make V=1 J=4 -j4 all_but_some_tests check_some
- post-steps
build-linux-arm-cmake-no_test_run :
machine :
image : ubuntu-2004:202111-02
resource_class : arm.large
environment :
JAVA_HOME : /usr/lib/jvm/java-8-openjdk-arm64
steps :
- pre-steps
- install-gflags
- run :
name : "Set Java Environment"
command : |
echo "JAVA_HOME=${JAVA_HOME}"
echo 'export PATH=$JAVA_HOME/bin:$PATH' >> $BASH_ENV
which java && java -version
which javac && javac -version
- run :
name : "Build with cmake"
command : |
mkdir build
cd build
cmake -DCMAKE_BUILD_TYPE=Release -DWITH_TESTS=0 -DWITH_GFLAGS=1 -DWITH_BENCHMARK_TOOLS=0 -DWITH_TOOLS=0 -DWITH_CORE_TOOLS=1 ..
make -j4
- run :
name : "Build Java with cmake"
command : |
rm -rf build
mkdir build
cd build
cmake -DJNI=1 -DCMAKE_BUILD_TYPE=Release -DWITH_GFLAGS=1 ..
make -j4 rocksdb rocksdbjni
- post-steps
build-format-compatible :
executor : linux-docker
resource_class : 2xlarge
steps :
- pre-steps
- run :
name : "test"
command : |
export TEST_TMPDIR=/dev/shm/rocksdb
rm -rf /dev/shm/rocksdb
mkdir /dev/shm/rocksdb
tools/check_format_compatible.sh
- post-steps
build-fuzzers :
executor : linux-docker
resource_class : large
steps :
- pre-steps
- run :
name : "Build rocksdb lib"
command : CC=clang-13 CXX=clang++-13 USE_CLANG=1 make -j4 static_lib
- run :
name : "Build fuzzers"
command : cd fuzz && make sst_file_writer_fuzzer db_fuzzer db_map_fuzzer
- post-steps
benchmark-linux : # use a private CircleCI runner (resource_class) to run the job
machine : true
resource_class : facebook/rocksdb-benchmark-sys1
steps :
- build-for-benchmarks
- perform-benchmarks
- post-benchmarks
workflows :
version : 2
jobs-linux-run-tests :
jobs :
- build-linux
- build-linux-cmake-with-folly
- build-linux-cmake-with-folly-lite-no-test
- build-linux-gcc-7-with-folly
- build-linux-gcc-7-with-folly-lite-no-test
- build-linux-cmake-with-folly-coroutines
- build-linux-cmake-with-benchmark
- build-linux-encrypted_env-no_compression
- build-linux-lite
jobs-linux-run-tests-san :
jobs :
- build-linux-clang10-asan
- build-linux-clang10-ubsan
- build-linux-clang10-mini-tsan
- build-linux-shared_lib-alt_namespace-status_checked
jobs-linux-no-test-run :
jobs :
- build-linux-release
- build-linux-release-rtti
- build-linux-lite-release
- build-examples
- build-fuzzers
- build-linux-clang-no_test_run
- build-linux-clang-13-no_test_run
- build-linux-gcc-8-no_test_run
- build-linux-gcc-10-cxx20-no_test_run
- build-linux-gcc-11-no_test_run
- build-linux-arm-cmake-no_test_run
jobs-linux-other-checks :
jobs :
- build-linux-clang10-clang-analyze
- build-linux-unity-and-headers
- build-linux-mini-crashtest
jobs-windows :
jobs :
- build-windows :
name : "build-windows-vs2019"
- build-windows :
name : "build-windows-vs2019-cxx20"
extra_cmake_opt : -DCMAKE_CXX_STANDARD=20
- build-windows :
name : "build-windows-vs2017"
vs_year : "2017"
cmake_generator : "Visual Studio 15 Win64"
- build-cmake-mingw
jobs-java :
jobs :
- build-linux-java
- build-linux-java-static
- build-macos-java
- build-macos-java-static
- build-macos-java-static-universal
jobs-macos :
jobs :
- build-macos
- build-macos-cmake :
run_even_tests : true
- build-macos-cmake :
run_even_tests : false
jobs-linux-arm :
jobs :
- build-linux-arm
build-fuzzers :
jobs :
- build-fuzzers
benchmark-linux :
triggers :
- schedule :
cron : "0 * * * *"
filters :
branches :
only :
- main
jobs :
- benchmark-linux
nightly :
triggers :
- schedule :
cron : "0 9 * * *"
filters :
branches :
only :
- main
jobs :
- build-format-compatible
- build-linux-arm-test-full
- build-linux-run-microbench
- build-linux-non-shm
- build-linux-clang-13-asan-ubsan-with-folly
- build-linux-valgrind