Migrate to docker for CI run (#10496)

Summary:
Moved Linux builds to Docker to avoid CI instability caused by dependency-installation sites being down.
Added the `Dockerfile` which is used to build the image.
Build time is also significantly reduced: dependencies no longer need to be installed, and 2xlarge+ instances are used for slow builds (like the tsan tests).
Also fixed a few issues detected while building this:
* `DestroyDB()` Status not checked for a few tests
* possible `nullptr` dereference in `inlineskiplist.h`
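For reference, a minimal sketch of the status-checking pattern applied throughout the test diffs below (using the RocksDB API names that appear in the per-file hunks):

    // Before: the returned Status is dropped, which makes
    // ASSERT_STATUS_CHECKED builds abort (see the include/rocksdb/status.h hunk).
    DestroyDB(dbname, options);

    // After, in test bodies: assert success. In constructors and destructors,
    // where gtest's ASSERT_* macros cannot be used, EXPECT_OK is used instead.
    ASSERT_OK(DestroyDB(dbname, options));

    // After, where failure is tolerable (e.g. the DB may not exist yet):
    DestroyDB(dbname, options).PermitUncheckedError();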

Pull Request resolved: https://github.com/facebook/rocksdb/pull/10496

Test Plan: CI

Reviewed By: ajkr

Differential Revision: D38554200

Pulled By: jay-zhuang

fbshipit-source-id: 16e8fb2bf07b9c84bb27fb18421c4d54f2f248fd
Branch: main
Author: Jay Zhuang (committed by Facebook GitHub Bot)
Commit: 5d3aefb682 (parent: a0798f6f92)
23 changed files:

.circleci/config.yml | 354
build_tools/ubuntu20_image/Dockerfile | 56
db/column_family_test.cc | 2
db/compact_files_test.cc | 16
db/corruption_test.cc | 1
db/db_block_cache_test.cc | 2
db/manual_compaction_test.cc | 8
db/perf_context_test.cc | 10
db/wal_manager_test.cc | 3
db/write_callback_test.cc | 8
env/env_test.cc | 46
include/rocksdb/status.h | 2
memtable/inlineskiplist.h | 2
memtable/write_buffer_manager_test.cc | 2
tools/db_sanity_test.cc | 7
tools/ldb_cmd_test.cc | 12
tools/reduce_levels_test.cc | 2
utilities/backup/backup_engine_test.cc | 98
utilities/cassandra/cassandra_functional_test.cc | 3
utilities/transactions/lock/range/range_locking_test.cc | 4
utilities/transactions/optimistic_transaction_test.cc | 4
utilities/ttl/ttl_test.cc | 4
utilities/write_batch_with_index/write_batch_with_index_test.cc | 4

@ -3,11 +3,6 @@ version: 2.1
orbs:
win: circleci/windows@2.4.0
aliases:
- &notify-on-main-failure
fail_only: true
only_for_branches: main
commands:
install-cmake-on-macos:
steps:
@ -61,35 +56,21 @@ commands:
- store_artifacts: # store LOG for debugging if there's any
path: LOG
- run: # on fail, compress Test Logs for diagnosing the issue
name: Compress Test Logs
command: tar -cvzf t.tar.gz t
when: on_fail
name: Compress Test Logs
command: tar -cvzf t.tar.gz t
when: on_fail
- store_artifacts: # on fail, store Test Logs for diagnosing the issue
path: t.tar.gz
destination: test_logs
when: on_fail
install-clang-10:
steps:
- run:
name: Install Clang 10
- run: # store core dumps if there's any
command: |
wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
echo "deb http://apt.llvm.org/focal/ llvm-toolchain-focal-10 main" | sudo tee -a /etc/apt/sources.list
echo "deb-src http://apt.llvm.org/focal/ llvm-toolchain-focal-10 main" | sudo tee -a /etc/apt/sources.list
echo "APT::Acquire::Retries \"10\";" | sudo tee -a /etc/apt/apt.conf.d/80-retries # llvm.org unreliable
sudo apt-get update -y && sudo apt-get install -y clang-10
install-clang-13:
steps:
- run:
name: Install Clang 13
command: |
echo "deb http://apt.llvm.org/focal/ llvm-toolchain-focal-13 main" | sudo tee -a /etc/apt/sources.list
echo "deb-src http://apt.llvm.org/focal/ llvm-toolchain-focal-13 main" | sudo tee -a /etc/apt/sources.list
echo "APT::Acquire::Retries \"10\";" | sudo tee -a /etc/apt/apt.conf.d/80-retries # llvm.org unreliable
wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key|sudo apt-key add -
sudo apt-get update -y && sudo apt-get install -y clang-13
mkdir -p /tmp/core_dumps
cp core.* /tmp/core_dumps
when: on_fail
- store_artifacts:
path: /tmp/core_dumps
when: on_fail
install-gflags:
steps:
@ -98,33 +79,6 @@ commands:
command: |
sudo apt-get update -y && sudo apt-get install -y libgflags-dev
install-benchmark:
steps:
- run:
name: Install ninja build
command: sudo apt-get update -y && sudo apt-get install -y ninja-build
- run:
name: Install benchmark
command: |
git clone --depth 1 --branch v1.6.1 https://github.com/google/benchmark.git ~/benchmark
cd ~/benchmark && mkdir build && cd build
cmake .. -GNinja -DCMAKE_BUILD_TYPE=Release -DBENCHMARK_ENABLE_GTEST_TESTS=0
ninja && sudo ninja install
install-valgrind:
steps:
- run:
name: Install valgrind
command: sudo apt-get update -y && sudo apt-get install -y valgrind
upgrade-cmake:
steps:
- run:
name: Upgrade cmake
command: |
sudo apt remove --purge cmake
sudo snap install cmake --classic
install-gflags-on-macos:
steps:
- run:
@ -132,48 +86,8 @@ commands:
command: |
HOMEBREW_NO_AUTO_UPDATE=1 brew install gflags
install-gtest-parallel:
steps:
- run:
name: Install gtest-parallel
command: |
git clone --single-branch --branch master --depth 1 https://github.com/google/gtest-parallel.git ~/gtest-parallel
echo 'export PATH=$HOME/gtest-parallel:$PATH' >> $BASH_ENV
install-compression-libs:
steps:
- run:
name: Install compression libs
command: |
sudo apt-get update -y && sudo apt-get install -y libsnappy-dev zlib1g-dev libbz2-dev liblz4-dev libzstd-dev
install-streaming-compress-libs:
steps:
- run:
name: Install streaming compression libs
command: |
sudo apt-get update -y && sudo apt-get install -y libzstd-dev
install-libprotobuf-mutator:
steps:
- run:
name: Install libprotobuf-mutator libs
command: |
git clone -b v1.0 git@github.com:google/libprotobuf-mutator.git ~/libprotobuf-mutator
cd ~/libprotobuf-mutator && git checkout ffd86a32874e5c08a143019aad1aaf0907294c9f && mkdir build && cd build
cmake .. -GNinja -DCMAKE_C_COMPILER=clang-13 -DCMAKE_CXX_COMPILER=clang++-13 -DCMAKE_BUILD_TYPE=Release -DLIB_PROTO_MUTATOR_DOWNLOAD_PROTOBUF=ON
ninja && sudo ninja install
- run:
name: Setup environment variables
command: |
echo "export PKG_CONFIG_PATH=/usr/local/OFF/:~/libprotobuf-mutator/build/external.protobuf/lib/pkgconfig/" >> $BASH_ENV
echo "export PROTOC_BIN=~/libprotobuf-mutator/build/external.protobuf/bin/protoc" >> $BASH_ENV
setup-folly:
steps:
- run:
name: Install folly dependencies
command: |
sudo apt-get install libgoogle-glog-dev
- run:
name: Checkout folly sources
command: |
@ -221,6 +135,21 @@ executors:
image: 'windows-server-2019-vs2019:stable'
resource_class: windows.2xlarge
shell: bash.exe
linux-docker:
docker:
# The image configuration is build_tools/ubuntu20_image/Dockerfile
# To update and build the image:
# $ cd build_tools/ubuntu20_image
# $ docker build -t zjay437/rocksdb:0.5 .
# $ docker push zjay437/rocksdb:0.5
# `zjay437` is the account name for zjay@meta.com, whose read/write token is shared internally. To log in:
# $ docker login --username zjay437
# Or feel free to host the image under your own Docker Hub account; Meta employees should already have an account and can log in with SSO.
# To avoid impacting existing CI runs, please bump the version every time a new image is created.
# To run the CI image environment locally:
# $ docker run --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -it zjay437/rocksdb:0.5 bash
# The options `--cap-add=SYS_PTRACE --security-opt seccomp=unconfined` allow gdb to attach to an existing process.
- image: zjay437/rocksdb:0.5
jobs:
build-macos:
@ -272,172 +201,132 @@ jobs:
- post-steps
build-linux:
machine:
image: ubuntu-2004:202111-02
executor: linux-docker
resource_class: 2xlarge
steps:
- pre-steps
- install-gflags
- install-streaming-compress-libs
- run: make V=1 J=32 -j32 check
- post-steps
build-linux-encrypted_env-no_compression:
machine:
image: ubuntu-2004:202111-02
executor: linux-docker
resource_class: 2xlarge
steps:
- pre-steps
- install-gflags
- run: ENCRYPTED_ENV=1 ROCKSDB_DISABLE_SNAPPY=1 ROCKSDB_DISABLE_ZLIB=1 ROCKSDB_DISABLE_BZIP=1 ROCKSDB_DISABLE_LZ4=1 ROCKSDB_DISABLE_ZSTD=1 make V=1 J=32 -j32 check
- run: |
./sst_dump --help | egrep -q 'Supported compression types: kNoCompression$' # Verify no compiled in compression
- post-steps
build-linux-shared_lib-alt_namespace-status_checked:
machine:
image: ubuntu-2004:202111-02
executor: linux-docker
resource_class: 2xlarge
steps:
- pre-steps
- install-gflags
- run: ASSERT_STATUS_CHECKED=1 TEST_UINT128_COMPAT=1 ROCKSDB_MODIFY_NPHASH=1 LIB_MODE=shared OPT="-DROCKSDB_NAMESPACE=alternative_rocksdb_ns" make V=1 -j32 check
- post-steps
build-linux-release:
machine:
image: ubuntu-2004:202111-02
executor: linux-docker
resource_class: 2xlarge
steps:
- checkout # check out the code in the project directory
- run: make V=1 -j32 release
- run: if ./db_stress --version; then false; else true; fi # ensure without gflags
- install-gflags
- run: make V=1 -j32 release
- run: ./db_stress --version # ensure with gflags
- run: make clean
- run: apt-get remove -y libgflags-dev
- run: make V=1 -j32 release
- run: if ./db_stress --version; then false; else true; fi # ensure without gflags
- post-steps
build-linux-release-rtti:
machine:
image: ubuntu-2004:202111-02
executor: linux-docker
resource_class: xlarge
steps:
- checkout # check out the code in the project directory
- run: make clean
- run: USE_RTTI=1 DEBUG_LEVEL=0 make V=1 -j16 static_lib tools db_bench
- run: if ./db_stress --version; then false; else true; fi # ensure without gflags
- run: sudo apt-get update -y && sudo apt-get install -y libgflags-dev
- run: ./db_stress --version # ensure with gflags
- run: make clean
- run: apt-get remove -y libgflags-dev
- run: USE_RTTI=1 DEBUG_LEVEL=0 make V=1 -j16 static_lib tools db_bench
- run: ./db_stress --version # ensure with gflags
- run: if ./db_stress --version; then false; else true; fi # ensure without gflags
build-linux-lite:
machine:
image: ubuntu-2004:202111-02
executor: linux-docker
resource_class: large
steps:
- pre-steps
- install-gflags
- run: LITE=1 make V=1 J=8 -j8 check
- post-steps
build-linux-lite-release:
machine:
image: ubuntu-2004:202111-02
executor: linux-docker
resource_class: large
steps:
- checkout # check out the code in the project directory
- run: LITE=1 make V=1 -j8 release
- run: if ./db_stress --version; then false; else true; fi # ensure without gflags
- install-gflags
- run: LITE=1 make V=1 -j8 release
- run: ./db_stress --version # ensure with gflags
- run: make clean
- run: apt-get remove -y libgflags-dev
- run: LITE=1 make V=1 -j8 release
- run: if ./db_stress --version; then false; else true; fi # ensure without gflags
- post-steps
build-linux-clang-no_test_run:
machine:
image: ubuntu-2004:202111-02
executor: linux-docker
resource_class: xlarge
steps:
- checkout # check out the code in the project directory
- run: sudo apt-get update -y && sudo apt-get install -y clang libgflags-dev libtbb-dev
- run: CC=clang CXX=clang++ USE_CLANG=1 PORTABLE=1 make V=1 -j16 all
- post-steps
build-linux-clang10-asan:
machine:
image: ubuntu-2004:202111-02
executor: linux-docker
resource_class: 2xlarge
steps:
- pre-steps
- install-gflags
- install-clang-10
- run: COMPILE_WITH_ASAN=1 CC=clang-10 CXX=clang++-10 ROCKSDB_DISABLE_ALIGNED_NEW=1 USE_CLANG=1 make V=1 -j32 check # aligned new doesn't work for reason we haven't figured out
- post-steps
build-linux-clang10-mini-tsan:
machine:
image: ubuntu-2004:202111-02
resource_class: 2xlarge
# find test list by `make list_all_tests`
parameters:
start_test:
default: ""
type: string
end_test:
default: ""
type: string
executor: linux-docker
resource_class: 2xlarge+
steps:
- pre-steps
- install-gflags
- install-clang-10
- install-gtest-parallel
- run:
name: "Build unit tests"
command: |
echo "env: $(env)"
ROCKSDBTESTS_START=<<parameters.start_test>> ROCKSDBTESTS_END=<<parameters.end_test>> ROCKSDBTESTS_SUBSET_TESTS_TO_FILE=/tmp/test_list COMPILE_WITH_TSAN=1 CC=clang-10 CXX=clang++-10 ROCKSDB_DISABLE_ALIGNED_NEW=1 USE_CLANG=1 make V=1 -j32 --output-sync=target build_subset_tests
- run:
name: "Run unit tests in parallel"
command: |
sed -i 's/[[:space:]]*$//; s/ / \.\//g; s/.*/.\/&/' /tmp/test_list
cat /tmp/test_list
gtest-parallel $(</tmp/test_list) --output_dir=/tmp | cat # pipe to cat to continuously output status on circleci UI. Otherwise, no status will be printed while the job is running.
- run: COMPILE_WITH_TSAN=1 CC=clang-13 CXX=clang++-13 ROCKSDB_DISABLE_ALIGNED_NEW=1 USE_CLANG=1 make V=1 -j32 check
- post-steps
build-linux-clang10-ubsan:
machine:
image: ubuntu-2004:202111-02
executor: linux-docker
resource_class: 2xlarge
steps:
- pre-steps
- install-gflags
- install-clang-10
- run: COMPILE_WITH_UBSAN=1 OPT="-fsanitize-blacklist=.circleci/ubsan_suppression_list.txt" CC=clang-10 CXX=clang++-10 ROCKSDB_DISABLE_ALIGNED_NEW=1 USE_CLANG=1 make V=1 -j32 ubsan_check # aligned new doesn't work for reason we haven't figured out
- post-steps
build-linux-valgrind:
machine:
image: ubuntu-2004:202111-02
executor: linux-docker
resource_class: 2xlarge
steps:
- pre-steps
- install-gflags
- install-valgrind
- run: PORTABLE=1 make V=1 -j32 valgrind_test
- post-steps
build-linux-clang10-clang-analyze:
machine:
image: ubuntu-2004:202111-02
executor: linux-docker
resource_class: 2xlarge
steps:
- pre-steps
- install-gflags
- install-clang-10
- run: sudo apt-get update -y && sudo apt-get install -y clang-tools-10
- run: CC=clang-10 CXX=clang++-10 ROCKSDB_DISABLE_ALIGNED_NEW=1 CLANG_ANALYZER="/usr/bin/clang++-10" CLANG_SCAN_BUILD=scan-build-10 USE_CLANG=1 make V=1 -j32 analyze # aligned new doesn't work for reason we haven't figured out. For unknown, reason passing "clang++-10" as CLANG_ANALYZER doesn't work, and we need a full path.
- post-steps
- run:
name: "compress test report"
command: tar -cvzf scan_build_report.tar.gz scan_build_report
when: on_fail
- store_artifacts:
path: scan_build_report.tar.gz
destination: scan_build_report
when: on_fail
build-linux-runner:
machine: true
@ -452,26 +341,20 @@ jobs:
- post-steps
build-linux-cmake-with-folly:
machine:
image: ubuntu-2004:202111-02
executor: linux-docker
resource_class: 2xlarge
steps:
- pre-steps
- install-gflags
- upgrade-cmake
- setup-folly
- run: (mkdir build && cd build && cmake -DUSE_FOLLY=1 -DWITH_GFLAGS=1 .. && make V=1 -j20 && ctest -j20)
- post-steps
build-linux-cmake-with-benchmark:
machine:
image: ubuntu-2004:202111-02
executor: linux-docker
resource_class: 2xlarge
steps:
- pre-steps
- install-gflags
- install-benchmark
- run: (mkdir build && cd build && cmake -DWITH_GFLAGS=1 -DWITH_BENCHMARK=1 .. && make V=1 -j20 && ctest -j20)
- run: mkdir build && cd build && cmake -DWITH_GFLAGS=1 -DWITH_BENCHMARK=1 .. && make V=1 -j20 && ctest -j20
- post-steps
build-linux-unity-and-headers:
@ -488,101 +371,78 @@ jobs:
- post-steps
build-linux-gcc-7-with-folly:
machine:
image: ubuntu-2004:202111-02
executor: linux-docker
resource_class: 2xlarge
steps:
- pre-steps
- run: sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test && sudo apt-get update -y && sudo apt-get install gcc-7 g++-7 libgflags-dev
- setup-folly
- run: USE_FOLLY=1 CC=gcc-7 CXX=g++-7 V=1 make -j32 check
- post-steps
build-linux-gcc-8-no_test_run:
machine:
image: ubuntu-2004:202111-02
resource_class: xlarge
executor: linux-docker
resource_class: 2xlarge
steps:
- pre-steps
- run: sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test && sudo apt-get update -y && sudo apt-get install gcc-8 g++-8 libgflags-dev
- run: CC=gcc-8 CXX=g++-8 V=1 make -j16 all
- run: CC=gcc-8 CXX=g++-8 V=1 make -j32 all
- post-steps
build-linux-gcc-10-cxx20-no_test_run:
machine:
image: ubuntu-2004:202111-02
resource_class: xlarge
executor: linux-docker
resource_class: 2xlarge
steps:
- pre-steps
- run: sudo apt-get update -y && sudo apt-get install gcc-10 g++-10 libgflags-dev
- run: CC=gcc-10 CXX=g++-10 V=1 ROCKSDB_CXX_STANDARD=c++20 make -j16 all
- run: CC=gcc-10 CXX=g++-10 V=1 ROCKSDB_CXX_STANDARD=c++20 make -j32 all
- post-steps
build-linux-gcc-11-no_test_run:
machine:
image: ubuntu-2004:202111-02
resource_class: xlarge
executor: linux-docker
resource_class: 2xlarge
steps:
- pre-steps
- run: sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test && sudo apt-get update -y && sudo apt-get install gcc-11 g++-11 libgflags-dev
- install-benchmark
- run: CC=gcc-11 CXX=g++-11 V=1 make -j16 all microbench
- run: CC=gcc-11 CXX=g++-11 V=1 make -j32 all microbench
- post-steps
build-linux-clang-13-no_test_run:
machine:
image: ubuntu-2004:202111-02
resource_class: xlarge
executor: linux-docker
resource_class: 2xlarge
steps:
- pre-steps
- install-clang-13
- install-benchmark
- run: CC=clang-13 CXX=clang++-13 USE_CLANG=1 make -j16 all microbench
- run: CC=clang-13 CXX=clang++-13 USE_CLANG=1 make -j32 all microbench
- post-steps
# Ensure ASAN+UBSAN with folly, and full testsuite with clang 13
build-linux-clang-13-asan-ubsan-with-folly:
machine:
image: ubuntu-2004:202111-02
executor: linux-docker
resource_class: 2xlarge
steps:
- pre-steps
- install-clang-13
- install-gflags
- setup-folly
- run: CC=clang-13 CXX=clang++-13 USE_CLANG=1 USE_FOLLY=1 COMPILE_WITH_UBSAN=1 COMPILE_WITH_ASAN=1 make -j32 check
- post-steps
# This job is only to make sure the microbench tests are able to run; the benchmark results are not meaningful because the CI hosts vary.
build-linux-run-microbench:
machine:
image: ubuntu-2004:202111-02
executor: linux-docker
resource_class: 2xlarge
steps:
- pre-steps
- install-benchmark
- run: DEBUG_LEVEL=0 make -j32 run_microbench
- post-steps
build-linux-mini-crashtest:
machine:
image: ubuntu-2004:202111-02
executor: linux-docker
resource_class: large
steps:
- pre-steps
- install-gflags
- install-compression-libs
- run: ulimit -S -n `ulimit -H -n` && make V=1 -j8 CRASH_TEST_EXT_ARGS=--duration=960 blackbox_crash_test_with_atomic_flush
- post-steps
build-linux-crashtest-tiered-storage-bb:
machine:
image: ubuntu-2004:202111-02
executor: linux-docker
resource_class: 2xlarge
steps:
- pre-steps
- install-gflags
- install-compression-libs
- run:
name: "run crashtest"
command: ulimit -S -n `ulimit -H -n` && make V=1 -j32 CRASH_TEST_EXT_ARGS=--duration=10800 blackbox_crash_test_with_tiered_storage
@ -590,13 +450,10 @@ jobs:
- post-steps
build-linux-crashtest-tiered-storage-wb:
machine:
image: ubuntu-2004:202111-02
executor: linux-docker
resource_class: 2xlarge
steps:
- pre-steps
- install-gflags
- install-compression-libs
- run:
name: "run crashtest"
command: ulimit -S -n `ulimit -H -n` && make V=1 -j32 CRASH_TEST_EXT_ARGS=--duration=10800 whitebox_crash_test_with_tiered_storage
@ -672,14 +529,10 @@ jobs:
build_tools\run_ci_db_test.ps1 -SuiteRun db_basic_test,db_test,db_test2,db_merge_operand_test,bloom_test,c_test,coding_test,crc32c_test,dynamic_bloom_test,env_basic_test,env_test,hash_test,random_test -Concurrency 16
build-linux-java:
machine:
image: ubuntu-2004:202111-02
executor: linux-docker
resource_class: large
environment:
JAVA_HOME: /usr/lib/jvm/java-1.8.0-openjdk-amd64
steps:
- pre-steps
- install-gflags
- run:
name: "Set Java Environment"
command: |
@ -693,14 +546,10 @@ jobs:
- post-steps
build-linux-java-static:
machine:
image: ubuntu-2004:202111-02
executor: linux-docker
resource_class: large
environment:
JAVA_HOME: /usr/lib/jvm/java-1.8.0-openjdk-amd64
steps:
- pre-steps
- install-gflags
- run:
name: "Set Java Environment"
command: |
@ -786,34 +635,25 @@ jobs:
- post-steps
build-examples:
machine:
image: ubuntu-2004:202111-02
executor: linux-docker
resource_class: large
steps:
- pre-steps
- install-gflags
- run:
name: "Build examples"
command: |
OPT=-DTRAVIS V=1 make -j4 static_lib && cd examples && make -j4
make V=1 -j4 static_lib && cd examples && make V=1 -j4
- post-steps
build-cmake-mingw:
machine:
image: ubuntu-2004:202111-02
executor: linux-docker
resource_class: large
steps:
- pre-steps
- install-gflags
- run: sudo apt-get update -y && sudo apt-get install -y mingw-w64
- run: sudo update-alternatives --set x86_64-w64-mingw32-g++ /usr/bin/x86_64-w64-mingw32-g++-posix
- run: update-alternatives --set x86_64-w64-mingw32-g++ /usr/bin/x86_64-w64-mingw32-g++-posix
- run:
name: "Build cmake-mingw"
command: |
sudo apt-get install snapd && sudo snap install cmake --beta --classic
export PATH=/snap/bin:$PATH
sudo apt-get install -y openjdk-8-jdk
export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
export PATH=$JAVA_HOME/bin:$PATH
echo "JAVA_HOME=${JAVA_HOME}"
which java && java -version
@ -822,14 +662,12 @@ jobs:
- post-steps
build-linux-non-shm:
machine:
image: ubuntu-2004:202111-02
executor: linux-docker
resource_class: 2xlarge
environment:
TEST_TMPDIR: /tmp/rocksdb_test_tmp
steps:
- pre-steps
- install-gflags
- run: make V=1 -j32 check
- post-steps
@ -887,13 +725,10 @@ jobs:
- post-steps
build-format-compatible:
machine:
image: ubuntu-2004:202111-02
executor: linux-docker
resource_class: 2xlarge
steps:
- pre-steps
- install-gflags
- install-compression-libs
- run:
name: "test"
command: |
@ -904,14 +739,10 @@ jobs:
- post-steps
build-fuzzers:
machine:
image: ubuntu-2004:202111-02
executor: linux-docker
resource_class: large
steps:
- pre-steps
- install-clang-13
- run: sudo apt-get update -y && sudo apt-get install -y cmake ninja-build binutils liblzma-dev libz-dev pkg-config autoconf libtool
- install-libprotobuf-mutator
- run:
name: "Build rocksdb lib"
command: CC=clang-13 CXX=clang++-13 USE_CLANG=1 make -j4 static_lib
@ -942,12 +773,7 @@ workflows:
jobs:
- build-linux-clang10-asan
- build-linux-clang10-ubsan
- build-linux-clang10-mini-tsan:
start_test: ""
end_test: "env_test"
- build-linux-clang10-mini-tsan:
start_test: "env_test"
end_test: ""
- build-linux-clang10-mini-tsan
- build-linux-shared_lib-alt_namespace-status_checked
jobs-linux-no-test-run:
jobs:

@ -0,0 +1,56 @@
# from official ubuntu 20.04
FROM ubuntu:20.04
# update system
RUN apt-get update && apt-get upgrade -y
# install basic tools
RUN apt-get install -y vim wget curl
# install tzdata noninteractive
RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt-get -y install tzdata
# install git and default compilers
RUN apt-get install -y git gcc g++ clang clang-tools
# install basic packages
RUN apt-get install -y lsb-release software-properties-common gnupg
# install gflags, tbb
RUN apt-get install -y libgflags-dev libtbb-dev
# install compression libs
RUN apt-get install -y libsnappy-dev zlib1g-dev libbz2-dev liblz4-dev libzstd-dev
# install cmake
RUN apt-get install -y cmake
# install clang-13
WORKDIR /root
RUN wget https://apt.llvm.org/llvm.sh
RUN chmod +x llvm.sh
RUN ./llvm.sh 13 all
# install gcc-7, 8, 10, 11, default is 9
RUN apt-get install -y gcc-7 g++-7
RUN apt-get install -y gcc-8 g++-8
RUN apt-get install -y gcc-10 g++-10
RUN add-apt-repository -y ppa:ubuntu-toolchain-r/test
RUN apt-get install -y gcc-11 g++-11
# install valgrind
RUN apt-get install -y valgrind
# install folly dependencies
RUN apt-get install -y libgoogle-glog-dev
# install openjdk 8
RUN apt-get install -y openjdk-8-jdk
ENV JAVA_HOME /usr/lib/jvm/java-1.8.0-openjdk-amd64
# install mingw
RUN apt-get install -y mingw-w64
# install gtest-parallel package
RUN git clone --single-branch --branch master --depth 1 https://github.com/google/gtest-parallel.git ~/gtest-parallel
ENV PATH $PATH:/root/gtest-parallel
# install libprotobuf for fuzzers test
RUN apt-get install -y ninja-build binutils liblzma-dev libz-dev pkg-config autoconf libtool
RUN git clone --branch v1.0 https://github.com/google/libprotobuf-mutator.git ~/libprotobuf-mutator && cd ~/libprotobuf-mutator && git checkout ffd86a32874e5c08a143019aad1aaf0907294c9f && mkdir build && cd build && cmake .. -GNinja -DCMAKE_C_COMPILER=clang-13 -DCMAKE_CXX_COMPILER=clang++-13 -DCMAKE_BUILD_TYPE=Release -DLIB_PROTO_MUTATOR_DOWNLOAD_PROTOBUF=ON && ninja && ninja install
ENV PKG_CONFIG_PATH /usr/local/OFF/:/root/libprotobuf-mutator/build/external.protobuf/lib/pkgconfig/
ENV PROTOC_BIN /root/libprotobuf-mutator/build/external.protobuf/bin/protoc
# install the latest google benchmark
RUN git clone --depth 1 --branch v1.7.0 https://github.com/google/benchmark.git ~/benchmark
RUN cd ~/benchmark && mkdir build && cd build && cmake .. -GNinja -DCMAKE_BUILD_TYPE=Release -DBENCHMARK_ENABLE_GTEST_TESTS=0 && ninja && ninja install
# clean up
RUN rm -rf /var/lib/apt/lists/*
RUN rm -rf /root/benchmark
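A note on the image workflow: the image is published under a versioned tag (0.5 at the time of this change) rather than `latest`, and the comments in `.circleci/config.yml` ask for a version bump on every rebuild, so CI runs already in flight keep the exact environment they started with. The final `rm -rf` steps drop the apt lists and the benchmark sources to keep the image smaller.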

@ -2939,7 +2939,7 @@ TEST_P(ColumnFamilyTest, CompactionSpeedupTwoColumnFamilies) {
ASSERT_EQ(1, dbfull()->TEST_BGCompactionsAllowed());
}
TEST_P(ColumnFamilyTest, CreateAndDestoryOptions) {
TEST_P(ColumnFamilyTest, CreateAndDestroyOptions) {
std::unique_ptr<ColumnFamilyOptions> cfo(new ColumnFamilyOptions());
ColumnFamilyHandle* cfh;
Open();

@ -77,7 +77,7 @@ TEST_F(CompactFilesTest, L0ConflictsFiles) {
options.compression = kNoCompression;
DB* db = nullptr;
DestroyDB(db_name_, options);
ASSERT_OK(DestroyDB(db_name_, options));
Status s = DB::Open(options, db_name_, &db);
assert(s.ok());
assert(db);
@ -128,7 +128,7 @@ TEST_F(CompactFilesTest, MultipleLevel) {
options.listeners.emplace_back(collector);
DB* db = nullptr;
DestroyDB(db_name_, options);
ASSERT_OK(DestroyDB(db_name_, options));
Status s = DB::Open(options, db_name_, &db);
ASSERT_OK(s);
ASSERT_NE(db, nullptr);
@ -211,7 +211,7 @@ TEST_F(CompactFilesTest, ObsoleteFiles) {
options.listeners.emplace_back(collector);
DB* db = nullptr;
DestroyDB(db_name_, options);
ASSERT_OK(DestroyDB(db_name_, options));
Status s = DB::Open(options, db_name_, &db);
ASSERT_OK(s);
ASSERT_NE(db, nullptr);
@ -250,7 +250,7 @@ TEST_F(CompactFilesTest, NotCutOutputOnLevel0) {
options.listeners.emplace_back(collector);
DB* db = nullptr;
DestroyDB(db_name_, options);
ASSERT_OK(DestroyDB(db_name_, options));
Status s = DB::Open(options, db_name_, &db);
assert(s.ok());
assert(db);
@ -288,7 +288,7 @@ TEST_F(CompactFilesTest, CapturingPendingFiles) {
options.listeners.emplace_back(collector);
DB* db = nullptr;
DestroyDB(db_name_, options);
ASSERT_OK(DestroyDB(db_name_, options));
Status s = DB::Open(options, db_name_, &db);
ASSERT_OK(s);
assert(db);
@ -366,7 +366,7 @@ TEST_F(CompactFilesTest, CompactionFilterWithGetSv) {
options.compaction_filter = cf.get();
DB* db = nullptr;
DestroyDB(db_name_, options);
ASSERT_OK(DestroyDB(db_name_, options));
Status s = DB::Open(options, db_name_, &db);
ASSERT_OK(s);
@ -404,7 +404,7 @@ TEST_F(CompactFilesTest, SentinelCompressionType) {
{CompactionStyle::kCompactionStyleLevel,
CompactionStyle::kCompactionStyleUniversal,
CompactionStyle::kCompactionStyleNone}) {
DestroyDB(db_name_, Options());
ASSERT_OK(DestroyDB(db_name_, Options()));
Options options;
options.compaction_style = compaction_style;
// L0: Snappy, L1: ZSTD, L2: Snappy
@ -458,7 +458,7 @@ TEST_F(CompactFilesTest, GetCompactionJobInfo) {
options.listeners.emplace_back(collector);
DB* db = nullptr;
DestroyDB(db_name_, options);
ASSERT_OK(DestroyDB(db_name_, options));
Status s = DB::Open(options, db_name_, &db);
ASSERT_OK(s);
assert(db);

@ -764,6 +764,7 @@ TEST_F(CorruptionTest, ParanoidFileChecksOnCompact) {
delete db_;
db_ = nullptr;
s = DestroyDB(dbname_, options);
ASSERT_OK(s);
std::shared_ptr<mock::MockTableFactory> mock =
std::make_shared<mock::MockTableFactory>();
options.table_factory = mock;

@ -1718,7 +1718,7 @@ TEST_P(DBBlockCacheKeyTest, StableCacheKeys) {
delete metadata_ptr_;
metadata_ptr_ = nullptr;
DestroyDB(export_files_dir, options);
ASSERT_OK(DestroyDB(export_files_dir, options));
ReopenWithColumnFamilies({"default", "yoyo"}, options);

@ -52,7 +52,7 @@ class ManualCompactionTest : public testing::Test {
// Get rid of any state from an old run.
dbname_ = ROCKSDB_NAMESPACE::test::PerThreadDBPath(
"rocksdb_manual_compaction_test");
DestroyDB(dbname_, Options());
EXPECT_OK(DestroyDB(dbname_, Options()));
}
std::string dbname_;
@ -130,7 +130,7 @@ TEST_F(ManualCompactionTest, CompactTouchesAllKeys) {
delete options.compaction_filter;
delete db;
DestroyDB(dbname_, options);
ASSERT_OK(DestroyDB(dbname_, options));
}
}
@ -186,7 +186,7 @@ TEST_F(ManualCompactionTest, Test) {
// close database
delete db;
DestroyDB(dbname_, Options());
ASSERT_OK(DestroyDB(dbname_, Options()));
}
TEST_F(ManualCompactionTest, SkipLevel) {
@ -298,7 +298,7 @@ TEST_F(ManualCompactionTest, SkipLevel) {
delete filter;
delete db;
DestroyDB(dbname_, options);
ASSERT_OK(DestroyDB(dbname_, options));
}
} // anonymous namespace

@ -69,7 +69,7 @@ std::shared_ptr<DB> OpenDb(bool read_only = false) {
class PerfContextTest : public testing::Test {};
TEST_F(PerfContextTest, SeekIntoDeletion) {
DestroyDB(kDbName, Options());
ASSERT_OK(DestroyDB(kDbName, Options()));
auto db = OpenDb();
WriteOptions write_options;
ReadOptions read_options;
@ -205,7 +205,7 @@ TEST_F(PerfContextTest, StopWatchOverhead) {
}
void ProfileQueries(bool enabled_time = false) {
DestroyDB(kDbName, Options()); // Start this test with a fresh DB
ASSERT_OK(DestroyDB(kDbName, Options())); // Start this test with a fresh DB
auto db = OpenDb();
@ -518,7 +518,7 @@ TEST_F(PerfContextTest, KeyComparisonCount) {
// starts to become linear to the input size.
TEST_F(PerfContextTest, SeekKeyComparison) {
DestroyDB(kDbName, Options());
ASSERT_OK(DestroyDB(kDbName, Options()));
auto db = OpenDb();
WriteOptions write_options;
ReadOptions read_options;
@ -652,7 +652,7 @@ TEST_F(PerfContextTest, ToString) {
}
TEST_F(PerfContextTest, MergeOperatorTime) {
DestroyDB(kDbName, Options());
ASSERT_OK(DestroyDB(kDbName, Options()));
DB* db;
Options options;
options.create_if_missing = true;
@ -833,7 +833,7 @@ TEST_F(PerfContextTest, CPUTimer) {
return;
}
DestroyDB(kDbName, Options());
ASSERT_OK(DestroyDB(kDbName, Options()));
auto db = OpenDb();
WriteOptions write_options;
ReadOptions read_options;

@ -37,7 +37,8 @@ class WalManagerTest : public testing::Test {
table_cache_(NewLRUCache(50000, 16)),
write_buffer_manager_(db_options_.db_write_buffer_size),
current_log_number_(0) {
env_.reset(MockEnv::Create(Env::Default())), DestroyDB(dbname_, Options());
env_.reset(MockEnv::Create(Env::Default()));
EXPECT_OK(DestroyDB(dbname_, Options()));
}
void Init() {

@ -171,7 +171,7 @@ TEST_P(WriteCallbackPTest, WriteWithCallbackTest) {
DB* db;
DBImpl* db_impl;
DestroyDB(dbname, options);
ASSERT_OK(DestroyDB(dbname, options));
DBOptions db_options(options);
ColumnFamilyOptions cf_options(options);
@ -372,7 +372,7 @@ TEST_P(WriteCallbackPTest, WriteWithCallbackTest) {
ASSERT_EQ(seq.load(), db_impl->TEST_GetLastVisibleSequence());
delete db;
DestroyDB(dbname, options);
ASSERT_OK(DestroyDB(dbname, options));
}
}
@ -391,7 +391,7 @@ TEST_F(WriteCallbackTest, WriteCallBackTest) {
DB* db;
DBImpl* db_impl;
DestroyDB(dbname, options);
ASSERT_OK(DestroyDB(dbname, options));
options.create_if_missing = true;
Status s = DB::Open(options, dbname, &db);
@ -441,7 +441,7 @@ TEST_F(WriteCallbackTest, WriteCallBackTest) {
ASSERT_EQ("value.a2", value);
delete db;
DestroyDB(dbname, options);
ASSERT_OK(DestroyDB(dbname, options));
}
} // namespace ROCKSDB_NAMESPACE

env/env_test.cc

@ -1078,11 +1078,20 @@ class IoctlFriendlyTmpdir {
}
}
// Check if the test is running inside a Docker container, in which case
// the file system inside `overlayfs` may not support FS_IOC_GETVERSION;
// skip the tests in that case.
struct stat buffer;
if (stat("/.dockerenv", &buffer) == 0) {
is_supported_ = false;
return;
}
fprintf(stderr, "failed to find an ioctl-friendly temporary directory;"
" specify one via the TEST_IOCTL_FRIENDLY_TMPDIR envvar\n");
std::abort();
#endif
}
}
~IoctlFriendlyTmpdir() {
rmdir(dir_.c_str());
@ -1092,8 +1101,12 @@ class IoctlFriendlyTmpdir {
return dir_;
}
bool is_supported() const { return is_supported_; }
private:
std::string dir_;
bool is_supported_ = true;
};
#ifndef ROCKSDB_LITE
@ -1102,8 +1115,10 @@ TEST_F(EnvPosixTest, PositionedAppend) {
EnvOptions options;
options.use_direct_writes = true;
options.use_mmap_writes = false;
IoctlFriendlyTmpdir ift;
ASSERT_OK(env_->NewWritableFile(ift.name() + "/f", &writable_file, options));
std::string fname = test::PerThreadDBPath(env_, "positioned_append");
SetupSyncPointsToMockDirectIO();
ASSERT_OK(env_->NewWritableFile(fname, &writable_file, options));
const size_t kBlockSize = 4096;
const size_t kDataSize = kPageSize;
// Write a page worth of 'a'
@ -1119,7 +1134,7 @@ TEST_F(EnvPosixTest, PositionedAppend) {
// Verify the above
std::unique_ptr<SequentialFile> seq_file;
ASSERT_OK(env_->NewSequentialFile(ift.name() + "/f", &seq_file, options));
ASSERT_OK(env_->NewSequentialFile(fname, &seq_file, options));
size_t scratch_len = kPageSize * 2;
std::unique_ptr<char[]> scratch(new char[scratch_len]);
Slice result;
@ -1139,6 +1154,11 @@ TEST_P(EnvPosixTestWithParam, RandomAccessUniqueID) {
EnvOptions soptions;
soptions.use_direct_reads = soptions.use_direct_writes = direct_io_;
IoctlFriendlyTmpdir ift;
if (!ift.is_supported()) {
ROCKSDB_GTEST_BYPASS(
"FS_IOC_GETVERSION is not supported by the filesystem");
return;
}
std::string fname = ift.name() + "/testfile";
std::unique_ptr<WritableFile> wfile;
ASSERT_OK(env_->NewWritableFile(fname, &wfile, soptions));
@ -1181,13 +1201,13 @@ TEST_P(EnvPosixTestWithParam, RandomAccessUniqueID) {
#ifdef ROCKSDB_FALLOCATE_PRESENT
TEST_P(EnvPosixTestWithParam, AllocateTest) {
if (env_ == Env::Default()) {
IoctlFriendlyTmpdir ift;
std::string fname = ift.name() + "/preallocate_testfile";
SetupSyncPointsToMockDirectIO();
std::string fname = test::PerThreadDBPath(env_, "preallocate_testfile");
// Try fallocate in a file to see whether the target file system supports
// it.
// Skip the test if fallocate is not supported.
std::string fname_test_fallocate = ift.name() + "/preallocate_testfile_2";
std::string fname_test_fallocate =
test::PerThreadDBPath(env_, "preallocate_testfile_2");
int fd = -1;
do {
fd = open(fname_test_fallocate.c_str(), O_CREAT | O_RDWR | O_TRUNC, 0644);
@ -1277,6 +1297,11 @@ TEST_P(EnvPosixTestWithParam, RandomAccessUniqueIDConcurrent) {
// Create the files
IoctlFriendlyTmpdir ift;
if (!ift.is_supported()) {
ROCKSDB_GTEST_BYPASS(
"FS_IOC_GETVERSION is not supported by the filesystem");
return;
}
std::vector<std::string> fnames;
for (int i = 0; i < 1000; ++i) {
fnames.push_back(ift.name() + "/" + "testfile" + std::to_string(i));
@ -1318,6 +1343,11 @@ TEST_P(EnvPosixTestWithParam, DISABLED_RandomAccessUniqueIDDeletes) {
soptions.use_direct_reads = soptions.use_direct_writes = direct_io_;
IoctlFriendlyTmpdir ift;
if (!ift.is_supported()) {
ROCKSDB_GTEST_BYPASS(
"FS_IOC_GETVERSION is not supported by the filesystem");
return;
}
std::string fname = ift.name() + "/" + "testfile";
// Check that after file is deleted we don't get same ID again in a new
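Taken together, the env_test.cc changes work around two limitations of running inside Docker: `FS_IOC_GETVERSION` is not supported on `overlayfs` (detected via the `/.dockerenv` marker file that Docker creates in the container root, then skipped with `ROCKSDB_GTEST_BYPASS`), and the direct-I/O tests move from an ioctl-friendly tmpdir to `test::PerThreadDBPath()` with `SetupSyncPointsToMockDirectIO()`.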

@ -48,7 +48,7 @@ class Status {
if (!checked_) {
fprintf(stderr, "Failed to check Status %p\n", this);
port::PrintStack();
abort();
std::abort();
}
#endif // ROCKSDB_ASSERT_STATUS_CHECKED
}

@ -608,7 +608,7 @@ InlineSkipList<Comparator>::FindRandomEntry() const {
}
// There is a special case where x could still be the head_
// (note that the head_ contains no key).
return x == head_ ? head_->Next(0) : x;
return x == head_ && head_ != nullptr ? head_->Next(0) : x;
}
template <class Comparator>
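The added `head_ != nullptr` guard is the `nullptr` fix mentioned in the summary: a static analyzer cannot prove `head_` is non-null on this path, so the unconditional `head_->Next(0)` is flagged as a possible null dereference. A reduced sketch of the pattern, with a hypothetical `Node` type rather than the real skiplist node:

    struct Node {
      Node* next;
    };

    // If the compiler/analyzer cannot prove head != nullptr, an
    // unconditional head->next is flagged as a possible null dereference.
    // Guarding it keeps behavior identical whenever head is non-null.
    Node* PickEntry(Node* x, Node* head) {
      return (x == head && head != nullptr) ? head->next : x;
    }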

@ -194,7 +194,7 @@ TEST_F(ChargeWriteBufferTest, Basic) {
ASSERT_GE(cache->GetPinnedUsage(), 44 * 256 * 1024);
ASSERT_LT(cache->GetPinnedUsage(), 44 * 256 * 1024 + kMetaDataChargeOverhead);
// Destory write buffer manger should free everything
// Destroy write buffer manager should free everything
wbf.reset();
ASSERT_EQ(cache->GetPinnedUsage(), 0);
}

@ -37,9 +37,12 @@ class SanityTest {
Options options = GetOptions();
options.create_if_missing = true;
std::string dbname = path_ + Name();
DestroyDB(dbname, options);
Status s = DestroyDB(dbname, options);
if (!s.ok()) {
return s;
}
DB* db = nullptr;
Status s = DB::Open(options, dbname, &db);
s = DB::Open(options, dbname, &db);
std::unique_ptr<DB> db_guard(db);
if (!s.ok()) {
return s;

@ -889,7 +889,7 @@ TEST_F(LdbCmdTest, LoadCFOptionsAndOverride) {
DB* db = nullptr;
std::string dbname = test::PerThreadDBPath(env.get(), "ldb_cmd_test");
DestroyDB(dbname, opts);
ASSERT_OK(DestroyDB(dbname, opts));
ASSERT_OK(DB::Open(opts, dbname, &db));
ColumnFamilyHandle* cf_handle;
@ -932,7 +932,7 @@ TEST_F(LdbCmdTest, UnsafeRemoveSstFile) {
DB* db = nullptr;
std::string dbname = test::PerThreadDBPath(Env::Default(), "ldb_cmd_test");
DestroyDB(dbname, opts);
ASSERT_OK(DestroyDB(dbname, opts));
ASSERT_OK(DB::Open(opts, dbname, &db));
// Create three SST files
@ -1041,7 +1041,7 @@ TEST_F(LdbCmdTest, FileTemperatureUpdateManifest) {
DB* db = nullptr;
std::string dbname = test::PerThreadDBPath(env.get(), "ldb_cmd_test");
DestroyDB(dbname, opts);
ASSERT_OK(DestroyDB(dbname, opts));
ASSERT_OK(DB::Open(opts, dbname, &db));
std::array<Temperature, 5> kTestTemps = {
@ -1123,8 +1123,8 @@ TEST_F(LdbCmdTest, RenameDbAndLoadOptions) {
std::string old_dbname = test::PerThreadDBPath(env, "ldb_cmd_test");
std::string new_dbname = old_dbname + "_2";
DestroyDB(old_dbname, opts);
DestroyDB(new_dbname, opts);
ASSERT_OK(DestroyDB(old_dbname, opts));
ASSERT_OK(DestroyDB(new_dbname, opts));
char old_arg[1024];
snprintf(old_arg, sizeof(old_arg), "--db=%s", old_dbname.c_str());
@ -1168,7 +1168,7 @@ TEST_F(LdbCmdTest, RenameDbAndLoadOptions) {
0, LDBCommandRunner::RunCommand(5, argv4, opts, LDBOptions(), nullptr));
ASSERT_EQ(
0, LDBCommandRunner::RunCommand(5, argv5, opts, LDBOptions(), nullptr));
DestroyDB(new_dbname, opts);
ASSERT_OK(DestroyDB(new_dbname, opts));
}
} // namespace ROCKSDB_NAMESPACE

@ -22,7 +22,7 @@ class ReduceLevelTest : public testing::Test {
public:
ReduceLevelTest() {
dbname_ = test::PerThreadDBPath("db_reduce_levels_test");
DestroyDB(dbname_, Options());
EXPECT_OK(DestroyDB(dbname_, Options()));
db_ = nullptr;
}

@ -657,7 +657,7 @@ class BackupEngineTest : public testing::Test {
engine_options_->max_background_operations = 7;
// delete old files in db
DestroyDB(dbname_, options_);
DestroyDBWithoutCheck(dbname_, options_);
// delete old LATEST_BACKUP file, which some tests create for compatibility
// testing.
@ -993,6 +993,12 @@ class BackupEngineTest : public testing::Test {
Options options_;
protected:
void DestroyDBWithoutCheck(const std::string& dbname,
const Options& options) {
// DestroyDB may fail because the db might not exist for some tests
DestroyDB(dbname, options).PermitUncheckedError();
}
std::unique_ptr<BackupEngineOptions> engine_options_;
}; // BackupEngineTest
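`DestroyDBWithoutCheck()` exists because many backup tests call `DestroyDB()` on a directory that may not contain a DB yet. `PermitUncheckedError()` marks the Status as checked without asserting on it, so ASSERT_STATUS_CHECKED builds (see the include/rocksdb/status.h hunk above) do not abort on the unchecked Status. The call sites below change accordingly, e.g.:

    // Before: asserts success, which can fail when there is no DB to destroy.
    ASSERT_OK(DestroyDB(dbname_, options_));
    // After: the Status is deliberately ignored.
    DestroyDBWithoutCheck(dbname_, options_);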
@ -1033,7 +1039,7 @@ TEST_F(BackupEngineTest, FileCollision) {
// If the db directory has been cleaned up, it is sensitive to file
// collision.
ASSERT_OK(DestroyDB(dbname_, options_));
DestroyDBWithoutCheck(dbname_, options_);
// open fresh DB, but old backups present
OpenDBAndBackupEngine(false /* destroy_old_data */, false /* dummy */,
@ -1054,7 +1060,7 @@ TEST_F(BackupEngineTest, FileCollision) {
CloseDBAndBackupEngine();
// delete old data
ASSERT_OK(DestroyDB(dbname_, options_));
DestroyDBWithoutCheck(dbname_, options_);
}
}
@ -1099,7 +1105,7 @@ TEST_P(BackupEngineTestWithParam, OfflineIntegrationTest) {
// second iter -- don't flush before backup
for (int iter = 0; iter < 2; ++iter) {
// delete old data
DestroyDB(dbname_, options_);
DestroyDBWithoutCheck(dbname_, options_);
bool destroy_data = true;
// every iteration --
@ -1118,7 +1124,7 @@ TEST_P(BackupEngineTestWithParam, OfflineIntegrationTest) {
ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), iter == 0))
<< "iter: " << iter << ", idx: " << i;
CloseDBAndBackupEngine();
DestroyDB(dbname_, options_);
DestroyDBWithoutCheck(dbname_, options_);
// ---- make sure it's empty ----
DB* db = OpenDB();
@ -1146,7 +1152,7 @@ TEST_P(BackupEngineTestWithParam, OnlineIntegrationTest) {
const int max_key = keys_iteration * 4 + 10;
Random rnd(7);
// delete old data
DestroyDB(dbname_, options_);
DestroyDBWithoutCheck(dbname_, options_);
// TODO: Implement & test db_paths support in backup (not supported in
// restore)
@ -1171,7 +1177,7 @@ TEST_P(BackupEngineTestWithParam, OnlineIntegrationTest) {
}
// close and destroy
CloseDBAndBackupEngine();
DestroyDB(dbname_, options_);
DestroyDBWithoutCheck(dbname_, options_);
// ---- make sure it's empty ----
DB* db = OpenDB();
@ -1547,7 +1553,7 @@ TEST_F(BackupEngineTest, TableFileCorruptedBeforeBackup) {
CloseDBAndBackupEngine();
// delete old files in db
ASSERT_OK(DestroyDB(dbname_, options_));
DestroyDBWithoutCheck(dbname_, options_);
// Enable table file checksum in DB manifest
options_.file_checksum_gen_factory = GetFileChecksumGenCrc32cFactory();
@ -1580,7 +1586,7 @@ TEST_F(BackupEngineTest, BlobFileCorruptedBeforeBackup) {
CloseDBAndBackupEngine();
// delete old files in db
ASSERT_OK(DestroyDB(dbname_, options_));
DestroyDBWithoutCheck(dbname_, options_);
// Enable file checksum in DB manifest
options_.file_checksum_gen_factory = GetFileChecksumGenCrc32cFactory();
@ -1614,7 +1620,7 @@ TEST_P(BackupEngineTestWithParam, TableFileCorruptedBeforeBackup) {
CloseDBAndBackupEngine();
// delete old files in db
ASSERT_OK(DestroyDB(dbname_, options_));
DestroyDBWithoutCheck(dbname_, options_);
// Enable table checksums in DB manifest
options_.file_checksum_gen_factory = GetFileChecksumGenCrc32cFactory();
@ -1643,7 +1649,7 @@ TEST_P(BackupEngineTestWithParam, BlobFileCorruptedBeforeBackup) {
CloseDBAndBackupEngine();
// delete old files in db
ASSERT_OK(DestroyDB(dbname_, options_));
DestroyDBWithoutCheck(dbname_, options_);
// Enable blob file checksums in DB manifest
options_.file_checksum_gen_factory = GetFileChecksumGenCrc32cFactory();
@ -1695,7 +1701,7 @@ TEST_F(BackupEngineTest, TableFileWithoutDbChecksumCorruptedDuringBackup) {
CloseDBAndBackupEngine();
// delete old files in db
ASSERT_OK(DestroyDB(dbname_, options_));
DestroyDBWithoutCheck(dbname_, options_);
}
TEST_F(BackupEngineTest, TableFileWithDbChecksumCorruptedDuringBackup) {
@ -1734,7 +1740,7 @@ TEST_F(BackupEngineTest, TableFileWithDbChecksumCorruptedDuringBackup) {
CloseDBAndBackupEngine();
// delete old files in db
ASSERT_OK(DestroyDB(dbname_, options_));
DestroyDBWithoutCheck(dbname_, options_);
}
}
@ -2208,7 +2214,7 @@ TEST_F(BackupEngineTest, TableFileCorruptionBeforeIncremental) {
}
CloseDBAndBackupEngine();
ASSERT_OK(DestroyDB(dbname_, options_));
DestroyDBWithoutCheck(dbname_, options_);
}
}
}
@ -2273,7 +2279,7 @@ TEST_F(BackupEngineTest, FileSizeForIncremental) {
// Even though we have "the same" DB state as backup 1, we need
// to restore to recreate the same conditions as later restore.
db_.reset();
ASSERT_OK(DestroyDB(dbname_, options_));
DestroyDBWithoutCheck(dbname_, options_);
ASSERT_OK(backup_engine_->RestoreDBFromBackup(1, dbname_, dbname_));
CloseDBAndBackupEngine();
@ -2294,7 +2300,7 @@ TEST_F(BackupEngineTest, FileSizeForIncremental) {
// Restore backup 1 (again)
db_.reset();
ASSERT_OK(DestroyDB(dbname_, options_));
DestroyDBWithoutCheck(dbname_, options_);
ASSERT_OK(backup_engine_->RestoreDBFromBackup(1, dbname_, dbname_));
CloseDBAndBackupEngine();
@ -2332,7 +2338,7 @@ TEST_F(BackupEngineTest, FileSizeForIncremental) {
EXPECT_EQ(children.size(), 3U); // Another SST added
}
CloseDBAndBackupEngine();
ASSERT_OK(DestroyDB(dbname_, options_));
DestroyDBWithoutCheck(dbname_, options_);
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearAllCallBacks();
}
@ -2609,7 +2615,7 @@ TEST_P(BackupEngineRateLimitingTestWithParam, RateLimiting) {
// destroy old data
Options options;
options.env = special_env.get();
DestroyDB(dbname_, options);
DestroyDBWithoutCheck(dbname_, options);
if (custom_rate_limiter) {
std::shared_ptr<RateLimiter> backup_rate_limiter =
@ -2699,7 +2705,7 @@ TEST_P(BackupEngineRateLimitingTestWithParam, RateLimitingVerifyBackup) {
Options options;
options.env = special_env.get();
DestroyDB(dbname_, options);
DestroyDBWithoutCheck(dbname_, options);
// Rate limiter uses `CondVar::TimedWait()`, which does not have access to the
// `Env` to advance its time according to the fake wait duration. The
// workaround is to install a callback that advance the `Env`'s mock time.
@ -2743,7 +2749,7 @@ TEST_P(BackupEngineRateLimitingTestWithParam, RateLimitingVerifyBackup) {
CloseDBAndBackupEngine();
AssertBackupConsistency(backup_id, 0, 10000, 10010);
DestroyDB(dbname_, options);
DestroyDBWithoutCheck(dbname_, options);
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearCallBack(
@ -2760,7 +2766,7 @@ TEST_P(BackupEngineRateLimitingTestWithParam, RateLimitingChargeReadInBackup) {
10 /* fairness */, RateLimiter::Mode::kWritesOnly /* mode */));
engine_options_->backup_rate_limiter = backup_rate_limiter;
DestroyDB(dbname_, Options());
DestroyDBWithoutCheck(dbname_, Options());
OpenDBAndBackupEngine(true /* destroy_old_data */, false /* dummy */,
kShareWithChecksum /* shared_option */);
FillDB(db_.get(), 0, 10);
@ -2784,7 +2790,7 @@ TEST_P(BackupEngineRateLimitingTestWithParam, RateLimitingChargeReadInBackup) {
total_bytes_through_with_no_read_charged);
CloseDBAndBackupEngine();
AssertBackupConsistency(1, 0, 10, 20);
DestroyDB(dbname_, Options());
DestroyDBWithoutCheck(dbname_, Options());
}
TEST_P(BackupEngineRateLimitingTestWithParam, RateLimitingChargeReadInRestore) {
@ -2798,20 +2804,20 @@ TEST_P(BackupEngineRateLimitingTestWithParam, RateLimitingChargeReadInRestore) {
10 /* fairness */, RateLimiter::Mode::kWritesOnly /* mode */));
engine_options_->restore_rate_limiter = restore_rate_limiter;
DestroyDB(dbname_, Options());
DestroyDBWithoutCheck(dbname_, Options());
OpenDBAndBackupEngine(true /* destroy_old_data */);
FillDB(db_.get(), 0, 10);
ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(),
false /* flush_before_backup */));
CloseDBAndBackupEngine();
DestroyDB(dbname_, Options());
DestroyDBWithoutCheck(dbname_, Options());
OpenBackupEngine(false /* destroy_old_data */);
ASSERT_OK(backup_engine_->RestoreDBFromLatestBackup(dbname_, dbname_));
std::int64_t total_bytes_through_with_no_read_charged =
restore_rate_limiter->GetTotalBytesThrough();
CloseBackupEngine();
DestroyDB(dbname_, Options());
DestroyDBWithoutCheck(dbname_, Options());
restore_rate_limiter.reset(NewGenericRateLimiter(
restore_rate_limiter_limit, 100 * 1000 /* refill_period_us */,
@ -2826,7 +2832,7 @@ TEST_P(BackupEngineRateLimitingTestWithParam, RateLimitingChargeReadInRestore) {
total_bytes_through_with_no_read_charged * 2);
CloseBackupEngine();
AssertBackupConsistency(1, 0, 10, 20);
DestroyDB(dbname_, Options());
DestroyDBWithoutCheck(dbname_, Options());
}
TEST_P(BackupEngineRateLimitingTestWithParam,
@ -2840,7 +2846,7 @@ TEST_P(BackupEngineRateLimitingTestWithParam,
10 /* fairness */, RateLimiter::Mode::kAllIo /* mode */));
engine_options_->backup_rate_limiter = backup_rate_limiter;
DestroyDB(dbname_, Options());
DestroyDBWithoutCheck(dbname_, Options());
OpenDBAndBackupEngine(true /* destroy_old_data */);
FillDB(db_.get(), 0, 10);
ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(),
@ -2857,7 +2863,7 @@ TEST_P(BackupEngineRateLimitingTestWithParam,
EXPECT_GT(engine_options_->backup_rate_limiter->GetTotalBytesThrough(),
total_bytes_through_before_initialize);
CloseDBAndBackupEngine();
DestroyDB(dbname_, Options());
DestroyDBWithoutCheck(dbname_, Options());
}
class BackupEngineRateLimitingTestWithParam2
@ -2908,7 +2914,7 @@ TEST_P(BackupEngineRateLimitingTestWithParam2,
});
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
DestroyDB(dbname_, Options());
DestroyDBWithoutCheck(dbname_, Options());
OpenDBAndBackupEngine(true /* destroy_old_data */, false /* dummy */,
kShareWithChecksum /* shared_option */);
@ -2954,7 +2960,7 @@ TEST_P(BackupEngineRateLimitingTestWithParam2,
total_bytes_through_before_initialize);
CloseDBAndBackupEngine();
DestroyDB(dbname_, Options());
DestroyDBWithoutCheck(dbname_, Options());
OpenBackupEngine(false /* destroy_old_data */);
int64_t total_bytes_through_before_restore =
engine_options_->restore_rate_limiter->GetTotalBytesThrough();
@ -2965,7 +2971,7 @@ TEST_P(BackupEngineRateLimitingTestWithParam2,
total_bytes_through_before_restore);
CloseBackupEngine();
DestroyDB(dbname_, Options());
DestroyDBWithoutCheck(dbname_, Options());
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearCallBack(
@ -2975,14 +2981,14 @@ TEST_P(BackupEngineRateLimitingTestWithParam2,
#endif // !defined(ROCKSDB_VALGRIND_RUN) || defined(ROCKSDB_FULL_VALGRIND_RUN)
TEST_F(BackupEngineTest, ReadOnlyBackupEngine) {
DestroyDB(dbname_, options_);
DestroyDBWithoutCheck(dbname_, options_);
OpenDBAndBackupEngine(true);
FillDB(db_.get(), 0, 100);
ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), true));
FillDB(db_.get(), 100, 200);
ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), true));
CloseDBAndBackupEngine();
DestroyDB(dbname_, options_);
DestroyDBWithoutCheck(dbname_, options_);
engine_options_->destroy_old_data = false;
test_backup_fs_->ClearWrittenFiles();
@ -3007,7 +3013,7 @@ TEST_F(BackupEngineTest, ReadOnlyBackupEngine) {
}
TEST_F(BackupEngineTest, OpenBackupAsReadOnlyDB) {
DestroyDB(dbname_, options_);
DestroyDBWithoutCheck(dbname_, options_);
options_.write_dbid_to_manifest = false;
OpenDBAndBackupEngine(true);
@ -3020,7 +3026,7 @@ TEST_F(BackupEngineTest, OpenBackupAsReadOnlyDB) {
FillDB(db_.get(), 100, 200);
ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), /*flush*/ false));
db_.reset(); // CloseDB
DestroyDB(dbname_, options_);
DestroyDBWithoutCheck(dbname_, options_);
BackupInfo backup_info;
// First, check that we get empty fields without include_file_details
ASSERT_OK(backup_engine_->GetBackupInfo(/*id*/ 1U, &backup_info,
@ -3073,7 +3079,7 @@ TEST_F(BackupEngineTest, OpenBackupAsReadOnlyDB) {
}
TEST_F(BackupEngineTest, ProgressCallbackDuringBackup) {
DestroyDB(dbname_, options_);
DestroyDBWithoutCheck(dbname_, options_);
// Too big for this small DB
engine_options_->callback_trigger_interval_size = 100000;
OpenDBAndBackupEngine(true);
@ -3093,11 +3099,11 @@ TEST_F(BackupEngineTest, ProgressCallbackDuringBackup) {
[&is_callback_invoked]() { is_callback_invoked = true; }));
ASSERT_TRUE(is_callback_invoked);
CloseDBAndBackupEngine();
DestroyDB(dbname_, options_);
DestroyDBWithoutCheck(dbname_, options_);
}
TEST_F(BackupEngineTest, GarbageCollectionBeforeBackup) {
DestroyDB(dbname_, options_);
DestroyDBWithoutCheck(dbname_, options_);
OpenDBAndBackupEngine(true);
ASSERT_OK(backup_chroot_env_->CreateDirIfMissing(backupdir_ + "/shared"));
@ -3151,7 +3157,7 @@ TEST_F(BackupEngineTest, EnvFailures) {
// Read from meta-file failure
{
DestroyDB(dbname_, options_);
DestroyDBWithoutCheck(dbname_, options_);
OpenDBAndBackupEngine(true);
FillDB(db_.get(), 0, 100);
ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), true));
@ -3176,7 +3182,7 @@ TEST_F(BackupEngineTest, EnvFailures) {
// Verify manifest can roll while a backup is being created with the old
// manifest.
TEST_F(BackupEngineTest, ChangeManifestDuringBackupCreation) {
DestroyDB(dbname_, options_);
DestroyDBWithoutCheck(dbname_, options_);
options_.max_manifest_file_size = 0; // always rollover manifest for file add
OpenDBAndBackupEngine(true);
FillDB(db_.get(), 0, 100, kAutoFlushOnly);
@ -3213,7 +3219,7 @@ TEST_F(BackupEngineTest, ChangeManifestDuringBackupCreation) {
ASSERT_TRUE(db_chroot_env_->FileExists(prev_manifest_path).IsNotFound());
CloseDBAndBackupEngine();
DestroyDB(dbname_, options_);
DestroyDBWithoutCheck(dbname_, options_);
AssertBackupConsistency(0, 0, 100);
}
@ -3265,7 +3271,7 @@ TEST_F(BackupEngineTest, BackupWithMetadata) {
ASSERT_EQ(std::to_string(i), backup_info.app_metadata);
}
CloseDBAndBackupEngine();
DestroyDB(dbname_, options_);
DestroyDBWithoutCheck(dbname_, options_);
}
TEST_F(BackupEngineTest, BinaryMetadata) {
@ -3283,7 +3289,7 @@ TEST_F(BackupEngineTest, BinaryMetadata) {
ASSERT_EQ(1, backup_infos.size());
ASSERT_EQ(binaryMetadata, backup_infos[0].app_metadata);
CloseDBAndBackupEngine();
DestroyDB(dbname_, options_);
DestroyDBWithoutCheck(dbname_, options_);
}
TEST_F(BackupEngineTest, MetadataTooLarge) {
@ -3292,7 +3298,7 @@ TEST_F(BackupEngineTest, MetadataTooLarge) {
ASSERT_NOK(
backup_engine_->CreateNewBackupWithMetadata(db_.get(), largeMetadata));
CloseDBAndBackupEngine();
DestroyDB(dbname_, options_);
DestroyDBWithoutCheck(dbname_, options_);
}
TEST_F(BackupEngineTest, MetaSchemaVersion2_SizeCorruption) {
@ -3734,7 +3740,7 @@ TEST_F(BackupEngineTest, IgnoreLimitBackupsOpenedWhenNotReadOnly) {
ASSERT_EQ(2, backup_infos[1].backup_id);
ASSERT_EQ(4, backup_infos[2].backup_id);
CloseDBAndBackupEngine();
DestroyDB(dbname_, options_);
DestroyDBWithoutCheck(dbname_, options_);
}
TEST_F(BackupEngineTest, CreateWhenLatestBackupCorrupted) {
@ -3942,7 +3948,7 @@ TEST_F(BackupEngineTest, BackgroundThreadCpuPriority) {
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearAllCallBacks();
CloseDBAndBackupEngine();
DestroyDB(dbname_, options_);
DestroyDBWithoutCheck(dbname_, options_);
}
// Populates `*total_size` with the size of all files under `backup_dir`.

@ -122,7 +122,8 @@ private:
class CassandraFunctionalTest : public testing::Test {
public:
CassandraFunctionalTest() {
DestroyDB(kDbName, Options()); // Start each test with a fresh DB
EXPECT_OK(
DestroyDB(kDbName, Options())); // Start each test with a fresh DB
}
std::shared_ptr<DB> OpenDb() {

@ -39,7 +39,7 @@ class RangeLockingTest : public ::testing::Test {
options.create_if_missing = true;
dbname = test::PerThreadDBPath("range_locking_testdb");
DestroyDB(dbname, options);
EXPECT_OK(DestroyDB(dbname, options));
range_lock_mgr.reset(NewRangeLockManager(nullptr));
txn_db_options.lock_mgr_handle = range_lock_mgr;
@ -55,7 +55,7 @@ class RangeLockingTest : public ::testing::Test {
// seems to be a bug in btrfs that makes readdir return recently
// unlink-ed files. By using the default fs we simply ignore errors resulting
// from attempting to delete such files in DestroyDB.
DestroyDB(dbname, options);
EXPECT_OK(DestroyDB(dbname, options));
}
PessimisticTransaction* NewTxn(

@ -39,12 +39,12 @@ class OptimisticTransactionTest
options.merge_operator.reset(new TestPutOperator());
dbname = test::PerThreadDBPath("optimistic_transaction_testdb");
DestroyDB(dbname, options);
EXPECT_OK(DestroyDB(dbname, options));
Open();
}
~OptimisticTransactionTest() override {
delete txn_db;
DestroyDB(dbname, options);
EXPECT_OK(DestroyDB(dbname, options));
}
void Reopen() {

@ -57,12 +57,12 @@ class TtlTest : public testing::Test {
options_.max_compaction_bytes = 1;
// compaction should take place always from level0 for determinism
db_ttl_ = nullptr;
DestroyDB(dbname_, Options());
EXPECT_OK(DestroyDB(dbname_, Options()));
}
~TtlTest() override {
CloseTtl();
DestroyDB(dbname_, Options());
EXPECT_OK(DestroyDB(dbname_, Options()));
}
// Open database with TTL support when TTL not provided with db_ttl_ pointer

@ -246,7 +246,7 @@ class WBWIBaseTest : public testing::Test {
MergeOperators::CreateFromStringId("stringappend");
options_.create_if_missing = true;
dbname_ = test::PerThreadDBPath("write_batch_with_index_test");
DestroyDB(dbname_, options_);
EXPECT_OK(DestroyDB(dbname_, options_));
batch_.reset(new WriteBatchWithIndex(BytewiseComparator(), 20, overwrite));
}
@ -254,7 +254,7 @@ class WBWIBaseTest : public testing::Test {
if (db_ != nullptr) {
ReleaseSnapshot();
delete db_;
DestroyDB(dbname_, options_);
EXPECT_OK(DestroyDB(dbname_, options_));
}
}
