From 5d3aefb6821eef8816fdee74262702b1d9255f50 Mon Sep 17 00:00:00 2001 From: Jay Zhuang Date: Wed, 10 Aug 2022 17:34:38 -0700 Subject: [PATCH] Migrate to docker for CI run (#10496) Summary: Moved linux builds to using docker to avoid CI instability caused by dependency installation site down. Added the `Dockerfile` which is used to build the image. The build time is also significantly reduced, because no dependencies installation and with using 2xlarge+ instance for slow build (like tsan test). Also fixed a few issues detected while building this: * `DestoryDB()` Status not checked for a few tests * nullptr might be used in `inlineskiplist.cc` Pull Request resolved: https://github.com/facebook/rocksdb/pull/10496 Test Plan: CI Reviewed By: ajkr Differential Revision: D38554200 Pulled By: jay-zhuang fbshipit-source-id: 16e8fb2bf07b9c84bb27fb18421c4d54f2f248fd --- .circleci/config.yml | 354 +++++------------- build_tools/ubuntu20_image/Dockerfile | 56 +++ db/column_family_test.cc | 2 +- db/compact_files_test.cc | 16 +- db/corruption_test.cc | 1 + db/db_block_cache_test.cc | 2 +- db/manual_compaction_test.cc | 8 +- db/perf_context_test.cc | 10 +- db/wal_manager_test.cc | 3 +- db/write_callback_test.cc | 8 +- env/env_test.cc | 46 ++- include/rocksdb/status.h | 2 +- memtable/inlineskiplist.h | 2 +- memtable/write_buffer_manager_test.cc | 2 +- tools/db_sanity_test.cc | 7 +- tools/ldb_cmd_test.cc | 12 +- tools/reduce_levels_test.cc | 2 +- utilities/backup/backup_engine_test.cc | 98 ++--- .../cassandra/cassandra_functional_test.cc | 3 +- .../lock/range/range_locking_test.cc | 4 +- .../optimistic_transaction_test.cc | 4 +- utilities/ttl/ttl_test.cc | 4 +- .../write_batch_with_index_test.cc | 4 +- 23 files changed, 287 insertions(+), 363 deletions(-) create mode 100644 build_tools/ubuntu20_image/Dockerfile diff --git a/.circleci/config.yml b/.circleci/config.yml index e1bd6dfda..36db6a396 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -3,11 +3,6 @@ version: 
2.1 orbs: win: circleci/windows@2.4.0 -aliases: - - ¬ify-on-main-failure - fail_only: true - only_for_branches: main - commands: install-cmake-on-macos: steps: @@ -61,35 +56,21 @@ commands: - store_artifacts: # store LOG for debugging if there's any path: LOG - run: # on fail, compress Test Logs for diagnosing the issue - name: Compress Test Logs - command: tar -cvzf t.tar.gz t - when: on_fail + name: Compress Test Logs + command: tar -cvzf t.tar.gz t + when: on_fail - store_artifacts: # on fail, store Test Logs for diagnosing the issue path: t.tar.gz destination: test_logs when: on_fail - - install-clang-10: - steps: - - run: - name: Install Clang 10 + - run: # store core dumps if there's any command: | - wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add - - echo "deb http://apt.llvm.org/focal/ llvm-toolchain-focal-10 main" | sudo tee -a /etc/apt/sources.list - echo "deb-src http://apt.llvm.org/focal/ llvm-toolchain-focal-10 main" | sudo tee -a /etc/apt/sources.list - echo "APT::Acquire::Retries \"10\";" | sudo tee -a /etc/apt/apt.conf.d/80-retries # llvm.org unreliable - sudo apt-get update -y && sudo apt-get install -y clang-10 - - install-clang-13: - steps: - - run: - name: Install Clang 13 - command: | - echo "deb http://apt.llvm.org/focal/ llvm-toolchain-focal-13 main" | sudo tee -a /etc/apt/sources.list - echo "deb-src http://apt.llvm.org/focal/ llvm-toolchain-focal-13 main" | sudo tee -a /etc/apt/sources.list - echo "APT::Acquire::Retries \"10\";" | sudo tee -a /etc/apt/apt.conf.d/80-retries # llvm.org unreliable - wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key|sudo apt-key add - - sudo apt-get update -y && sudo apt-get install -y clang-13 + mkdir -p /tmp/core_dumps + cp core.* /tmp/core_dumps + when: on_fail + - store_artifacts: + path: /tmp/core_dumps + when: on_fail install-gflags: steps: @@ -98,33 +79,6 @@ commands: command: | sudo apt-get update -y && sudo apt-get install -y libgflags-dev - install-benchmark: - steps: - - 
run: - name: Install ninja build - command: sudo apt-get update -y && sudo apt-get install -y ninja-build - - run: - name: Install benchmark - command: | - git clone --depth 1 --branch v1.6.1 https://github.com/google/benchmark.git ~/benchmark - cd ~/benchmark && mkdir build && cd build - cmake .. -GNinja -DCMAKE_BUILD_TYPE=Release -DBENCHMARK_ENABLE_GTEST_TESTS=0 - ninja && sudo ninja install - - install-valgrind: - steps: - - run: - name: Install valgrind - command: sudo apt-get update -y && sudo apt-get install -y valgrind - - upgrade-cmake: - steps: - - run: - name: Upgrade cmake - command: | - sudo apt remove --purge cmake - sudo snap install cmake --classic - install-gflags-on-macos: steps: - run: @@ -132,48 +86,8 @@ commands: command: | HOMEBREW_NO_AUTO_UPDATE=1 brew install gflags - install-gtest-parallel: - steps: - - run: - name: Install gtest-parallel - command: | - git clone --single-branch --branch master --depth 1 https://github.com/google/gtest-parallel.git ~/gtest-parallel - echo 'export PATH=$HOME/gtest-parallel:$PATH' >> $BASH_ENV - - install-compression-libs: - steps: - - run: - name: Install compression libs - command: | - sudo apt-get update -y && sudo apt-get install -y libsnappy-dev zlib1g-dev libbz2-dev liblz4-dev libzstd-dev - - install-streaming-compress-libs: - steps: - - run: - name: Install streaming compression libs - command: | - sudo apt-get update -y && sudo apt-get install -y libzstd-dev - - install-libprotobuf-mutator: - steps: - - run: - name: Install libprotobuf-mutator libs - command: | - git clone -b v1.0 git@github.com:google/libprotobuf-mutator.git ~/libprotobuf-mutator - cd ~/libprotobuf-mutator && git checkout ffd86a32874e5c08a143019aad1aaf0907294c9f && mkdir build && cd build - cmake .. 
-GNinja -DCMAKE_C_COMPILER=clang-13 -DCMAKE_CXX_COMPILER=clang++-13 -DCMAKE_BUILD_TYPE=Release -DLIB_PROTO_MUTATOR_DOWNLOAD_PROTOBUF=ON - ninja && sudo ninja install - - run: - name: Setup environment variables - command: | - echo "export PKG_CONFIG_PATH=/usr/local/OFF/:~/libprotobuf-mutator/build/external.protobuf/lib/pkgconfig/" >> $BASH_ENV - echo "export PROTOC_BIN=~/libprotobuf-mutator/build/external.protobuf/bin/protoc" >> $BASH_ENV setup-folly: steps: - - run: - name: Install folly dependencies - command: | - sudo apt-get install libgoogle-glog-dev - run: name: Checkout folly sources command: | @@ -221,6 +135,21 @@ executors: image: 'windows-server-2019-vs2019:stable' resource_class: windows.2xlarge shell: bash.exe + linux-docker: + docker: + # The image configuration is build_tools/ubuntu20_image/Dockerfile + # To update and build the image: + # $ cd build_tools/ubuntu20_image + # $ docker build -t zjay437/rocksdb:0.5 . + # $ docker push zjay437/rocksdb:0.5 + # `zjay437` is the account name for zjay@meta.com which readwrite token is shared internally. To login: + # $ docker login --username zjay437 + # Or please feel free to change it to your docker hub account for hosting the image, meta employee should already have the account and able to login with SSO. 
+ # To avoid impacting the existing CI runs, please bump the version every time creating a new image + # to run the CI image environment locally: + # $ docker run --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -it zjay437/rocksdb:0.5 bash + # option `--cap-add=SYS_PTRACE --security-opt seccomp=unconfined` is used to enable gdb to attach an existing process + - image: zjay437/rocksdb:0.5 jobs: build-macos: @@ -272,172 +201,132 @@ jobs: - post-steps build-linux: - machine: - image: ubuntu-2004:202111-02 + executor: linux-docker resource_class: 2xlarge steps: - pre-steps - - install-gflags - - install-streaming-compress-libs - run: make V=1 J=32 -j32 check - post-steps build-linux-encrypted_env-no_compression: - machine: - image: ubuntu-2004:202111-02 + executor: linux-docker resource_class: 2xlarge steps: - pre-steps - - install-gflags - run: ENCRYPTED_ENV=1 ROCKSDB_DISABLE_SNAPPY=1 ROCKSDB_DISABLE_ZLIB=1 ROCKSDB_DISABLE_BZIP=1 ROCKSDB_DISABLE_LZ4=1 ROCKSDB_DISABLE_ZSTD=1 make V=1 J=32 -j32 check - run: | ./sst_dump --help | egrep -q 'Supported compression types: kNoCompression$' # Verify no compiled in compression - post-steps build-linux-shared_lib-alt_namespace-status_checked: - machine: - image: ubuntu-2004:202111-02 + executor: linux-docker resource_class: 2xlarge steps: - pre-steps - - install-gflags - run: ASSERT_STATUS_CHECKED=1 TEST_UINT128_COMPAT=1 ROCKSDB_MODIFY_NPHASH=1 LIB_MODE=shared OPT="-DROCKSDB_NAMESPACE=alternative_rocksdb_ns" make V=1 -j32 check - post-steps build-linux-release: - machine: - image: ubuntu-2004:202111-02 + executor: linux-docker resource_class: 2xlarge steps: - checkout # check out the code in the project directory - run: make V=1 -j32 release - - run: if ./db_stress --version; then false; else true; fi # ensure without gflags - - install-gflags - - run: make V=1 -j32 release - run: ./db_stress --version # ensure with gflags + - run: make clean + - run: apt-get remove -y libgflags-dev + - run: make V=1 -j32 release + - run: 
if ./db_stress --version; then false; else true; fi # ensure without gflags - post-steps build-linux-release-rtti: - machine: - image: ubuntu-2004:202111-02 + executor: linux-docker resource_class: xlarge steps: - checkout # check out the code in the project directory - - run: make clean - run: USE_RTTI=1 DEBUG_LEVEL=0 make V=1 -j16 static_lib tools db_bench - - run: if ./db_stress --version; then false; else true; fi # ensure without gflags - - run: sudo apt-get update -y && sudo apt-get install -y libgflags-dev + - run: ./db_stress --version # ensure with gflags - run: make clean + - run: apt-get remove -y libgflags-dev - run: USE_RTTI=1 DEBUG_LEVEL=0 make V=1 -j16 static_lib tools db_bench - - run: ./db_stress --version # ensure with gflags + - run: if ./db_stress --version; then false; else true; fi # ensure without gflags build-linux-lite: - machine: - image: ubuntu-2004:202111-02 + executor: linux-docker resource_class: large steps: - pre-steps - - install-gflags - run: LITE=1 make V=1 J=8 -j8 check - post-steps build-linux-lite-release: - machine: - image: ubuntu-2004:202111-02 + executor: linux-docker resource_class: large steps: - checkout # check out the code in the project directory - run: LITE=1 make V=1 -j8 release - - run: if ./db_stress --version; then false; else true; fi # ensure without gflags - - install-gflags - - run: LITE=1 make V=1 -j8 release - run: ./db_stress --version # ensure with gflags + - run: make clean + - run: apt-get remove -y libgflags-dev + - run: LITE=1 make V=1 -j8 release + - run: if ./db_stress --version; then false; else true; fi # ensure without gflags - post-steps build-linux-clang-no_test_run: - machine: - image: ubuntu-2004:202111-02 + executor: linux-docker resource_class: xlarge steps: - checkout # check out the code in the project directory - - run: sudo apt-get update -y && sudo apt-get install -y clang libgflags-dev libtbb-dev - run: CC=clang CXX=clang++ USE_CLANG=1 PORTABLE=1 make V=1 -j16 all - post-steps 
build-linux-clang10-asan: - machine: - image: ubuntu-2004:202111-02 + executor: linux-docker resource_class: 2xlarge steps: - pre-steps - - install-gflags - - install-clang-10 - run: COMPILE_WITH_ASAN=1 CC=clang-10 CXX=clang++-10 ROCKSDB_DISABLE_ALIGNED_NEW=1 USE_CLANG=1 make V=1 -j32 check # aligned new doesn't work for reason we haven't figured out - post-steps build-linux-clang10-mini-tsan: - machine: - image: ubuntu-2004:202111-02 - resource_class: 2xlarge - # find test list by `make list_all_tests` - parameters: - start_test: - default: "" - type: string - end_test: - default: "" - type: string + executor: linux-docker + resource_class: 2xlarge+ steps: - pre-steps - - install-gflags - - install-clang-10 - - install-gtest-parallel - - run: - name: "Build unit tests" - command: | - echo "env: $(env)" - ROCKSDBTESTS_START=<> ROCKSDBTESTS_END=<> ROCKSDBTESTS_SUBSET_TESTS_TO_FILE=/tmp/test_list COMPILE_WITH_TSAN=1 CC=clang-10 CXX=clang++-10 ROCKSDB_DISABLE_ALIGNED_NEW=1 USE_CLANG=1 make V=1 -j32 --output-sync=target build_subset_tests - - run: - name: "Run unit tests in parallel" - command: | - sed -i 's/[[:space:]]*$//; s/ / \.\//g; s/.*/.\/&/' /tmp/test_list - cat /tmp/test_list - gtest-parallel $(TEST_BGCompactionsAllowed()); } -TEST_P(ColumnFamilyTest, CreateAndDestoryOptions) { +TEST_P(ColumnFamilyTest, CreateAndDestroyOptions) { std::unique_ptr cfo(new ColumnFamilyOptions()); ColumnFamilyHandle* cfh; Open(); diff --git a/db/compact_files_test.cc b/db/compact_files_test.cc index df716f639..196b6c627 100644 --- a/db/compact_files_test.cc +++ b/db/compact_files_test.cc @@ -77,7 +77,7 @@ TEST_F(CompactFilesTest, L0ConflictsFiles) { options.compression = kNoCompression; DB* db = nullptr; - DestroyDB(db_name_, options); + ASSERT_OK(DestroyDB(db_name_, options)); Status s = DB::Open(options, db_name_, &db); assert(s.ok()); assert(db); @@ -128,7 +128,7 @@ TEST_F(CompactFilesTest, MultipleLevel) { options.listeners.emplace_back(collector); DB* db = nullptr; - 
DestroyDB(db_name_, options); + ASSERT_OK(DestroyDB(db_name_, options)); Status s = DB::Open(options, db_name_, &db); ASSERT_OK(s); ASSERT_NE(db, nullptr); @@ -211,7 +211,7 @@ TEST_F(CompactFilesTest, ObsoleteFiles) { options.listeners.emplace_back(collector); DB* db = nullptr; - DestroyDB(db_name_, options); + ASSERT_OK(DestroyDB(db_name_, options)); Status s = DB::Open(options, db_name_, &db); ASSERT_OK(s); ASSERT_NE(db, nullptr); @@ -250,7 +250,7 @@ TEST_F(CompactFilesTest, NotCutOutputOnLevel0) { options.listeners.emplace_back(collector); DB* db = nullptr; - DestroyDB(db_name_, options); + ASSERT_OK(DestroyDB(db_name_, options)); Status s = DB::Open(options, db_name_, &db); assert(s.ok()); assert(db); @@ -288,7 +288,7 @@ TEST_F(CompactFilesTest, CapturingPendingFiles) { options.listeners.emplace_back(collector); DB* db = nullptr; - DestroyDB(db_name_, options); + ASSERT_OK(DestroyDB(db_name_, options)); Status s = DB::Open(options, db_name_, &db); ASSERT_OK(s); assert(db); @@ -366,7 +366,7 @@ TEST_F(CompactFilesTest, CompactionFilterWithGetSv) { options.compaction_filter = cf.get(); DB* db = nullptr; - DestroyDB(db_name_, options); + ASSERT_OK(DestroyDB(db_name_, options)); Status s = DB::Open(options, db_name_, &db); ASSERT_OK(s); @@ -404,7 +404,7 @@ TEST_F(CompactFilesTest, SentinelCompressionType) { {CompactionStyle::kCompactionStyleLevel, CompactionStyle::kCompactionStyleUniversal, CompactionStyle::kCompactionStyleNone}) { - DestroyDB(db_name_, Options()); + ASSERT_OK(DestroyDB(db_name_, Options())); Options options; options.compaction_style = compaction_style; // L0: Snappy, L1: ZSTD, L2: Snappy @@ -458,7 +458,7 @@ TEST_F(CompactFilesTest, GetCompactionJobInfo) { options.listeners.emplace_back(collector); DB* db = nullptr; - DestroyDB(db_name_, options); + ASSERT_OK(DestroyDB(db_name_, options)); Status s = DB::Open(options, db_name_, &db); ASSERT_OK(s); assert(db); diff --git a/db/corruption_test.cc b/db/corruption_test.cc index 5d6ef2332..b03692979 
100644 --- a/db/corruption_test.cc +++ b/db/corruption_test.cc @@ -764,6 +764,7 @@ TEST_F(CorruptionTest, ParanoidFileChecksOnCompact) { delete db_; db_ = nullptr; s = DestroyDB(dbname_, options); + ASSERT_OK(s); std::shared_ptr mock = std::make_shared(); options.table_factory = mock; diff --git a/db/db_block_cache_test.cc b/db/db_block_cache_test.cc index 68221d4d0..89c351fd2 100644 --- a/db/db_block_cache_test.cc +++ b/db/db_block_cache_test.cc @@ -1718,7 +1718,7 @@ TEST_P(DBBlockCacheKeyTest, StableCacheKeys) { delete metadata_ptr_; metadata_ptr_ = nullptr; - DestroyDB(export_files_dir, options); + ASSERT_OK(DestroyDB(export_files_dir, options)); ReopenWithColumnFamilies({"default", "yoyo"}, options); diff --git a/db/manual_compaction_test.cc b/db/manual_compaction_test.cc index 2eb4812c7..df324d928 100644 --- a/db/manual_compaction_test.cc +++ b/db/manual_compaction_test.cc @@ -52,7 +52,7 @@ class ManualCompactionTest : public testing::Test { // Get rid of any state from an old run. 
dbname_ = ROCKSDB_NAMESPACE::test::PerThreadDBPath( "rocksdb_manual_compaction_test"); - DestroyDB(dbname_, Options()); + EXPECT_OK(DestroyDB(dbname_, Options())); } std::string dbname_; @@ -130,7 +130,7 @@ TEST_F(ManualCompactionTest, CompactTouchesAllKeys) { delete options.compaction_filter; delete db; - DestroyDB(dbname_, options); + ASSERT_OK(DestroyDB(dbname_, options)); } } @@ -186,7 +186,7 @@ TEST_F(ManualCompactionTest, Test) { // close database delete db; - DestroyDB(dbname_, Options()); + ASSERT_OK(DestroyDB(dbname_, Options())); } TEST_F(ManualCompactionTest, SkipLevel) { @@ -298,7 +298,7 @@ TEST_F(ManualCompactionTest, SkipLevel) { delete filter; delete db; - DestroyDB(dbname_, options); + ASSERT_OK(DestroyDB(dbname_, options)); } } // anonymous namespace diff --git a/db/perf_context_test.cc b/db/perf_context_test.cc index 69386fbd2..a8aec686a 100644 --- a/db/perf_context_test.cc +++ b/db/perf_context_test.cc @@ -69,7 +69,7 @@ std::shared_ptr OpenDb(bool read_only = false) { class PerfContextTest : public testing::Test {}; TEST_F(PerfContextTest, SeekIntoDeletion) { - DestroyDB(kDbName, Options()); + ASSERT_OK(DestroyDB(kDbName, Options())); auto db = OpenDb(); WriteOptions write_options; ReadOptions read_options; @@ -205,7 +205,7 @@ TEST_F(PerfContextTest, StopWatchOverhead) { } void ProfileQueries(bool enabled_time = false) { - DestroyDB(kDbName, Options()); // Start this test with a fresh DB + ASSERT_OK(DestroyDB(kDbName, Options())); // Start this test with a fresh DB auto db = OpenDb(); @@ -518,7 +518,7 @@ TEST_F(PerfContextTest, KeyComparisonCount) { // starts to become linear to the input size. 
TEST_F(PerfContextTest, SeekKeyComparison) { - DestroyDB(kDbName, Options()); + ASSERT_OK(DestroyDB(kDbName, Options())); auto db = OpenDb(); WriteOptions write_options; ReadOptions read_options; @@ -652,7 +652,7 @@ TEST_F(PerfContextTest, ToString) { } TEST_F(PerfContextTest, MergeOperatorTime) { - DestroyDB(kDbName, Options()); + ASSERT_OK(DestroyDB(kDbName, Options())); DB* db; Options options; options.create_if_missing = true; @@ -833,7 +833,7 @@ TEST_F(PerfContextTest, CPUTimer) { return; } - DestroyDB(kDbName, Options()); + ASSERT_OK(DestroyDB(kDbName, Options())); auto db = OpenDb(); WriteOptions write_options; ReadOptions read_options; diff --git a/db/wal_manager_test.cc b/db/wal_manager_test.cc index 72c258f54..cbf866ce3 100644 --- a/db/wal_manager_test.cc +++ b/db/wal_manager_test.cc @@ -37,7 +37,8 @@ class WalManagerTest : public testing::Test { table_cache_(NewLRUCache(50000, 16)), write_buffer_manager_(db_options_.db_write_buffer_size), current_log_number_(0) { - env_.reset(MockEnv::Create(Env::Default())), DestroyDB(dbname_, Options()); + env_.reset(MockEnv::Create(Env::Default())); + EXPECT_OK(DestroyDB(dbname_, Options())); } void Init() { diff --git a/db/write_callback_test.cc b/db/write_callback_test.cc index 423b2a2aa..3c8c9b14e 100644 --- a/db/write_callback_test.cc +++ b/db/write_callback_test.cc @@ -171,7 +171,7 @@ TEST_P(WriteCallbackPTest, WriteWithCallbackTest) { DB* db; DBImpl* db_impl; - DestroyDB(dbname, options); + ASSERT_OK(DestroyDB(dbname, options)); DBOptions db_options(options); ColumnFamilyOptions cf_options(options); @@ -372,7 +372,7 @@ TEST_P(WriteCallbackPTest, WriteWithCallbackTest) { ASSERT_EQ(seq.load(), db_impl->TEST_GetLastVisibleSequence()); delete db; - DestroyDB(dbname, options); + ASSERT_OK(DestroyDB(dbname, options)); } } @@ -391,7 +391,7 @@ TEST_F(WriteCallbackTest, WriteCallBackTest) { DB* db; DBImpl* db_impl; - DestroyDB(dbname, options); + ASSERT_OK(DestroyDB(dbname, options)); options.create_if_missing = true; 
Status s = DB::Open(options, dbname, &db); @@ -441,7 +441,7 @@ TEST_F(WriteCallbackTest, WriteCallBackTest) { ASSERT_EQ("value.a2", value); delete db; - DestroyDB(dbname, options); + ASSERT_OK(DestroyDB(dbname, options)); } } // namespace ROCKSDB_NAMESPACE diff --git a/env/env_test.cc b/env/env_test.cc index 4c562620e..866f3eabe 100644 --- a/env/env_test.cc +++ b/env/env_test.cc @@ -1078,11 +1078,20 @@ class IoctlFriendlyTmpdir { } } + // check if it's running test within a docker container, in which case, the + // file system inside `overlayfs` may not support FS_IOC_GETVERSION + // skip the tests + struct stat buffer; + if (stat("/.dockerenv", &buffer) == 0) { + is_supported_ = false; + return; + } + fprintf(stderr, "failed to find an ioctl-friendly temporary directory;" " specify one via the TEST_IOCTL_FRIENDLY_TMPDIR envvar\n"); std::abort(); #endif -} + } ~IoctlFriendlyTmpdir() { rmdir(dir_.c_str()); @@ -1092,8 +1101,12 @@ class IoctlFriendlyTmpdir { return dir_; } + bool is_supported() const { return is_supported_; } + private: std::string dir_; + + bool is_supported_ = true; }; #ifndef ROCKSDB_LITE @@ -1102,8 +1115,10 @@ TEST_F(EnvPosixTest, PositionedAppend) { EnvOptions options; options.use_direct_writes = true; options.use_mmap_writes = false; - IoctlFriendlyTmpdir ift; - ASSERT_OK(env_->NewWritableFile(ift.name() + "/f", &writable_file, options)); + std::string fname = test::PerThreadDBPath(env_, "positioned_append"); + SetupSyncPointsToMockDirectIO(); + + ASSERT_OK(env_->NewWritableFile(fname, &writable_file, options)); const size_t kBlockSize = 4096; const size_t kDataSize = kPageSize; // Write a page worth of 'a' @@ -1119,7 +1134,7 @@ TEST_F(EnvPosixTest, PositionedAppend) { // Verify the above std::unique_ptr seq_file; - ASSERT_OK(env_->NewSequentialFile(ift.name() + "/f", &seq_file, options)); + ASSERT_OK(env_->NewSequentialFile(fname, &seq_file, options)); size_t scratch_len = kPageSize * 2; std::unique_ptr scratch(new char[scratch_len]); Slice 
result; @@ -1139,6 +1154,11 @@ TEST_P(EnvPosixTestWithParam, RandomAccessUniqueID) { EnvOptions soptions; soptions.use_direct_reads = soptions.use_direct_writes = direct_io_; IoctlFriendlyTmpdir ift; + if (!ift.is_supported()) { + ROCKSDB_GTEST_BYPASS( + "FS_IOC_GETVERSION is not supported by the filesystem"); + return; + } std::string fname = ift.name() + "/testfile"; std::unique_ptr wfile; ASSERT_OK(env_->NewWritableFile(fname, &wfile, soptions)); @@ -1181,13 +1201,13 @@ TEST_P(EnvPosixTestWithParam, RandomAccessUniqueID) { #ifdef ROCKSDB_FALLOCATE_PRESENT TEST_P(EnvPosixTestWithParam, AllocateTest) { if (env_ == Env::Default()) { - IoctlFriendlyTmpdir ift; - std::string fname = ift.name() + "/preallocate_testfile"; - + SetupSyncPointsToMockDirectIO(); + std::string fname = test::PerThreadDBPath(env_, "preallocate_testfile"); // Try fallocate in a file to see whether the target file system supports // it. // Skip the test if fallocate is not supported. - std::string fname_test_fallocate = ift.name() + "/preallocate_testfile_2"; + std::string fname_test_fallocate = + test::PerThreadDBPath(env_, "preallocate_testfile_2"); int fd = -1; do { fd = open(fname_test_fallocate.c_str(), O_CREAT | O_RDWR | O_TRUNC, 0644); @@ -1277,6 +1297,11 @@ TEST_P(EnvPosixTestWithParam, RandomAccessUniqueIDConcurrent) { // Create the files IoctlFriendlyTmpdir ift; + if (!ift.is_supported()) { + ROCKSDB_GTEST_BYPASS( + "FS_IOC_GETVERSION is not supported by the filesystem"); + return; + } std::vector fnames; for (int i = 0; i < 1000; ++i) { fnames.push_back(ift.name() + "/" + "testfile" + std::to_string(i)); @@ -1318,6 +1343,11 @@ TEST_P(EnvPosixTestWithParam, DISABLED_RandomAccessUniqueIDDeletes) { soptions.use_direct_reads = soptions.use_direct_writes = direct_io_; IoctlFriendlyTmpdir ift; + if (!ift.is_supported()) { + ROCKSDB_GTEST_BYPASS( + "FS_IOC_GETVERSION is not supported by the filesystem"); + return; + } std::string fname = ift.name() + "/" + "testfile"; // Check that after 
file is deleted we don't get same ID again in a new diff --git a/include/rocksdb/status.h b/include/rocksdb/status.h index 265e29cd4..1ab3dc4cb 100644 --- a/include/rocksdb/status.h +++ b/include/rocksdb/status.h @@ -48,7 +48,7 @@ class Status { if (!checked_) { fprintf(stderr, "Failed to check Status %p\n", this); port::PrintStack(); - abort(); + std::abort(); } #endif // ROCKSDB_ASSERT_STATUS_CHECKED } } diff --git a/memtable/inlineskiplist.h b/memtable/inlineskiplist.h index 028fde3a2..4a4e63df0 100644 --- a/memtable/inlineskiplist.h +++ b/memtable/inlineskiplist.h @@ -608,7 +608,7 @@ InlineSkipList::FindRandomEntry() const { } // There is a special case where x could still be the head_ // (note that the head_ contains no key). - return x == head_ ? head_->Next(0) : x; + return x == head_ && head_ != nullptr ? head_->Next(0) : x; } template diff --git a/memtable/write_buffer_manager_test.cc b/memtable/write_buffer_manager_test.cc index 30789c0b3..f74565134 100644 --- a/memtable/write_buffer_manager_test.cc +++ b/memtable/write_buffer_manager_test.cc @@ -194,7 +194,7 @@ TEST_F(ChargeWriteBufferTest, Basic) { ASSERT_GE(cache->GetPinnedUsage(), 44 * 256 * 1024); ASSERT_LT(cache->GetPinnedUsage(), 44 * 256 * 1024 + kMetaDataChargeOverhead); - // Destory write buffer manger should free everything + // Destroy write buffer manager should free everything wbf.reset(); ASSERT_EQ(cache->GetPinnedUsage(), 0); } diff --git a/tools/db_sanity_test.cc b/tools/db_sanity_test.cc index 21475a7e9..1c16bf392 100644 --- a/tools/db_sanity_test.cc +++ b/tools/db_sanity_test.cc @@ -37,9 +37,12 @@ class SanityTest { Options options = GetOptions(); options.create_if_missing = true; std::string dbname = path_ + Name(); - DestroyDB(dbname, options); + Status s = DestroyDB(dbname, options); + if (!s.ok()) { + return s; + } DB* db = nullptr; - Status s = DB::Open(options, dbname, &db); + s = DB::Open(options, dbname, &db); std::unique_ptr db_guard(db); if (!s.ok()) { return s; } diff --git 
a/tools/ldb_cmd_test.cc b/tools/ldb_cmd_test.cc index 06b47829b..cf133b7e4 100644 --- a/tools/ldb_cmd_test.cc +++ b/tools/ldb_cmd_test.cc @@ -889,7 +889,7 @@ TEST_F(LdbCmdTest, LoadCFOptionsAndOverride) { DB* db = nullptr; std::string dbname = test::PerThreadDBPath(env.get(), "ldb_cmd_test"); - DestroyDB(dbname, opts); + ASSERT_OK(DestroyDB(dbname, opts)); ASSERT_OK(DB::Open(opts, dbname, &db)); ColumnFamilyHandle* cf_handle; @@ -932,7 +932,7 @@ TEST_F(LdbCmdTest, UnsafeRemoveSstFile) { DB* db = nullptr; std::string dbname = test::PerThreadDBPath(Env::Default(), "ldb_cmd_test"); - DestroyDB(dbname, opts); + ASSERT_OK(DestroyDB(dbname, opts)); ASSERT_OK(DB::Open(opts, dbname, &db)); // Create three SST files @@ -1041,7 +1041,7 @@ TEST_F(LdbCmdTest, FileTemperatureUpdateManifest) { DB* db = nullptr; std::string dbname = test::PerThreadDBPath(env.get(), "ldb_cmd_test"); - DestroyDB(dbname, opts); + ASSERT_OK(DestroyDB(dbname, opts)); ASSERT_OK(DB::Open(opts, dbname, &db)); std::array kTestTemps = { @@ -1123,8 +1123,8 @@ TEST_F(LdbCmdTest, RenameDbAndLoadOptions) { std::string old_dbname = test::PerThreadDBPath(env, "ldb_cmd_test"); std::string new_dbname = old_dbname + "_2"; - DestroyDB(old_dbname, opts); - DestroyDB(new_dbname, opts); + ASSERT_OK(DestroyDB(old_dbname, opts)); + ASSERT_OK(DestroyDB(new_dbname, opts)); char old_arg[1024]; snprintf(old_arg, sizeof(old_arg), "--db=%s", old_dbname.c_str()); @@ -1168,7 +1168,7 @@ TEST_F(LdbCmdTest, RenameDbAndLoadOptions) { 0, LDBCommandRunner::RunCommand(5, argv4, opts, LDBOptions(), nullptr)); ASSERT_EQ( 0, LDBCommandRunner::RunCommand(5, argv5, opts, LDBOptions(), nullptr)); - DestroyDB(new_dbname, opts); + ASSERT_OK(DestroyDB(new_dbname, opts)); } } // namespace ROCKSDB_NAMESPACE diff --git a/tools/reduce_levels_test.cc b/tools/reduce_levels_test.cc index c809c6423..4a60560e1 100644 --- a/tools/reduce_levels_test.cc +++ b/tools/reduce_levels_test.cc @@ -22,7 +22,7 @@ class ReduceLevelTest : public testing::Test { 
public: ReduceLevelTest() { dbname_ = test::PerThreadDBPath("db_reduce_levels_test"); - DestroyDB(dbname_, Options()); + EXPECT_OK(DestroyDB(dbname_, Options())); db_ = nullptr; } diff --git a/utilities/backup/backup_engine_test.cc b/utilities/backup/backup_engine_test.cc index 77c3c6890..b804c3a17 100644 --- a/utilities/backup/backup_engine_test.cc +++ b/utilities/backup/backup_engine_test.cc @@ -657,7 +657,7 @@ class BackupEngineTest : public testing::Test { engine_options_->max_background_operations = 7; // delete old files in db - DestroyDB(dbname_, options_); + DestroyDBWithoutCheck(dbname_, options_); // delete old LATEST_BACKUP file, which some tests create for compatibility // testing. @@ -993,6 +993,12 @@ class BackupEngineTest : public testing::Test { Options options_; protected: + void DestroyDBWithoutCheck(const std::string& dbname, + const Options& options) { + // DestroyDB may fail because the db might not exist for some tests + DestroyDB(dbname, options).PermitUncheckedError(); + } + std::unique_ptr engine_options_; }; // BackupEngineTest @@ -1033,7 +1039,7 @@ TEST_F(BackupEngineTest, FileCollision) { // If the db directory has been cleaned up, it is sensitive to file // collision. 
- ASSERT_OK(DestroyDB(dbname_, options_)); + DestroyDBWithoutCheck(dbname_, options_); // open fresh DB, but old backups present OpenDBAndBackupEngine(false /* destroy_old_data */, false /* dummy */, @@ -1054,7 +1060,7 @@ TEST_F(BackupEngineTest, FileCollision) { CloseDBAndBackupEngine(); // delete old data - ASSERT_OK(DestroyDB(dbname_, options_)); + DestroyDBWithoutCheck(dbname_, options_); } } @@ -1099,7 +1105,7 @@ TEST_P(BackupEngineTestWithParam, OfflineIntegrationTest) { // second iter -- don't flush before backup for (int iter = 0; iter < 2; ++iter) { // delete old data - DestroyDB(dbname_, options_); + DestroyDBWithoutCheck(dbname_, options_); bool destroy_data = true; // every iteration -- @@ -1118,7 +1124,7 @@ TEST_P(BackupEngineTestWithParam, OfflineIntegrationTest) { ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), iter == 0)) << "iter: " << iter << ", idx: " << i; CloseDBAndBackupEngine(); - DestroyDB(dbname_, options_); + DestroyDBWithoutCheck(dbname_, options_); // ---- make sure it's empty ---- DB* db = OpenDB(); @@ -1146,7 +1152,7 @@ TEST_P(BackupEngineTestWithParam, OnlineIntegrationTest) { const int max_key = keys_iteration * 4 + 10; Random rnd(7); // delete old data - DestroyDB(dbname_, options_); + DestroyDBWithoutCheck(dbname_, options_); // TODO: Implement & test db_paths support in backup (not supported in // restore) @@ -1171,7 +1177,7 @@ TEST_P(BackupEngineTestWithParam, OnlineIntegrationTest) { } // close and destroy CloseDBAndBackupEngine(); - DestroyDB(dbname_, options_); + DestroyDBWithoutCheck(dbname_, options_); // ---- make sure it's empty ---- DB* db = OpenDB(); @@ -1547,7 +1553,7 @@ TEST_F(BackupEngineTest, TableFileCorruptedBeforeBackup) { CloseDBAndBackupEngine(); // delete old files in db - ASSERT_OK(DestroyDB(dbname_, options_)); + DestroyDBWithoutCheck(dbname_, options_); // Enable table file checksum in DB manifest options_.file_checksum_gen_factory = GetFileChecksumGenCrc32cFactory(); @@ -1580,7 +1586,7 @@ 
TEST_F(BackupEngineTest, BlobFileCorruptedBeforeBackup) { CloseDBAndBackupEngine(); // delete old files in db - ASSERT_OK(DestroyDB(dbname_, options_)); + DestroyDBWithoutCheck(dbname_, options_); // Enable file checksum in DB manifest options_.file_checksum_gen_factory = GetFileChecksumGenCrc32cFactory(); @@ -1614,7 +1620,7 @@ TEST_P(BackupEngineTestWithParam, TableFileCorruptedBeforeBackup) { CloseDBAndBackupEngine(); // delete old files in db - ASSERT_OK(DestroyDB(dbname_, options_)); + DestroyDBWithoutCheck(dbname_, options_); // Enable table checksums in DB manifest options_.file_checksum_gen_factory = GetFileChecksumGenCrc32cFactory(); @@ -1643,7 +1649,7 @@ TEST_P(BackupEngineTestWithParam, BlobFileCorruptedBeforeBackup) { CloseDBAndBackupEngine(); // delete old files in db - ASSERT_OK(DestroyDB(dbname_, options_)); + DestroyDBWithoutCheck(dbname_, options_); // Enable blob file checksums in DB manifest options_.file_checksum_gen_factory = GetFileChecksumGenCrc32cFactory(); @@ -1695,7 +1701,7 @@ TEST_F(BackupEngineTest, TableFileWithoutDbChecksumCorruptedDuringBackup) { CloseDBAndBackupEngine(); // delete old files in db - ASSERT_OK(DestroyDB(dbname_, options_)); + DestroyDBWithoutCheck(dbname_, options_); } TEST_F(BackupEngineTest, TableFileWithDbChecksumCorruptedDuringBackup) { @@ -1734,7 +1740,7 @@ TEST_F(BackupEngineTest, TableFileWithDbChecksumCorruptedDuringBackup) { CloseDBAndBackupEngine(); // delete old files in db - ASSERT_OK(DestroyDB(dbname_, options_)); + DestroyDBWithoutCheck(dbname_, options_); } } @@ -2208,7 +2214,7 @@ TEST_F(BackupEngineTest, TableFileCorruptionBeforeIncremental) { } CloseDBAndBackupEngine(); - ASSERT_OK(DestroyDB(dbname_, options_)); + DestroyDBWithoutCheck(dbname_, options_); } } } @@ -2273,7 +2279,7 @@ TEST_F(BackupEngineTest, FileSizeForIncremental) { // Even though we have "the same" DB state as backup 1, we need // to restore to recreate the same conditions as later restore. 
db_.reset(); - ASSERT_OK(DestroyDB(dbname_, options_)); + DestroyDBWithoutCheck(dbname_, options_); ASSERT_OK(backup_engine_->RestoreDBFromBackup(1, dbname_, dbname_)); CloseDBAndBackupEngine(); @@ -2294,7 +2300,7 @@ TEST_F(BackupEngineTest, FileSizeForIncremental) { // Restore backup 1 (again) db_.reset(); - ASSERT_OK(DestroyDB(dbname_, options_)); + DestroyDBWithoutCheck(dbname_, options_); ASSERT_OK(backup_engine_->RestoreDBFromBackup(1, dbname_, dbname_)); CloseDBAndBackupEngine(); @@ -2332,7 +2338,7 @@ TEST_F(BackupEngineTest, FileSizeForIncremental) { EXPECT_EQ(children.size(), 3U); // Another SST added } CloseDBAndBackupEngine(); - ASSERT_OK(DestroyDB(dbname_, options_)); + DestroyDBWithoutCheck(dbname_, options_); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing(); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearAllCallBacks(); } @@ -2609,7 +2615,7 @@ TEST_P(BackupEngineRateLimitingTestWithParam, RateLimiting) { // destroy old data Options options; options.env = special_env.get(); - DestroyDB(dbname_, options); + DestroyDBWithoutCheck(dbname_, options); if (custom_rate_limiter) { std::shared_ptr backup_rate_limiter = @@ -2699,7 +2705,7 @@ TEST_P(BackupEngineRateLimitingTestWithParam, RateLimitingVerifyBackup) { Options options; options.env = special_env.get(); - DestroyDB(dbname_, options); + DestroyDBWithoutCheck(dbname_, options); // Rate limiter uses `CondVar::TimedWait()`, which does not have access to the // `Env` to advance its time according to the fake wait duration. The // workaround is to install a callback that advance the `Env`'s mock time. 
@@ -2743,7 +2749,7 @@ TEST_P(BackupEngineRateLimitingTestWithParam, RateLimitingVerifyBackup) { CloseDBAndBackupEngine(); AssertBackupConsistency(backup_id, 0, 10000, 10010); - DestroyDB(dbname_, options); + DestroyDBWithoutCheck(dbname_, options); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing(); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearCallBack( @@ -2760,7 +2766,7 @@ TEST_P(BackupEngineRateLimitingTestWithParam, RateLimitingChargeReadInBackup) { 10 /* fairness */, RateLimiter::Mode::kWritesOnly /* mode */)); engine_options_->backup_rate_limiter = backup_rate_limiter; - DestroyDB(dbname_, Options()); + DestroyDBWithoutCheck(dbname_, Options()); OpenDBAndBackupEngine(true /* destroy_old_data */, false /* dummy */, kShareWithChecksum /* shared_option */); FillDB(db_.get(), 0, 10); @@ -2784,7 +2790,7 @@ TEST_P(BackupEngineRateLimitingTestWithParam, RateLimitingChargeReadInBackup) { total_bytes_through_with_no_read_charged); CloseDBAndBackupEngine(); AssertBackupConsistency(1, 0, 10, 20); - DestroyDB(dbname_, Options()); + DestroyDBWithoutCheck(dbname_, Options()); } TEST_P(BackupEngineRateLimitingTestWithParam, RateLimitingChargeReadInRestore) { @@ -2798,20 +2804,20 @@ TEST_P(BackupEngineRateLimitingTestWithParam, RateLimitingChargeReadInRestore) { 10 /* fairness */, RateLimiter::Mode::kWritesOnly /* mode */)); engine_options_->restore_rate_limiter = restore_rate_limiter; - DestroyDB(dbname_, Options()); + DestroyDBWithoutCheck(dbname_, Options()); OpenDBAndBackupEngine(true /* destroy_old_data */); FillDB(db_.get(), 0, 10); ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), false /* flush_before_backup */)); CloseDBAndBackupEngine(); - DestroyDB(dbname_, Options()); + DestroyDBWithoutCheck(dbname_, Options()); OpenBackupEngine(false /* destroy_old_data */); ASSERT_OK(backup_engine_->RestoreDBFromLatestBackup(dbname_, dbname_)); std::int64_t total_bytes_through_with_no_read_charged = restore_rate_limiter->GetTotalBytesThrough(); 
CloseBackupEngine(); - DestroyDB(dbname_, Options()); + DestroyDBWithoutCheck(dbname_, Options()); restore_rate_limiter.reset(NewGenericRateLimiter( restore_rate_limiter_limit, 100 * 1000 /* refill_period_us */, @@ -2826,7 +2832,7 @@ TEST_P(BackupEngineRateLimitingTestWithParam, RateLimitingChargeReadInRestore) { total_bytes_through_with_no_read_charged * 2); CloseBackupEngine(); AssertBackupConsistency(1, 0, 10, 20); - DestroyDB(dbname_, Options()); + DestroyDBWithoutCheck(dbname_, Options()); } TEST_P(BackupEngineRateLimitingTestWithParam, @@ -2840,7 +2846,7 @@ TEST_P(BackupEngineRateLimitingTestWithParam, 10 /* fairness */, RateLimiter::Mode::kAllIo /* mode */)); engine_options_->backup_rate_limiter = backup_rate_limiter; - DestroyDB(dbname_, Options()); + DestroyDBWithoutCheck(dbname_, Options()); OpenDBAndBackupEngine(true /* destroy_old_data */); FillDB(db_.get(), 0, 10); ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), @@ -2857,7 +2863,7 @@ TEST_P(BackupEngineRateLimitingTestWithParam, EXPECT_GT(engine_options_->backup_rate_limiter->GetTotalBytesThrough(), total_bytes_through_before_initialize); CloseDBAndBackupEngine(); - DestroyDB(dbname_, Options()); + DestroyDBWithoutCheck(dbname_, Options()); } class BackupEngineRateLimitingTestWithParam2 @@ -2908,7 +2914,7 @@ TEST_P(BackupEngineRateLimitingTestWithParam2, }); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing(); - DestroyDB(dbname_, Options()); + DestroyDBWithoutCheck(dbname_, Options()); OpenDBAndBackupEngine(true /* destroy_old_data */, false /* dummy */, kShareWithChecksum /* shared_option */); @@ -2954,7 +2960,7 @@ TEST_P(BackupEngineRateLimitingTestWithParam2, total_bytes_through_before_initialize); CloseDBAndBackupEngine(); - DestroyDB(dbname_, Options()); + DestroyDBWithoutCheck(dbname_, Options()); OpenBackupEngine(false /* destroy_old_data */); int64_t total_bytes_through_before_restore = engine_options_->restore_rate_limiter->GetTotalBytesThrough(); @@ -2965,7 +2971,7 @@ 
TEST_P(BackupEngineRateLimitingTestWithParam2, total_bytes_through_before_restore); CloseBackupEngine(); - DestroyDB(dbname_, Options()); + DestroyDBWithoutCheck(dbname_, Options()); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing(); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearCallBack( @@ -2975,14 +2981,14 @@ TEST_P(BackupEngineRateLimitingTestWithParam2, #endif // !defined(ROCKSDB_VALGRIND_RUN) || defined(ROCKSDB_FULL_VALGRIND_RUN) TEST_F(BackupEngineTest, ReadOnlyBackupEngine) { - DestroyDB(dbname_, options_); + DestroyDBWithoutCheck(dbname_, options_); OpenDBAndBackupEngine(true); FillDB(db_.get(), 0, 100); ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), true)); FillDB(db_.get(), 100, 200); ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), true)); CloseDBAndBackupEngine(); - DestroyDB(dbname_, options_); + DestroyDBWithoutCheck(dbname_, options_); engine_options_->destroy_old_data = false; test_backup_fs_->ClearWrittenFiles(); @@ -3007,7 +3013,7 @@ TEST_F(BackupEngineTest, ReadOnlyBackupEngine) { } TEST_F(BackupEngineTest, OpenBackupAsReadOnlyDB) { - DestroyDB(dbname_, options_); + DestroyDBWithoutCheck(dbname_, options_); options_.write_dbid_to_manifest = false; OpenDBAndBackupEngine(true); @@ -3020,7 +3026,7 @@ TEST_F(BackupEngineTest, OpenBackupAsReadOnlyDB) { FillDB(db_.get(), 100, 200); ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), /*flush*/ false)); db_.reset(); // CloseDB - DestroyDB(dbname_, options_); + DestroyDBWithoutCheck(dbname_, options_); BackupInfo backup_info; // First, check that we get empty fields without include_file_details ASSERT_OK(backup_engine_->GetBackupInfo(/*id*/ 1U, &backup_info, @@ -3073,7 +3079,7 @@ TEST_F(BackupEngineTest, OpenBackupAsReadOnlyDB) { } TEST_F(BackupEngineTest, ProgressCallbackDuringBackup) { - DestroyDB(dbname_, options_); + DestroyDBWithoutCheck(dbname_, options_); // Too big for this small DB engine_options_->callback_trigger_interval_size = 100000; 
OpenDBAndBackupEngine(true); @@ -3093,11 +3099,11 @@ TEST_F(BackupEngineTest, ProgressCallbackDuringBackup) { [&is_callback_invoked]() { is_callback_invoked = true; })); ASSERT_TRUE(is_callback_invoked); CloseDBAndBackupEngine(); - DestroyDB(dbname_, options_); + DestroyDBWithoutCheck(dbname_, options_); } TEST_F(BackupEngineTest, GarbageCollectionBeforeBackup) { - DestroyDB(dbname_, options_); + DestroyDBWithoutCheck(dbname_, options_); OpenDBAndBackupEngine(true); ASSERT_OK(backup_chroot_env_->CreateDirIfMissing(backupdir_ + "/shared")); @@ -3151,7 +3157,7 @@ TEST_F(BackupEngineTest, EnvFailures) { // Read from meta-file failure { - DestroyDB(dbname_, options_); + DestroyDBWithoutCheck(dbname_, options_); OpenDBAndBackupEngine(true); FillDB(db_.get(), 0, 100); ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), true)); @@ -3176,7 +3182,7 @@ TEST_F(BackupEngineTest, EnvFailures) { // Verify manifest can roll while a backup is being created with the old // manifest. TEST_F(BackupEngineTest, ChangeManifestDuringBackupCreation) { - DestroyDB(dbname_, options_); + DestroyDBWithoutCheck(dbname_, options_); options_.max_manifest_file_size = 0; // always rollover manifest for file add OpenDBAndBackupEngine(true); FillDB(db_.get(), 0, 100, kAutoFlushOnly); @@ -3213,7 +3219,7 @@ TEST_F(BackupEngineTest, ChangeManifestDuringBackupCreation) { ASSERT_TRUE(db_chroot_env_->FileExists(prev_manifest_path).IsNotFound()); CloseDBAndBackupEngine(); - DestroyDB(dbname_, options_); + DestroyDBWithoutCheck(dbname_, options_); AssertBackupConsistency(0, 0, 100); } @@ -3265,7 +3271,7 @@ TEST_F(BackupEngineTest, BackupWithMetadata) { ASSERT_EQ(std::to_string(i), backup_info.app_metadata); } CloseDBAndBackupEngine(); - DestroyDB(dbname_, options_); + DestroyDBWithoutCheck(dbname_, options_); } TEST_F(BackupEngineTest, BinaryMetadata) { @@ -3283,7 +3289,7 @@ TEST_F(BackupEngineTest, BinaryMetadata) { ASSERT_EQ(1, backup_infos.size()); ASSERT_EQ(binaryMetadata, 
backup_infos[0].app_metadata); CloseDBAndBackupEngine(); - DestroyDB(dbname_, options_); + DestroyDBWithoutCheck(dbname_, options_); } TEST_F(BackupEngineTest, MetadataTooLarge) { @@ -3292,7 +3298,7 @@ TEST_F(BackupEngineTest, MetadataTooLarge) { ASSERT_NOK( backup_engine_->CreateNewBackupWithMetadata(db_.get(), largeMetadata)); CloseDBAndBackupEngine(); - DestroyDB(dbname_, options_); + DestroyDBWithoutCheck(dbname_, options_); } TEST_F(BackupEngineTest, MetaSchemaVersion2_SizeCorruption) { @@ -3734,7 +3740,7 @@ TEST_F(BackupEngineTest, IgnoreLimitBackupsOpenedWhenNotReadOnly) { ASSERT_EQ(2, backup_infos[1].backup_id); ASSERT_EQ(4, backup_infos[2].backup_id); CloseDBAndBackupEngine(); - DestroyDB(dbname_, options_); + DestroyDBWithoutCheck(dbname_, options_); } TEST_F(BackupEngineTest, CreateWhenLatestBackupCorrupted) { @@ -3942,7 +3948,7 @@ TEST_F(BackupEngineTest, BackgroundThreadCpuPriority) { ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing(); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearAllCallBacks(); CloseDBAndBackupEngine(); - DestroyDB(dbname_, options_); + DestroyDBWithoutCheck(dbname_, options_); } // Populates `*total_size` with the size of all files under `backup_dir`. 
diff --git a/utilities/cassandra/cassandra_functional_test.cc b/utilities/cassandra/cassandra_functional_test.cc index 9d1143b34..15f633bac 100644 --- a/utilities/cassandra/cassandra_functional_test.cc +++ b/utilities/cassandra/cassandra_functional_test.cc @@ -122,7 +122,8 @@ private: class CassandraFunctionalTest : public testing::Test { public: CassandraFunctionalTest() { - DestroyDB(kDbName, Options()); // Start each test with a fresh DB + EXPECT_OK( + DestroyDB(kDbName, Options())); // Start each test with a fresh DB } std::shared_ptr OpenDb() { diff --git a/utilities/transactions/lock/range/range_locking_test.cc b/utilities/transactions/lock/range/range_locking_test.cc index 5ab6c60fc..45e138326 100644 --- a/utilities/transactions/lock/range/range_locking_test.cc +++ b/utilities/transactions/lock/range/range_locking_test.cc @@ -39,7 +39,7 @@ class RangeLockingTest : public ::testing::Test { options.create_if_missing = true; dbname = test::PerThreadDBPath("range_locking_testdb"); - DestroyDB(dbname, options); + EXPECT_OK(DestroyDB(dbname, options)); range_lock_mgr.reset(NewRangeLockManager(nullptr)); txn_db_options.lock_mgr_handle = range_lock_mgr; @@ -55,7 +55,7 @@ class RangeLockingTest : public ::testing::Test { // seems to be a bug in btrfs that the makes readdir return recently // unlink-ed files. By using the default fs we simply ignore errors resulted // from attempting to delete such files in DestroyDB. 
- DestroyDB(dbname, options); + EXPECT_OK(DestroyDB(dbname, options)); } PessimisticTransaction* NewTxn( diff --git a/utilities/transactions/optimistic_transaction_test.cc b/utilities/transactions/optimistic_transaction_test.cc index 1447ac8d1..47acfbe56 100644 --- a/utilities/transactions/optimistic_transaction_test.cc +++ b/utilities/transactions/optimistic_transaction_test.cc @@ -39,12 +39,12 @@ class OptimisticTransactionTest options.merge_operator.reset(new TestPutOperator()); dbname = test::PerThreadDBPath("optimistic_transaction_testdb"); - DestroyDB(dbname, options); + EXPECT_OK(DestroyDB(dbname, options)); Open(); } ~OptimisticTransactionTest() override { delete txn_db; - DestroyDB(dbname, options); + EXPECT_OK(DestroyDB(dbname, options)); } void Reopen() { diff --git a/utilities/ttl/ttl_test.cc b/utilities/ttl/ttl_test.cc index 0a0f69255..982051ac7 100644 --- a/utilities/ttl/ttl_test.cc +++ b/utilities/ttl/ttl_test.cc @@ -57,12 +57,12 @@ class TtlTest : public testing::Test { options_.max_compaction_bytes = 1; // compaction should take place always from level0 for determinism db_ttl_ = nullptr; - DestroyDB(dbname_, Options()); + EXPECT_OK(DestroyDB(dbname_, Options())); } ~TtlTest() override { CloseTtl(); - DestroyDB(dbname_, Options()); + EXPECT_OK(DestroyDB(dbname_, Options())); } // Open database with TTL support when TTL not provided with db_ttl_ pointer diff --git a/utilities/write_batch_with_index/write_batch_with_index_test.cc b/utilities/write_batch_with_index/write_batch_with_index_test.cc index 87ef859ca..a57a95108 100644 --- a/utilities/write_batch_with_index/write_batch_with_index_test.cc +++ b/utilities/write_batch_with_index/write_batch_with_index_test.cc @@ -246,7 +246,7 @@ class WBWIBaseTest : public testing::Test { MergeOperators::CreateFromStringId("stringappend"); options_.create_if_missing = true; dbname_ = test::PerThreadDBPath("write_batch_with_index_test"); - DestroyDB(dbname_, options_); + EXPECT_OK(DestroyDB(dbname_, options_)); 
batch_.reset(new WriteBatchWithIndex(BytewiseComparator(), 20, overwrite)); } @@ -254,7 +254,7 @@ class WBWIBaseTest : public testing::Test { if (db_ != nullptr) { ReleaseSnapshot(); delete db_; - DestroyDB(dbname_, options_); + EXPECT_OK(DestroyDB(dbname_, options_)); } }