diff --git a/db/column_family_test.cc b/db/column_family_test.cc index d5ab13f28..0f4f94990 100644 --- a/db/column_family_test.cc +++ b/db/column_family_test.cc @@ -47,7 +47,7 @@ class EnvCounter : public EnvWrapper { int num_new_writable_file_; }; -class ColumnFamilyTest { +class ColumnFamilyTest : public testing::Test { public: ColumnFamilyTest() : rnd_(139) { env_ = new EnvCounter(Env::Default()); @@ -333,7 +333,7 @@ class DumbLogger : public Logger { virtual size_t GetLogFileSize() const override { return 0; } }; -TEST(ColumnFamilyTest, DontReuseColumnFamilyID) { +TEST_F(ColumnFamilyTest, DontReuseColumnFamilyID) { for (int iter = 0; iter < 3; ++iter) { Open(); CreateColumnFamilies({"one", "two", "three"}); @@ -360,8 +360,7 @@ TEST(ColumnFamilyTest, DontReuseColumnFamilyID) { } } - -TEST(ColumnFamilyTest, AddDrop) { +TEST_F(ColumnFamilyTest, AddDrop) { Open(); CreateColumnFamilies({"one", "two", "three"}); ASSERT_EQ("NOT_FOUND", Get(1, "fodor")); @@ -387,7 +386,7 @@ TEST(ColumnFamilyTest, AddDrop) { std::vector({"default", "four", "three"})); } -TEST(ColumnFamilyTest, DropTest) { +TEST_F(ColumnFamilyTest, DropTest) { // first iteration - dont reopen DB before dropping // second iteration - reopen DB before dropping for (int iter = 0; iter < 2; ++iter) { @@ -411,7 +410,7 @@ TEST(ColumnFamilyTest, DropTest) { } } -TEST(ColumnFamilyTest, WriteBatchFailure) { +TEST_F(ColumnFamilyTest, WriteBatchFailure) { Open(); CreateColumnFamiliesAndReopen({"one", "two"}); WriteBatch batch; @@ -429,7 +428,7 @@ TEST(ColumnFamilyTest, WriteBatchFailure) { Close(); } -TEST(ColumnFamilyTest, ReadWrite) { +TEST_F(ColumnFamilyTest, ReadWrite) { Open(); CreateColumnFamiliesAndReopen({"one", "two"}); ASSERT_OK(Put(0, "foo", "v1")); @@ -453,7 +452,7 @@ TEST(ColumnFamilyTest, ReadWrite) { Close(); } -TEST(ColumnFamilyTest, IgnoreRecoveredLog) { +TEST_F(ColumnFamilyTest, IgnoreRecoveredLog) { std::string backup_logs = dbname_ + "/backup_logs"; // delete old files in backup_logs directory @@ -528,7 +527,7 @@ TEST(ColumnFamilyTest, IgnoreRecoveredLog) { } } -TEST(ColumnFamilyTest, FlushTest) { +TEST_F(ColumnFamilyTest, FlushTest) { Open(); CreateColumnFamiliesAndReopen({"one", "two"}); ASSERT_OK(Put(0, "foo", "v1")); @@ -577,7 +576,7 @@ TEST(ColumnFamilyTest, FlushTest) { } // Makes sure that obsolete log files get deleted -TEST(ColumnFamilyTest, LogDeletionTest) { +TEST_F(ColumnFamilyTest, LogDeletionTest) { db_options_.max_total_wal_size = std::numeric_limits::max(); column_family_options_.write_buffer_size = 100000; // 100KB Open(); @@ -644,7 +643,7 @@ TEST(ColumnFamilyTest, LogDeletionTest) { } // Makes sure that obsolete log files get deleted -TEST(ColumnFamilyTest, DifferentWriteBufferSizes) { +TEST_F(ColumnFamilyTest, DifferentWriteBufferSizes) { // disable flushing stale column families db_options_.max_total_wal_size = std::numeric_limits::max(); Open(); @@ -738,7 +737,7 @@ TEST(ColumnFamilyTest, DifferentWriteBufferSizes) { Close(); } -TEST(ColumnFamilyTest, MemtableNotSupportSnapshot) { +TEST_F(ColumnFamilyTest, MemtableNotSupportSnapshot) { Open(); auto* s1 = dbfull()->GetSnapshot(); ASSERT_TRUE(s1 != nullptr); @@ -759,7 +758,7 @@ TEST(ColumnFamilyTest, MemtableNotSupportSnapshot) { Close(); } -TEST(ColumnFamilyTest, DifferentMergeOperators) { +TEST_F(ColumnFamilyTest, DifferentMergeOperators) { Open(); CreateColumnFamilies({"first", "second"}); ColumnFamilyOptions default_cf, first, second; @@ -789,7 +788,7 @@ TEST(ColumnFamilyTest, DifferentMergeOperators) { Close(); } -TEST(ColumnFamilyTest, 
DifferentCompactionStyles) { +TEST_F(ColumnFamilyTest, DifferentCompactionStyles) { Open(); CreateColumnFamilies({"one", "two"}); ColumnFamilyOptions default_cf, one, two; @@ -864,7 +863,7 @@ std::string IterStatus(Iterator* iter) { } } // anonymous namespace -TEST(ColumnFamilyTest, NewIteratorsTest) { +TEST_F(ColumnFamilyTest, NewIteratorsTest) { // iter == 0 -- no tailing // iter == 2 -- tailing for (int iter = 0; iter < 2; ++iter) { @@ -909,7 +908,7 @@ TEST(ColumnFamilyTest, NewIteratorsTest) { } } -TEST(ColumnFamilyTest, ReadOnlyDBTest) { +TEST_F(ColumnFamilyTest, ReadOnlyDBTest) { Open(); CreateColumnFamiliesAndReopen({"one", "two", "three", "four"}); ASSERT_OK(Put(0, "a", "b")); @@ -959,7 +958,7 @@ TEST(ColumnFamilyTest, ReadOnlyDBTest) { ASSERT_TRUE(!s.ok()); } -TEST(ColumnFamilyTest, DontRollEmptyLogs) { +TEST_F(ColumnFamilyTest, DontRollEmptyLogs) { Open(); CreateColumnFamiliesAndReopen({"one", "two", "three", "four"}); @@ -981,7 +980,7 @@ TEST(ColumnFamilyTest, DontRollEmptyLogs) { Close(); } -TEST(ColumnFamilyTest, FlushStaleColumnFamilies) { +TEST_F(ColumnFamilyTest, FlushStaleColumnFamilies) { Open(); CreateColumnFamilies({"one", "two"}); ColumnFamilyOptions default_cf, one, two; @@ -1010,7 +1009,7 @@ TEST(ColumnFamilyTest, FlushStaleColumnFamilies) { Close(); } -TEST(ColumnFamilyTest, CreateMissingColumnFamilies) { +TEST_F(ColumnFamilyTest, CreateMissingColumnFamilies) { Status s = TryOpen({"one", "two"}); ASSERT_TRUE(!s.ok()); db_options_.create_missing_column_families = true; @@ -1019,7 +1018,7 @@ TEST(ColumnFamilyTest, CreateMissingColumnFamilies) { Close(); } -TEST(ColumnFamilyTest, SanitizeOptions) { +TEST_F(ColumnFamilyTest, SanitizeOptions) { DBOptions db_options; for (int i = 1; i <= 3; i++) { for (int j = 1; j <= 3; j++) { @@ -1044,5 +1043,6 @@ TEST(ColumnFamilyTest, SanitizeOptions) { } // namespace rocksdb int main(int argc, char** argv) { - return rocksdb::test::RunAllTests(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/db/compact_files_test.cc b/db/compact_files_test.cc index 129439665..b7255c2e9 100644 --- a/db/compact_files_test.cc +++ b/db/compact_files_test.cc @@ -13,7 +13,7 @@ namespace rocksdb { -class CompactFilesTest { +class CompactFilesTest : public testing::Test { public: CompactFilesTest() { env_ = Env::Default(); @@ -53,7 +53,7 @@ class FlushedFileCollector : public EventListener { std::mutex mutex_; }; -TEST(CompactFilesTest, ObsoleteFiles) { +TEST_F(CompactFilesTest, ObsoleteFiles) { Options options; // to trigger compaction more easily const int kWriteBufferSize = 10000; @@ -100,5 +100,6 @@ TEST(CompactFilesTest, ObsoleteFiles) { } // namespace rocksdb int main(int argc, char** argv) { - return rocksdb::test::RunAllTests(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/db/compaction_job_test.cc b/db/compaction_job_test.cc index 7fb4b4def..fb7ad2332 100644 --- a/db/compaction_job_test.cc +++ b/db/compaction_job_test.cc @@ -20,7 +20,7 @@ namespace rocksdb { // TODO(icanadi) Make it simpler once we mock out VersionSet -class CompactionJobTest { +class CompactionJobTest : public testing::Test { public: CompactionJobTest() : env_(Env::Default()), @@ -134,7 +134,7 @@ class CompactionJobTest { std::shared_ptr mock_table_factory_; }; -TEST(CompactionJobTest, Simple) { +TEST_F(CompactionJobTest, Simple) { auto cfd = versions_->GetColumnFamilySet()->GetDefault(); auto expected_results = CreateTwoFiles(); @@ -179,4 +179,7 @@ TEST(CompactionJobTest, Simple) { } // namespace rocksdb 
-int main(int argc, char** argv) { return rocksdb::test::RunAllTests(); } +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/db/compaction_picker_test.cc b/db/compaction_picker_test.cc index 811b970c1..7be3a17ea 100644 --- a/db/compaction_picker_test.cc +++ b/db/compaction_picker_test.cc @@ -19,7 +19,7 @@ class CountingLogger : public Logger { size_t log_count; }; -class CompactionPickerTest { +class CompactionPickerTest : public testing::Test { public: const Comparator* ucmp_; InternalKeyComparator icmp_; @@ -93,7 +93,7 @@ class CompactionPickerTest { } }; -TEST(CompactionPickerTest, Empty) { +TEST_F(CompactionPickerTest, Empty) { NewVersionStorage(6, kCompactionStyleLevel); UpdateVersionStorageInfo(); std::unique_ptr compaction(level_compaction_picker.PickCompaction( @@ -101,7 +101,7 @@ TEST(CompactionPickerTest, Empty) { ASSERT_TRUE(compaction.get() == nullptr); } -TEST(CompactionPickerTest, Single) { +TEST_F(CompactionPickerTest, Single) { NewVersionStorage(6, kCompactionStyleLevel); mutable_cf_options_.level0_file_num_compaction_trigger = 2; Add(0, 1U, "p", "q"); @@ -112,7 +112,7 @@ TEST(CompactionPickerTest, Single) { ASSERT_TRUE(compaction.get() == nullptr); } -TEST(CompactionPickerTest, Level0Trigger) { +TEST_F(CompactionPickerTest, Level0Trigger) { NewVersionStorage(6, kCompactionStyleLevel); mutable_cf_options_.level0_file_num_compaction_trigger = 2; Add(0, 1U, "150", "200"); @@ -128,7 +128,7 @@ TEST(CompactionPickerTest, Level0Trigger) { ASSERT_EQ(2U, compaction->input(0, 1)->fd.GetNumber()); } -TEST(CompactionPickerTest, Level1Trigger) { +TEST_F(CompactionPickerTest, Level1Trigger) { NewVersionStorage(6, kCompactionStyleLevel); Add(1, 66U, "150", "200", 1000000000U); UpdateVersionStorageInfo(); @@ -140,7 +140,7 @@ TEST(CompactionPickerTest, Level1Trigger) { ASSERT_EQ(66U, compaction->input(0, 0)->fd.GetNumber()); } -TEST(CompactionPickerTest, Level1Trigger2) { +TEST_F(CompactionPickerTest, Level1Trigger2) { NewVersionStorage(6, kCompactionStyleLevel); Add(1, 66U, "150", "200", 1000000001U); Add(1, 88U, "201", "300", 1000000000U); @@ -159,7 +159,7 @@ TEST(CompactionPickerTest, Level1Trigger2) { ASSERT_EQ(7U, compaction->input(1, 1)->fd.GetNumber()); } -TEST(CompactionPickerTest, LevelMaxScore) { +TEST_F(CompactionPickerTest, LevelMaxScore) { NewVersionStorage(6, kCompactionStyleLevel); mutable_cf_options_.target_file_size_base = 10000000; mutable_cf_options_.target_file_size_multiplier = 10; @@ -185,7 +185,7 @@ TEST(CompactionPickerTest, LevelMaxScore) { ASSERT_EQ(7U, compaction->input(0, 0)->fd.GetNumber()); } -TEST(CompactionPickerTest, NeedsCompactionLevel) { +TEST_F(CompactionPickerTest, NeedsCompactionLevel) { const int kLevels = 6; const int kFileCount = 20; @@ -210,7 +210,7 @@ TEST(CompactionPickerTest, NeedsCompactionLevel) { } } -TEST(CompactionPickerTest, Level0TriggerDynamic) { +TEST_F(CompactionPickerTest, Level0TriggerDynamic) { int num_levels = ioptions_.num_levels; ioptions_.level_compaction_dynamic_level_bytes = true; mutable_cf_options_.level0_file_num_compaction_trigger = 2; @@ -232,7 +232,7 @@ TEST(CompactionPickerTest, Level0TriggerDynamic) { ASSERT_EQ(num_levels - 1, compaction->output_level()); } -TEST(CompactionPickerTest, Level0TriggerDynamic2) { +TEST_F(CompactionPickerTest, Level0TriggerDynamic2) { int num_levels = ioptions_.num_levels; ioptions_.level_compaction_dynamic_level_bytes = true; mutable_cf_options_.level0_file_num_compaction_trigger = 2; @@ -256,7 +256,7 @@ 
TEST(CompactionPickerTest, Level0TriggerDynamic2) { ASSERT_EQ(num_levels - 2, compaction->output_level()); } -TEST(CompactionPickerTest, Level0TriggerDynamic3) { +TEST_F(CompactionPickerTest, Level0TriggerDynamic3) { int num_levels = ioptions_.num_levels; ioptions_.level_compaction_dynamic_level_bytes = true; mutable_cf_options_.level0_file_num_compaction_trigger = 2; @@ -281,7 +281,7 @@ TEST(CompactionPickerTest, Level0TriggerDynamic3) { ASSERT_EQ(num_levels - 3, compaction->output_level()); } -TEST(CompactionPickerTest, Level0TriggerDynamic4) { +TEST_F(CompactionPickerTest, Level0TriggerDynamic4) { int num_levels = ioptions_.num_levels; ioptions_.level_compaction_dynamic_level_bytes = true; mutable_cf_options_.level0_file_num_compaction_trigger = 2; @@ -312,7 +312,7 @@ TEST(CompactionPickerTest, Level0TriggerDynamic4) { ASSERT_EQ(num_levels - 3, compaction->output_level()); } -TEST(CompactionPickerTest, LevelTriggerDynamic4) { +TEST_F(CompactionPickerTest, LevelTriggerDynamic4) { int num_levels = ioptions_.num_levels; ioptions_.level_compaction_dynamic_level_bytes = true; mutable_cf_options_.level0_file_num_compaction_trigger = 2; @@ -341,7 +341,7 @@ TEST(CompactionPickerTest, LevelTriggerDynamic4) { ASSERT_EQ(num_levels - 1, compaction->output_level()); } -TEST(CompactionPickerTest, NeedsCompactionUniversal) { +TEST_F(CompactionPickerTest, NeedsCompactionUniversal) { NewVersionStorage(1, kCompactionStyleUniversal); UniversalCompactionPicker universal_compaction_picker( ioptions_, &icmp_); @@ -360,7 +360,7 @@ TEST(CompactionPickerTest, NeedsCompactionUniversal) { } } -TEST(CompactionPickerTest, NeedsCompactionFIFO) { +TEST_F(CompactionPickerTest, NeedsCompactionFIFO) { NewVersionStorage(1, kCompactionStyleFIFO); const int kFileCount = mutable_cf_options_.level0_file_num_compaction_trigger * 3; @@ -390,4 +390,7 @@ TEST(CompactionPickerTest, NeedsCompactionFIFO) { } // namespace rocksdb -int main(int argc, char** argv) { return rocksdb::test::RunAllTests(); } +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/db/comparator_db_test.cc b/db/comparator_db_test.cc index 489b20e08..4d28d7c80 100644 --- a/db/comparator_db_test.cc +++ b/db/comparator_db_test.cc @@ -248,7 +248,7 @@ class TwoStrComparator : public Comparator { }; } // namespace -class ComparatorDBTest { +class ComparatorDBTest : public testing::Test { private: std::string dbname_; Env* env_; @@ -301,7 +301,7 @@ class ComparatorDBTest { } }; -TEST(ComparatorDBTest, Bytewise) { +TEST_F(ComparatorDBTest, Bytewise) { for (int rand_seed = 301; rand_seed < 306; rand_seed++) { DestroyAndReopen(); Random rnd(rand_seed); @@ -311,7 +311,7 @@ TEST(ComparatorDBTest, Bytewise) { } } -TEST(ComparatorDBTest, SimpleSuffixReverseComparator) { +TEST_F(ComparatorDBTest, SimpleSuffixReverseComparator) { SetOwnedComparator(new test::SimpleSuffixReverseComparator()); for (int rnd_seed = 301; rnd_seed < 316; rnd_seed++) { @@ -337,7 +337,7 @@ TEST(ComparatorDBTest, SimpleSuffixReverseComparator) { } } -TEST(ComparatorDBTest, Uint64Comparator) { +TEST_F(ComparatorDBTest, Uint64Comparator) { SetOwnedComparator(test::Uint64Comparator()); for (int rnd_seed = 301; rnd_seed < 316; rnd_seed++) { @@ -361,7 +361,7 @@ TEST(ComparatorDBTest, Uint64Comparator) { } } -TEST(ComparatorDBTest, DoubleComparator) { +TEST_F(ComparatorDBTest, DoubleComparator) { SetOwnedComparator(new DoubleComparator()); for (int rnd_seed = 301; rnd_seed < 316; rnd_seed++) { @@ -386,7 +386,7 @@ TEST(ComparatorDBTest, 
DoubleComparator) { } } -TEST(ComparatorDBTest, HashComparator) { +TEST_F(ComparatorDBTest, HashComparator) { SetOwnedComparator(new HashComparator()); for (int rnd_seed = 301; rnd_seed < 316; rnd_seed++) { @@ -405,7 +405,7 @@ TEST(ComparatorDBTest, HashComparator) { } } -TEST(ComparatorDBTest, TwoStrComparator) { +TEST_F(ComparatorDBTest, TwoStrComparator) { SetOwnedComparator(new TwoStrComparator()); for (int rnd_seed = 301; rnd_seed < 316; rnd_seed++) { @@ -433,4 +433,7 @@ TEST(ComparatorDBTest, TwoStrComparator) { } // namespace rocksdb -int main(int argc, char** argv) { return rocksdb::test::RunAllTests(); } +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/db/corruption_test.cc b/db/corruption_test.cc index 2cea9da65..ce31cf61c 100644 --- a/db/corruption_test.cc +++ b/db/corruption_test.cc @@ -29,7 +29,7 @@ namespace rocksdb { static const int kValueSize = 1000; -class CorruptionTest { +class CorruptionTest : public testing::Test { public: test::ErrorEnv env_; std::string dbname_; @@ -226,7 +226,7 @@ class CorruptionTest { } }; -TEST(CorruptionTest, Recovery) { +TEST_F(CorruptionTest, Recovery) { Build(100); Check(100, 100); Corrupt(kLogFile, 19, 1); // WriteBatch tag for first record @@ -239,13 +239,13 @@ TEST(CorruptionTest, Recovery) { Check(36, 36); } -TEST(CorruptionTest, RecoverWriteError) { +TEST_F(CorruptionTest, RecoverWriteError) { env_.writable_file_error_ = true; Status s = TryReopen(); ASSERT_TRUE(!s.ok()); } -TEST(CorruptionTest, NewFileErrorDuringWrite) { +TEST_F(CorruptionTest, NewFileErrorDuringWrite) { // Do enough writing to force minor compaction env_.writable_file_error_ = true; const int num = @@ -268,7 +268,7 @@ TEST(CorruptionTest, NewFileErrorDuringWrite) { Reopen(); } -TEST(CorruptionTest, TableFile) { +TEST_F(CorruptionTest, TableFile) { Build(100); DBImpl* dbi = reinterpret_cast(db_); dbi->TEST_FlushMemTable(); @@ -279,7 +279,7 @@ TEST(CorruptionTest, TableFile) { Check(99, 99); } -TEST(CorruptionTest, TableFileIndexData) { +TEST_F(CorruptionTest, TableFileIndexData) { Build(10000); // Enough to build multiple Tables DBImpl* dbi = reinterpret_cast(db_); dbi->TEST_FlushMemTable(); @@ -289,14 +289,14 @@ TEST(CorruptionTest, TableFileIndexData) { Check(5000, 9999); } -TEST(CorruptionTest, MissingDescriptor) { +TEST_F(CorruptionTest, MissingDescriptor) { Build(1000); RepairDB(); Reopen(); Check(1000, 1000); } -TEST(CorruptionTest, SequenceNumberRecovery) { +TEST_F(CorruptionTest, SequenceNumberRecovery) { ASSERT_OK(db_->Put(WriteOptions(), "foo", "v1")); ASSERT_OK(db_->Put(WriteOptions(), "foo", "v2")); ASSERT_OK(db_->Put(WriteOptions(), "foo", "v3")); @@ -317,7 +317,7 @@ TEST(CorruptionTest, SequenceNumberRecovery) { ASSERT_EQ("v6", v); } -TEST(CorruptionTest, CorruptedDescriptor) { +TEST_F(CorruptionTest, CorruptedDescriptor) { ASSERT_OK(db_->Put(WriteOptions(), "foo", "hello")); DBImpl* dbi = reinterpret_cast(db_); dbi->TEST_FlushMemTable(); @@ -334,7 +334,7 @@ TEST(CorruptionTest, CorruptedDescriptor) { ASSERT_EQ("hello", v); } -TEST(CorruptionTest, CompactionInputError) { +TEST_F(CorruptionTest, CompactionInputError) { Options options; options.max_background_flushes = 0; Reopen(&options); @@ -352,7 +352,7 @@ TEST(CorruptionTest, CompactionInputError) { Check(10000, 10000); } -TEST(CorruptionTest, CompactionInputErrorParanoid) { +TEST_F(CorruptionTest, CompactionInputErrorParanoid) { Options options; options.paranoid_checks = true; options.write_buffer_size = 131072; @@ -395,7 +395,7 @@ 
TEST(CorruptionTest, CompactionInputErrorParanoid) { ASSERT_TRUE(!s.ok()) << "write did not fail in corrupted paranoid db"; } -TEST(CorruptionTest, UnrelatedKeys) { +TEST_F(CorruptionTest, UnrelatedKeys) { Build(10); DBImpl* dbi = reinterpret_cast(db_); dbi->TEST_FlushMemTable(); @@ -411,7 +411,7 @@ TEST(CorruptionTest, UnrelatedKeys) { ASSERT_EQ(Value(1000, &tmp2).ToString(), v); } -TEST(CorruptionTest, FileSystemStateCorrupted) { +TEST_F(CorruptionTest, FileSystemStateCorrupted) { for (int iter = 0; iter < 2; ++iter) { Options options; options.paranoid_checks = true; @@ -447,5 +447,6 @@ TEST(CorruptionTest, FileSystemStateCorrupted) { } // namespace rocksdb int main(int argc, char** argv) { - return rocksdb::test::RunAllTests(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/db/cuckoo_table_db_test.cc b/db/cuckoo_table_db_test.cc index abdc95bf4..8c2113b71 100644 --- a/db/cuckoo_table_db_test.cc +++ b/db/cuckoo_table_db_test.cc @@ -14,7 +14,7 @@ namespace rocksdb { -class CuckooTableDBTest { +class CuckooTableDBTest : public testing::Test { private: std::string dbname_; Env* env_; @@ -106,7 +106,7 @@ class CuckooTableDBTest { } }; -TEST(CuckooTableDBTest, Flush) { +TEST_F(CuckooTableDBTest, Flush) { // Try with empty DB first. ASSERT_TRUE(dbfull() != nullptr); ASSERT_EQ("NOT_FOUND", Get("key2")); @@ -169,7 +169,7 @@ TEST(CuckooTableDBTest, Flush) { ASSERT_EQ("NOT_FOUND", Get("key6")); } -TEST(CuckooTableDBTest, FlushWithDuplicateKeys) { +TEST_F(CuckooTableDBTest, FlushWithDuplicateKeys) { Options options = CurrentOptions(); Reopen(&options); ASSERT_OK(Put("key1", "v1")); @@ -200,7 +200,7 @@ static std::string Uint64Key(uint64_t i) { } } // namespace. -TEST(CuckooTableDBTest, Uint64Comparator) { +TEST_F(CuckooTableDBTest, Uint64Comparator) { Options options = CurrentOptions(); options.comparator = test::Uint64Comparator(); Reopen(&options); @@ -227,7 +227,7 @@ TEST(CuckooTableDBTest, Uint64Comparator) { ASSERT_EQ("v4", Get(Uint64Key(4))); } -TEST(CuckooTableDBTest, CompactionIntoMultipleFiles) { +TEST_F(CuckooTableDBTest, CompactionIntoMultipleFiles) { // Create a big L0 file and check it compacts into multiple files in L1. Options options = CurrentOptions(); options.write_buffer_size = 270 << 10; @@ -250,7 +250,7 @@ TEST(CuckooTableDBTest, CompactionIntoMultipleFiles) { } } -TEST(CuckooTableDBTest, SameKeyInsertedInTwoDifferentFilesAndCompacted) { +TEST_F(CuckooTableDBTest, SameKeyInsertedInTwoDifferentFilesAndCompacted) { // Insert same key twice so that they go to different SST files. Then wait for // compaction and check if the latest value is stored and old value removed. Options options = CurrentOptions(); @@ -278,7 +278,7 @@ TEST(CuckooTableDBTest, SameKeyInsertedInTwoDifferentFilesAndCompacted) { } } -TEST(CuckooTableDBTest, AdaptiveTable) { +TEST_F(CuckooTableDBTest, AdaptiveTable) { Options options = CurrentOptions(); // Write some keys using cuckoo table. 
@@ -315,4 +315,7 @@ TEST(CuckooTableDBTest, AdaptiveTable) { } } // namespace rocksdb -int main(int argc, char** argv) { return rocksdb::test::RunAllTests(); } +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/db/db_iter_test.cc b/db/db_iter_test.cc index f045d7798..aa7cad3de 100644 --- a/db/db_iter_test.cc +++ b/db/db_iter_test.cc @@ -139,14 +139,14 @@ class TestIterator : public Iterator { std::vector> data_; }; -class DBIteratorTest { +class DBIteratorTest : public testing::Test { public: Env* env_; DBIteratorTest() : env_(Env::Default()) {} }; -TEST(DBIteratorTest, DBIteratorPrevNext) { +TEST_F(DBIteratorTest, DBIteratorPrevNext) { Options options; { @@ -289,7 +289,7 @@ TEST(DBIteratorTest, DBIteratorPrevNext) { } } -TEST(DBIteratorTest, DBIteratorEmpty) { +TEST_F(DBIteratorTest, DBIteratorEmpty) { Options options; { @@ -317,7 +317,7 @@ TEST(DBIteratorTest, DBIteratorEmpty) { } } -TEST(DBIteratorTest, DBIteratorUseSkipCountSkips) { +TEST_F(DBIteratorTest, DBIteratorUseSkipCountSkips) { Options options; options.statistics = rocksdb::CreateDBStatistics(); options.merge_operator = MergeOperators::CreateFromStringId("stringappend"); @@ -357,7 +357,7 @@ TEST(DBIteratorTest, DBIteratorUseSkipCountSkips) { ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION), 3u); } -TEST(DBIteratorTest, DBIteratorUseSkip) { +TEST_F(DBIteratorTest, DBIteratorUseSkip) { Options options; options.merge_operator = MergeOperators::CreateFromStringId("stringappend"); { @@ -586,7 +586,7 @@ TEST(DBIteratorTest, DBIteratorUseSkip) { } } -TEST(DBIteratorTest, DBIterator1) { +TEST_F(DBIteratorTest, DBIterator1) { Options options; options.merge_operator = MergeOperators::CreateFromStringId("stringappend"); @@ -610,7 +610,7 @@ TEST(DBIteratorTest, DBIterator1) { ASSERT_EQ(db_iter->key().ToString(), "b"); } -TEST(DBIteratorTest, DBIterator2) { +TEST_F(DBIteratorTest, DBIterator2) { Options options; options.merge_operator = MergeOperators::CreateFromStringId("stringappend"); @@ -633,7 +633,7 @@ TEST(DBIteratorTest, DBIterator2) { ASSERT_TRUE(!db_iter->Valid()); } -TEST(DBIteratorTest, DBIterator3) { +TEST_F(DBIteratorTest, DBIterator3) { Options options; options.merge_operator = MergeOperators::CreateFromStringId("stringappend"); @@ -655,7 +655,7 @@ TEST(DBIteratorTest, DBIterator3) { db_iter->Next(); ASSERT_TRUE(!db_iter->Valid()); } -TEST(DBIteratorTest, DBIterator4) { +TEST_F(DBIteratorTest, DBIterator4) { Options options; options.merge_operator = MergeOperators::CreateFromStringId("stringappend"); @@ -682,7 +682,7 @@ TEST(DBIteratorTest, DBIterator4) { ASSERT_TRUE(!db_iter->Valid()); } -TEST(DBIteratorTest, DBIterator5) { +TEST_F(DBIteratorTest, DBIterator5) { Options options; options.merge_operator = MergeOperators::CreateFromStringId("stringappend"); { @@ -840,7 +840,7 @@ TEST(DBIteratorTest, DBIterator5) { } } -TEST(DBIteratorTest, DBIterator6) { +TEST_F(DBIteratorTest, DBIterator6) { Options options; options.merge_operator = MergeOperators::CreateFromStringId("stringappend"); { @@ -994,7 +994,7 @@ TEST(DBIteratorTest, DBIterator6) { } } -TEST(DBIteratorTest, DBIterator7) { +TEST_F(DBIteratorTest, DBIterator7) { Options options; options.merge_operator = MergeOperators::CreateFromStringId("stringappend"); { @@ -1376,7 +1376,7 @@ TEST(DBIteratorTest, DBIterator7) { ASSERT_TRUE(!db_iter->Valid()); } } -TEST(DBIteratorTest, DBIterator8) { +TEST_F(DBIteratorTest, DBIterator8) { Options options; options.merge_operator = 
MergeOperators::CreateFromStringId("stringappend"); @@ -1402,4 +1402,7 @@ TEST(DBIteratorTest, DBIterator8) { } // namespace rocksdb -int main(int argc, char** argv) { return rocksdb::test::RunAllTests(); } +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/db/db_test.cc b/db/db_test.cc index 4c0155e92..b56030f0b 100644 --- a/db/db_test.cc +++ b/db/db_test.cc @@ -396,7 +396,7 @@ class SpecialEnv : public EnvWrapper { } }; -class DBTest { +class DBTest : public testing::Test { protected: // Sequence of option configurations to try enum OptionConfig { @@ -1294,7 +1294,7 @@ uint64_t GetNumberOfSstFilesForColumnFamily(DB* db, } } // namespace -TEST(DBTest, Empty) { +TEST_F(DBTest, Empty) { do { Options options; options.env = env_; @@ -1356,7 +1356,7 @@ TEST(DBTest, Empty) { } while (ChangeOptions()); } -TEST(DBTest, WriteEmptyBatch) { +TEST_F(DBTest, WriteEmptyBatch) { Options options; options.env = env_; options.write_buffer_size = 100000; @@ -1377,7 +1377,7 @@ TEST(DBTest, WriteEmptyBatch) { ASSERT_EQ("bar", Get(1, "foo")); } -TEST(DBTest, ReadOnlyDB) { +TEST_F(DBTest, ReadOnlyDB) { ASSERT_OK(Put("foo", "v1")); ASSERT_OK(Put("bar", "v2")); ASSERT_OK(Put("foo", "v3")); @@ -1408,7 +1408,7 @@ TEST(DBTest, ReadOnlyDB) { ASSERT_EQ("v2", Get("bar")); } -TEST(DBTest, CompactedDB) { +TEST_F(DBTest, CompactedDB) { const uint64_t kFileSize = 1 << 20; Options options; options.disable_auto_compactions = true; @@ -1503,7 +1503,7 @@ TEST(DBTest, CompactedDB) { // Make sure that when options.block_cache is set, after a new table is // created its index/filter blocks are added to block cache. -TEST(DBTest, IndexAndFilterBlocksOfNewTableAddedToCache) { +TEST_F(DBTest, IndexAndFilterBlocksOfNewTableAddedToCache) { Options options = CurrentOptions(); options.create_if_missing = true; options.statistics = rocksdb::CreateDBStatistics(); @@ -1554,7 +1554,7 @@ TEST(DBTest, IndexAndFilterBlocksOfNewTableAddedToCache) { TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT)); } -TEST(DBTest, GetPropertiesOfAllTablesTest) { +TEST_F(DBTest, GetPropertiesOfAllTablesTest) { Options options = CurrentOptions(); options.max_background_flushes = 0; Reopen(options); @@ -1590,7 +1590,7 @@ TEST(DBTest, GetPropertiesOfAllTablesTest) { VerifyTableProperties(db_, 10 + 11 + 12 + 13); } -TEST(DBTest, LevelLimitReopen) { +TEST_F(DBTest, LevelLimitReopen) { Options options = CurrentOptions(); CreateAndReopenWithCF({"pikachu"}, options); @@ -1612,7 +1612,7 @@ TEST(DBTest, LevelLimitReopen) { ASSERT_OK(TryReopenWithColumnFamilies({"default", "pikachu"}, options)); } -TEST(DBTest, PutDeleteGet) { +TEST_F(DBTest, PutDeleteGet) { do { CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); ASSERT_OK(Put(1, "foo", "v1")); @@ -1624,8 +1624,7 @@ TEST(DBTest, PutDeleteGet) { } while (ChangeOptions()); } - -TEST(DBTest, GetFromImmutableLayer) { +TEST_F(DBTest, GetFromImmutableLayer) { do { Options options; options.env = env_; @@ -1647,7 +1646,7 @@ TEST(DBTest, GetFromImmutableLayer) { } while (ChangeOptions()); } -TEST(DBTest, GetFromVersions) { +TEST_F(DBTest, GetFromVersions) { do { CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); ASSERT_OK(Put(1, "foo", "v1")); @@ -1657,7 +1656,7 @@ TEST(DBTest, GetFromVersions) { } while (ChangeOptions()); } -TEST(DBTest, GetSnapshot) { +TEST_F(DBTest, GetSnapshot) { anon::OptionsOverride options_override; options_override.skip_policy = kSkipNoSnapshot; do { @@ -1683,7 +1682,7 @@ TEST(DBTest, GetSnapshot) { } while (ChangeOptions()); } 
-TEST(DBTest, GetSnapshotLink) { +TEST_F(DBTest, GetSnapshotLink) { do { Options options; const std::string snapshot_name = test::TmpDir(env_) + "/snapshot"; @@ -1740,7 +1739,7 @@ TEST(DBTest, GetSnapshotLink) { } while (ChangeOptions()); } -TEST(DBTest, GetLevel0Ordering) { +TEST_F(DBTest, GetLevel0Ordering) { do { CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); // Check that we process level-0 files in correct order. The code @@ -1756,7 +1755,7 @@ TEST(DBTest, GetLevel0Ordering) { } while (ChangeOptions()); } -TEST(DBTest, WrongLevel0Config) { +TEST_F(DBTest, WrongLevel0Config) { Options options = CurrentOptions(); Close(); ASSERT_OK(DestroyDB(dbname_, options)); @@ -1766,7 +1765,7 @@ TEST(DBTest, WrongLevel0Config) { ASSERT_OK(DB::Open(options, dbname_, &db_)); } -TEST(DBTest, GetOrderedByLevels) { +TEST_F(DBTest, GetOrderedByLevels) { do { CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); ASSERT_OK(Put(1, "foo", "v1")); @@ -1779,7 +1778,7 @@ TEST(DBTest, GetOrderedByLevels) { } while (ChangeOptions()); } -TEST(DBTest, GetPicksCorrectFile) { +TEST_F(DBTest, GetPicksCorrectFile) { do { CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); // Arrange to have multiple files in a non-level-0 level. @@ -1795,7 +1794,7 @@ TEST(DBTest, GetPicksCorrectFile) { } while (ChangeOptions()); } -TEST(DBTest, GetEncountersEmptyLevel) { +TEST_F(DBTest, GetEncountersEmptyLevel) { do { Options options = CurrentOptions(); options.max_background_flushes = 0; @@ -1840,7 +1839,7 @@ TEST(DBTest, GetEncountersEmptyLevel) { // KeyMayExist can lead to a few false positives, but not false negatives. // To make test deterministic, use a much larger number of bits per key-20 than // bits in the key, so that false positives are eliminated -TEST(DBTest, KeyMayExist) { +TEST_F(DBTest, KeyMayExist) { do { ReadOptions ropts; std::string value; @@ -1903,7 +1902,7 @@ TEST(DBTest, KeyMayExist) { ChangeOptions(kSkipPlainTable | kSkipHashIndex | kSkipFIFOCompaction)); } -TEST(DBTest, NonBlockingIteration) { +TEST_F(DBTest, NonBlockingIteration) { do { ReadOptions non_blocking_opts, regular_opts; Options options = CurrentOptions(); @@ -1967,7 +1966,7 @@ TEST(DBTest, NonBlockingIteration) { kSkipMmapReads)); } -TEST(DBTest, ManagedNonBlockingIteration) { +TEST_F(DBTest, ManagedNonBlockingIteration) { do { ReadOptions non_blocking_opts, regular_opts; Options options = CurrentOptions(); @@ -2034,7 +2033,7 @@ TEST(DBTest, ManagedNonBlockingIteration) { // A delete is skipped for key if KeyMayExist(key) returns False // Tests Writebatch consistency and proper delete behaviour -TEST(DBTest, FilterDeletes) { +TEST_F(DBTest, FilterDeletes) { do { anon::OptionsOverride options_override; options_override.filter_policy.reset(NewBloomFilterPolicy(20)); @@ -2071,7 +2070,7 @@ TEST(DBTest, FilterDeletes) { } while (ChangeCompactOptions()); } -TEST(DBTest, GetFilterByPrefixBloom) { +TEST_F(DBTest, GetFilterByPrefixBloom) { Options options = last_options_; options.prefix_extractor.reset(NewFixedPrefixTransform(8)); options.statistics = rocksdb::CreateDBStatistics(); @@ -2107,7 +2106,7 @@ TEST(DBTest, GetFilterByPrefixBloom) { ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 2); } -TEST(DBTest, WholeKeyFilterProp) { +TEST_F(DBTest, WholeKeyFilterProp) { Options options = last_options_; options.prefix_extractor.reset(NewFixedPrefixTransform(3)); options.statistics = rocksdb::CreateDBStatistics(); @@ -2256,7 +2255,7 @@ TEST(DBTest, WholeKeyFilterProp) { ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 12); } 
-TEST(DBTest, IterSeekBeforePrev) { +TEST_F(DBTest, IterSeekBeforePrev) { ASSERT_OK(Put("a", "b")); ASSERT_OK(Put("c", "d")); dbfull()->Flush(FlushOptions()); @@ -2278,7 +2277,7 @@ std::string MakeLongKey(size_t length, char c) { } } // namespace -TEST(DBTest, IterLongKeys) { +TEST_F(DBTest, IterLongKeys) { ASSERT_OK(Put(MakeLongKey(20, 0), "0")); ASSERT_OK(Put(MakeLongKey(32, 2), "2")); ASSERT_OK(Put("a", "b")); @@ -2311,8 +2310,7 @@ TEST(DBTest, IterLongKeys) { delete iter; } - -TEST(DBTest, IterNextWithNewerSeq) { +TEST_F(DBTest, IterNextWithNewerSeq) { ASSERT_OK(Put("0", "0")); dbfull()->Flush(FlushOptions()); ASSERT_OK(Put("a", "b")); @@ -2333,7 +2331,7 @@ TEST(DBTest, IterNextWithNewerSeq) { delete iter; } -TEST(DBTest, IterPrevWithNewerSeq) { +TEST_F(DBTest, IterPrevWithNewerSeq) { ASSERT_OK(Put("0", "0")); dbfull()->Flush(FlushOptions()); ASSERT_OK(Put("a", "b")); @@ -2358,7 +2356,7 @@ TEST(DBTest, IterPrevWithNewerSeq) { delete iter; } -TEST(DBTest, IterPrevWithNewerSeq2) { +TEST_F(DBTest, IterPrevWithNewerSeq2) { ASSERT_OK(Put("0", "0")); dbfull()->Flush(FlushOptions()); ASSERT_OK(Put("a", "b")); @@ -2381,7 +2379,7 @@ TEST(DBTest, IterPrevWithNewerSeq2) { delete iter; } -TEST(DBTest, IterEmpty) { +TEST_F(DBTest, IterEmpty) { do { CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); Iterator* iter = db_->NewIterator(ReadOptions(), handles_[1]); @@ -2399,7 +2397,7 @@ TEST(DBTest, IterEmpty) { } while (ChangeCompactOptions()); } -TEST(DBTest, IterSingle) { +TEST_F(DBTest, IterSingle) { do { CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); ASSERT_OK(Put(1, "a", "va")); @@ -2440,7 +2438,7 @@ TEST(DBTest, IterSingle) { } while (ChangeCompactOptions()); } -TEST(DBTest, IterMulti) { +TEST_F(DBTest, IterMulti) { do { CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); ASSERT_OK(Put(1, "a", "va")); @@ -2529,7 +2527,7 @@ TEST(DBTest, IterMulti) { // Check that we can skip over a run of user keys // by using reseek rather than sequential scan -TEST(DBTest, IterReseek) { +TEST_F(DBTest, IterReseek) { anon::OptionsOverride options_override; options_override.skip_policy = kSkipNoSnapshot; Options options = CurrentOptions(options_override); @@ -2614,7 +2612,7 @@ TEST(DBTest, IterReseek) { delete iter; } -TEST(DBTest, IterSmallAndLargeMix) { +TEST_F(DBTest, IterSmallAndLargeMix) { do { CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); ASSERT_OK(Put(1, "a", "va")); @@ -2655,7 +2653,7 @@ TEST(DBTest, IterSmallAndLargeMix) { } while (ChangeCompactOptions()); } -TEST(DBTest, IterMultiWithDelete) { +TEST_F(DBTest, IterMultiWithDelete) { do { CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); ASSERT_OK(Put(1, "ka", "va")); @@ -2680,7 +2678,7 @@ TEST(DBTest, IterMultiWithDelete) { } while (ChangeOptions()); } -TEST(DBTest, IterPrevMaxSkip) { +TEST_F(DBTest, IterPrevMaxSkip) { do { CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); for (int i = 0; i < 2; i++) { @@ -2710,7 +2708,7 @@ TEST(DBTest, IterPrevMaxSkip) { } while (ChangeOptions(kSkipMergePut | kSkipNoSeekToLast)); } -TEST(DBTest, IterWithSnapshot) { +TEST_F(DBTest, IterWithSnapshot) { anon::OptionsOverride options_override; options_override.skip_policy = kSkipNoSnapshot; do { @@ -2756,7 +2754,7 @@ TEST(DBTest, IterWithSnapshot) { } while (ChangeOptions(kSkipHashCuckoo)); } -TEST(DBTest, Recover) { +TEST_F(DBTest, Recover) { do { CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); ASSERT_OK(Put(1, "foo", "v1")); @@ -2779,7 +2777,7 @@ TEST(DBTest, Recover) { } while (ChangeOptions()); } -TEST(DBTest, RecoverWithTableHandle) { 
+TEST_F(DBTest, RecoverWithTableHandle) { do { Options options; options.create_if_missing = true; @@ -2817,7 +2815,7 @@ TEST(DBTest, RecoverWithTableHandle) { } while (ChangeOptions()); } -TEST(DBTest, IgnoreRecoveredLog) { +TEST_F(DBTest, IgnoreRecoveredLog) { std::string backup_logs = dbname_ + "/backup_logs"; // delete old files in backup_logs directory @@ -2906,7 +2904,7 @@ TEST(DBTest, IgnoreRecoveredLog) { } while (ChangeOptions(kSkipHashCuckoo)); } -TEST(DBTest, RollLog) { +TEST_F(DBTest, RollLog) { do { CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); ASSERT_OK(Put(1, "foo", "v1")); @@ -2923,7 +2921,7 @@ TEST(DBTest, RollLog) { } while (ChangeOptions()); } -TEST(DBTest, WAL) { +TEST_F(DBTest, WAL) { do { CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); WriteOptions writeOpt = WriteOptions(); @@ -2957,7 +2955,7 @@ TEST(DBTest, WAL) { } while (ChangeCompactOptions()); } -TEST(DBTest, CheckLock) { +TEST_F(DBTest, CheckLock) { do { DB* localdb; Options options = CurrentOptions(); @@ -2968,7 +2966,7 @@ TEST(DBTest, CheckLock) { } while (ChangeCompactOptions()); } -TEST(DBTest, FlushMultipleMemtable) { +TEST_F(DBTest, FlushMultipleMemtable) { do { Options options = CurrentOptions(); WriteOptions writeOpt = WriteOptions(); @@ -2986,7 +2984,7 @@ TEST(DBTest, FlushMultipleMemtable) { } while (ChangeCompactOptions()); } -TEST(DBTest, NumImmutableMemTable) { +TEST_F(DBTest, NumImmutableMemTable) { do { Options options = CurrentOptions(); WriteOptions writeOpt = WriteOptions(); @@ -3101,7 +3099,7 @@ class SleepingBackgroundTask { bool done_with_sleep_; }; -TEST(DBTest, FlushEmptyColumnFamily) { +TEST_F(DBTest, FlushEmptyColumnFamily) { // Block flush thread and disable compaction thread env_->SetBackgroundThreads(1, Env::HIGH); env_->SetBackgroundThreads(1, Env::LOW); @@ -3144,7 +3142,7 @@ TEST(DBTest, FlushEmptyColumnFamily) { sleeping_task_low.WaitUntilDone(); } -TEST(DBTest, GetProperty) { +TEST_F(DBTest, GetProperty) { // Set sizes to both background thread pool to be 1 and block them. env_->SetBackgroundThreads(1, Env::HIGH); env_->SetBackgroundThreads(1, Env::LOW); @@ -3292,7 +3290,7 @@ TEST(DBTest, GetProperty) { } } -TEST(DBTest, FLUSH) { +TEST_F(DBTest, FLUSH) { do { CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); WriteOptions writeOpt = WriteOptions(); @@ -3337,7 +3335,7 @@ TEST(DBTest, FLUSH) { } while (ChangeCompactOptions()); } -TEST(DBTest, RecoveryWithEmptyLog) { +TEST_F(DBTest, RecoveryWithEmptyLog) { do { CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); ASSERT_OK(Put(1, "foo", "v1")); @@ -3352,7 +3350,7 @@ TEST(DBTest, RecoveryWithEmptyLog) { // Check that writes done during a memtable compaction are recovered // if the database is shutdown during the memtable compaction. 
-TEST(DBTest, RecoverDuringMemtableCompaction) { +TEST_F(DBTest, RecoverDuringMemtableCompaction) { do { Options options; options.env = env_; @@ -3377,7 +3375,7 @@ TEST(DBTest, RecoverDuringMemtableCompaction) { // false positive TSAN report on shared_ptr -- // https://groups.google.com/forum/#!topic/thread-sanitizer/vz_s-t226Vg #ifndef ROCKSDB_TSAN_RUN -TEST(DBTest, FlushSchedule) { +TEST_F(DBTest, FlushSchedule) { Options options = CurrentOptions(); options.disable_auto_compactions = true; options.level0_stop_writes_trigger = 1 << 10; @@ -3416,7 +3414,7 @@ TEST(DBTest, FlushSchedule) { } #endif // enabled only if not TSAN run -TEST(DBTest, MinorCompactionsHappen) { +TEST_F(DBTest, MinorCompactionsHappen) { do { Options options; options.write_buffer_size = 10000; @@ -3444,7 +3442,7 @@ TEST(DBTest, MinorCompactionsHappen) { } while (ChangeCompactOptions()); } -TEST(DBTest, ManifestRollOver) { +TEST_F(DBTest, ManifestRollOver) { do { Options options; options.max_manifest_file_size = 10 ; // 10 bytes @@ -3468,7 +3466,7 @@ TEST(DBTest, ManifestRollOver) { } while (ChangeCompactOptions()); } -TEST(DBTest, IdentityAcrossRestarts) { +TEST_F(DBTest, IdentityAcrossRestarts) { do { std::string id1; ASSERT_OK(db_->GetDbIdentity(id1)); @@ -3490,7 +3488,7 @@ TEST(DBTest, IdentityAcrossRestarts) { } while (ChangeCompactOptions()); } -TEST(DBTest, RecoverWithLargeLog) { +TEST_F(DBTest, RecoverWithLargeLog) { do { { Options options = CurrentOptions(); @@ -3517,7 +3515,7 @@ TEST(DBTest, RecoverWithLargeLog) { } while (ChangeCompactOptions()); } -TEST(DBTest, CompactionsGenerateMultipleFiles) { +TEST_F(DBTest, CompactionsGenerateMultipleFiles) { Options options; options.write_buffer_size = 100000000; // Large write buffer options = CurrentOptions(options); @@ -3544,7 +3542,7 @@ TEST(DBTest, CompactionsGenerateMultipleFiles) { } } -TEST(DBTest, CompactionTrigger) { +TEST_F(DBTest, CompactionTrigger) { Options options; options.write_buffer_size = 100<<10; //100KB options.num_levels = 3; @@ -3601,7 +3599,7 @@ Options DeletionTriggerOptions() { } } // anonymous namespace -TEST(DBTest, CompactionDeletionTrigger) { +TEST_F(DBTest, CompactionDeletionTrigger) { for (int tid = 0; tid < 2; ++tid) { uint64_t db_size[2]; Options options = CurrentOptions(DeletionTriggerOptions()); @@ -3637,7 +3635,7 @@ TEST(DBTest, CompactionDeletionTrigger) { } } -TEST(DBTest, CompactionDeletionTriggerReopen) { +TEST_F(DBTest, CompactionDeletionTriggerReopen) { for (int tid = 0; tid < 2; ++tid) { uint64_t db_size[3]; Options options = CurrentOptions(DeletionTriggerOptions()); @@ -3823,7 +3821,7 @@ class ChangeFilterFactory : public CompactionFilterFactory { // 1. A lot of magic numbers ("11" or "12"). // 2. Made assumption on the memtable flush conditions, which may change from // time to time. 
-TEST(DBTest, UniversalCompactionTrigger) { +TEST_F(DBTest, UniversalCompactionTrigger) { Options options; options.compaction_style = kCompactionStyleUniversal; options.write_buffer_size = 100<<10; //100KB @@ -3961,7 +3959,7 @@ TEST(DBTest, UniversalCompactionTrigger) { } } -TEST(DBTest, UniversalCompactionSizeAmplification) { +TEST_F(DBTest, UniversalCompactionSizeAmplification) { Options options; options.compaction_style = kCompactionStyleUniversal; options.write_buffer_size = 100<<10; //100KB @@ -4001,7 +3999,7 @@ TEST(DBTest, UniversalCompactionSizeAmplification) { ASSERT_EQ(NumTableFilesAtLevel(0, 1), 1); } -TEST(DBTest, UniversalCompactionOptions) { +TEST_F(DBTest, UniversalCompactionOptions) { Options options; options.compaction_style = kCompactionStyleUniversal; options.write_buffer_size = 100<<10; //100KB @@ -4034,7 +4032,7 @@ TEST(DBTest, UniversalCompactionOptions) { } } -TEST(DBTest, UniversalCompactionStopStyleSimilarSize) { +TEST_F(DBTest, UniversalCompactionStopStyleSimilarSize) { Options options = CurrentOptions(); options.compaction_style = kCompactionStyleUniversal; options.write_buffer_size = 100<<10; //100KB @@ -4117,7 +4115,7 @@ TEST(DBTest, UniversalCompactionStopStyleSimilarSize) { ASSERT_EQ(NumTableFilesAtLevel(0), 4); } -TEST(DBTest, CompressedCache) { +TEST_F(DBTest, CompressedCache) { if (!SnappyCompressionSupported()) { return; } @@ -4241,7 +4239,7 @@ static std::string CompressibleString(Random* rnd, int len) { return r; } -TEST(DBTest, UniversalCompactionCompressRatio1) { +TEST_F(DBTest, UniversalCompactionCompressRatio1) { if (!SnappyCompressionSupported()) { return; } @@ -4309,7 +4307,7 @@ TEST(DBTest, UniversalCompactionCompressRatio1) { 110000 * 11 * 0.8 + 110000 * 2); } -TEST(DBTest, UniversalCompactionCompressRatio2) { +TEST_F(DBTest, UniversalCompactionCompressRatio2) { if (!SnappyCompressionSupported()) { return; } @@ -4340,7 +4338,7 @@ TEST(DBTest, UniversalCompactionCompressRatio2) { 120000 * 12 * 0.8 + 120000 * 2); } -TEST(DBTest, FailMoreDbPaths) { +TEST_F(DBTest, FailMoreDbPaths) { Options options = CurrentOptions(); options.db_paths.emplace_back(dbname_, 10000000); options.db_paths.emplace_back(dbname_ + "_2", 1000000); @@ -4350,7 +4348,7 @@ TEST(DBTest, FailMoreDbPaths) { ASSERT_TRUE(TryReopen(options).IsNotSupported()); } -TEST(DBTest, UniversalCompactionSecondPathRatio) { +TEST_F(DBTest, UniversalCompactionSecondPathRatio) { if (!SnappyCompressionSupported()) { return; } @@ -4447,7 +4445,7 @@ TEST(DBTest, UniversalCompactionSecondPathRatio) { Destroy(options); } -TEST(DBTest, LevelCompactionThirdPath) { +TEST_F(DBTest, LevelCompactionThirdPath) { Options options = CurrentOptions(); options.db_paths.emplace_back(dbname_, 500 * 1024); options.db_paths.emplace_back(dbname_ + "_2", 4 * 1024 * 1024); @@ -4560,7 +4558,7 @@ TEST(DBTest, LevelCompactionThirdPath) { Destroy(options); } -TEST(DBTest, LevelCompactionPathUse) { +TEST_F(DBTest, LevelCompactionPathUse) { Options options = CurrentOptions(); options.db_paths.emplace_back(dbname_, 500 * 1024); options.db_paths.emplace_back(dbname_ + "_2", 4 * 1024 * 1024); @@ -4674,7 +4672,7 @@ TEST(DBTest, LevelCompactionPathUse) { Destroy(options); } -TEST(DBTest, UniversalCompactionFourPaths) { +TEST_F(DBTest, UniversalCompactionFourPaths) { Options options; options.db_paths.emplace_back(dbname_, 300 * 1024); options.db_paths.emplace_back(dbname_ + "_2", 300 * 1024); @@ -4792,7 +4790,7 @@ void CheckColumnFamilyMeta(const ColumnFamilyMetaData& cf_meta) { ASSERT_EQ(cf_meta.size, cf_size); } -TEST(DBTest, 
ColumnFamilyMetaDataTest) { +TEST_F(DBTest, ColumnFamilyMetaDataTest) { Options options = CurrentOptions(); options.create_if_missing = true; DestroyAndReopen(options); @@ -4807,7 +4805,7 @@ TEST(DBTest, ColumnFamilyMetaDataTest) { } } -TEST(DBTest, ConvertCompactionStyle) { +TEST_F(DBTest, ConvertCompactionStyle) { Random rnd(301); int max_key_level_insert = 200; int max_key_universal_insert = 600; @@ -4981,7 +4979,7 @@ bool MinLevelToCompress(CompressionType& type, Options& options, int wbits, } } // namespace -TEST(DBTest, MinLevelToCompress1) { +TEST_F(DBTest, MinLevelToCompress1) { Options options = CurrentOptions(); CompressionType type = kSnappyCompression; if (!MinLevelToCompress(type, options, -14, -1, 0)) { @@ -5001,7 +4999,7 @@ TEST(DBTest, MinLevelToCompress1) { MinLevelHelper(this, options); } -TEST(DBTest, MinLevelToCompress2) { +TEST_F(DBTest, MinLevelToCompress2) { Options options = CurrentOptions(); CompressionType type = kSnappyCompression; if (!MinLevelToCompress(type, options, 15, -1, 0)) { @@ -5021,7 +5019,7 @@ TEST(DBTest, MinLevelToCompress2) { MinLevelHelper(this, options); } -TEST(DBTest, RepeatedWritesToSameKey) { +TEST_F(DBTest, RepeatedWritesToSameKey) { do { Options options; options.env = env_; @@ -5044,7 +5042,7 @@ TEST(DBTest, RepeatedWritesToSameKey) { } while (ChangeCompactOptions()); } -TEST(DBTest, InPlaceUpdate) { +TEST_F(DBTest, InPlaceUpdate) { do { Options options; options.create_if_missing = true; @@ -5068,7 +5066,7 @@ TEST(DBTest, InPlaceUpdate) { } while (ChangeCompactOptions()); } -TEST(DBTest, InPlaceUpdateLargeNewValue) { +TEST_F(DBTest, InPlaceUpdateLargeNewValue) { do { Options options; options.create_if_missing = true; @@ -5092,8 +5090,7 @@ TEST(DBTest, InPlaceUpdateLargeNewValue) { } while (ChangeCompactOptions()); } - -TEST(DBTest, InPlaceUpdateCallbackSmallerSize) { +TEST_F(DBTest, InPlaceUpdateCallbackSmallerSize) { do { Options options; options.create_if_missing = true; @@ -5122,7 +5119,7 @@ TEST(DBTest, InPlaceUpdateCallbackSmallerSize) { } while (ChangeCompactOptions()); } -TEST(DBTest, InPlaceUpdateCallbackSmallerVarintSize) { +TEST_F(DBTest, InPlaceUpdateCallbackSmallerVarintSize) { do { Options options; options.create_if_missing = true; @@ -5151,7 +5148,7 @@ TEST(DBTest, InPlaceUpdateCallbackSmallerVarintSize) { } while (ChangeCompactOptions()); } -TEST(DBTest, InPlaceUpdateCallbackLargeNewValue) { +TEST_F(DBTest, InPlaceUpdateCallbackLargeNewValue) { do { Options options; options.create_if_missing = true; @@ -5178,7 +5175,7 @@ TEST(DBTest, InPlaceUpdateCallbackLargeNewValue) { } while (ChangeCompactOptions()); } -TEST(DBTest, InPlaceUpdateCallbackNoAction) { +TEST_F(DBTest, InPlaceUpdateCallbackNoAction) { do { Options options; options.create_if_missing = true; @@ -5198,7 +5195,7 @@ TEST(DBTest, InPlaceUpdateCallbackNoAction) { } while (ChangeCompactOptions()); } -TEST(DBTest, CompactionFilter) { +TEST_F(DBTest, CompactionFilter) { Options options = CurrentOptions(); options.max_open_files = -1; options.num_levels = 3; @@ -5349,7 +5346,7 @@ TEST(DBTest, CompactionFilter) { // Tests the edge case where compaction does not produce any output -- all // entries are deleted. The compaction should create bunch of 'DeleteFile' // entries in VersionEdit, but none of the 'AddFile's. 
-TEST(DBTest, CompactionFilterDeletesAll) { +TEST_F(DBTest, CompactionFilterDeletesAll) { Options options; options.compaction_filter_factory = std::make_shared(); options.disable_auto_compactions = true; @@ -5379,7 +5376,7 @@ TEST(DBTest, CompactionFilterDeletesAll) { delete itr; } -TEST(DBTest, CompactionFilterWithValueChange) { +TEST_F(DBTest, CompactionFilterWithValueChange) { do { Options options; options.num_levels = 3; @@ -5430,7 +5427,7 @@ TEST(DBTest, CompactionFilterWithValueChange) { } while (ChangeCompactOptions()); } -TEST(DBTest, CompactionFilterWithMergeOperator) { +TEST_F(DBTest, CompactionFilterWithMergeOperator) { std::string one, two, three, four; PutFixed64(&one, 1); PutFixed64(&two, 2); @@ -5499,7 +5496,7 @@ TEST(DBTest, CompactionFilterWithMergeOperator) { ASSERT_EQ(newvalue, four); } -TEST(DBTest, CompactionFilterContextManual) { +TEST_F(DBTest, CompactionFilterContextManual) { KeepFilterFactory* filter = new KeepFilterFactory(); Options options = CurrentOptions(); @@ -5675,7 +5672,7 @@ class ChangeFilterFactoryV2 : public CompactionFilterFactoryV2 { } }; -TEST(DBTest, CompactionFilterV2) { +TEST_F(DBTest, CompactionFilterV2) { Options options = CurrentOptions(); options.num_levels = 3; options.max_mem_compaction_level = 0; @@ -5765,7 +5762,7 @@ TEST(DBTest, CompactionFilterV2) { delete iter; } -TEST(DBTest, CompactionFilterV2WithValueChange) { +TEST_F(DBTest, CompactionFilterV2WithValueChange) { Options options = CurrentOptions(); options.num_levels = 3; options.max_mem_compaction_level = 0; @@ -5807,7 +5804,7 @@ TEST(DBTest, CompactionFilterV2WithValueChange) { } } -TEST(DBTest, CompactionFilterV2NULLPrefix) { +TEST_F(DBTest, CompactionFilterV2NULLPrefix) { Options options = CurrentOptions(); options.num_levels = 3; options.max_mem_compaction_level = 0; @@ -5858,7 +5855,7 @@ TEST(DBTest, CompactionFilterV2NULLPrefix) { } } -TEST(DBTest, SparseMerge) { +TEST_F(DBTest, SparseMerge) { do { Options options = CurrentOptions(); options.compression = kNoCompression; @@ -5914,7 +5911,7 @@ static bool Between(uint64_t val, uint64_t low, uint64_t high) { return result; } -TEST(DBTest, ApproximateSizes) { +TEST_F(DBTest, ApproximateSizes) { do { Options options; options.write_buffer_size = 100000000; // Large write buffer @@ -5971,7 +5968,7 @@ TEST(DBTest, ApproximateSizes) { kSkipPlainTable | kSkipHashIndex)); } -TEST(DBTest, ApproximateSizes_MixOfSmallAndLarge) { +TEST_F(DBTest, ApproximateSizes_MixOfSmallAndLarge) { do { Options options = CurrentOptions(); options.compression = kNoCompression; @@ -6010,7 +6007,7 @@ TEST(DBTest, ApproximateSizes_MixOfSmallAndLarge) { } while (ChangeOptions(kSkipPlainTable)); } -TEST(DBTest, IteratorPinsRef) { +TEST_F(DBTest, IteratorPinsRef) { do { CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); Put(1, "foo", "hello"); @@ -6036,7 +6033,7 @@ TEST(DBTest, IteratorPinsRef) { } while (ChangeCompactOptions()); } -TEST(DBTest, Snapshot) { +TEST_F(DBTest, Snapshot) { anon::OptionsOverride options_override; options_override.skip_policy = kSkipNoSnapshot; do { @@ -6099,7 +6096,7 @@ TEST(DBTest, Snapshot) { } while (ChangeOptions(kSkipHashCuckoo)); } -TEST(DBTest, HiddenValuesAreRemoved) { +TEST_F(DBTest, HiddenValuesAreRemoved) { anon::OptionsOverride options_override; options_override.skip_policy = kSkipNoSnapshot; do { @@ -6139,7 +6136,7 @@ TEST(DBTest, HiddenValuesAreRemoved) { kSkipPlainTable | kSkipHashCuckoo)); } -TEST(DBTest, CompactBetweenSnapshots) { +TEST_F(DBTest, CompactBetweenSnapshots) { anon::OptionsOverride options_override; 
options_override.skip_policy = kSkipNoSnapshot; do { @@ -6197,7 +6194,7 @@ TEST(DBTest, CompactBetweenSnapshots) { } while (ChangeOptions(kSkipHashCuckoo | kSkipFIFOCompaction)); } -TEST(DBTest, DeletionMarkers1) { +TEST_F(DBTest, DeletionMarkers1) { Options options = CurrentOptions(); options.max_background_flushes = 0; CreateAndReopenWithCF({"pikachu"}, options); @@ -6234,7 +6231,7 @@ TEST(DBTest, DeletionMarkers1) { ASSERT_EQ(AllEntriesFor("foo", 1), "[ v2 ]"); } -TEST(DBTest, DeletionMarkers2) { +TEST_F(DBTest, DeletionMarkers2) { Options options = CurrentOptions(); options.max_background_flushes = 0; CreateAndReopenWithCF({"pikachu"}, options); @@ -6264,7 +6261,7 @@ TEST(DBTest, DeletionMarkers2) { ASSERT_EQ(AllEntriesFor("foo", 1), "[ ]"); } -TEST(DBTest, OverlapInLevel0) { +TEST_F(DBTest, OverlapInLevel0) { do { Options options = CurrentOptions(); options.max_background_flushes = 0; @@ -6309,7 +6306,7 @@ TEST(DBTest, OverlapInLevel0) { } while (ChangeOptions(kSkipUniversalCompaction | kSkipFIFOCompaction)); } -TEST(DBTest, L0_CompactionBug_Issue44_a) { +TEST_F(DBTest, L0_CompactionBug_Issue44_a) { do { CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); ASSERT_OK(Put(1, "b", "v")); @@ -6328,7 +6325,7 @@ TEST(DBTest, L0_CompactionBug_Issue44_a) { } while (ChangeCompactOptions()); } -TEST(DBTest, L0_CompactionBug_Issue44_b) { +TEST_F(DBTest, L0_CompactionBug_Issue44_b) { do { CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); Put(1, "", ""); @@ -6356,7 +6353,7 @@ TEST(DBTest, L0_CompactionBug_Issue44_b) { } while (ChangeCompactOptions()); } -TEST(DBTest, ComparatorCheck) { +TEST_F(DBTest, ComparatorCheck) { class NewComparator : public Comparator { public: virtual const char* Name() const override { @@ -6389,7 +6386,7 @@ TEST(DBTest, ComparatorCheck) { } while (ChangeCompactOptions()); } -TEST(DBTest, CustomComparator) { +TEST_F(DBTest, CustomComparator) { class NumberComparator : public Comparator { public: virtual const char* Name() const override { @@ -6451,7 +6448,7 @@ TEST(DBTest, CustomComparator) { } while (ChangeCompactOptions()); } -TEST(DBTest, ManualCompaction) { +TEST_F(DBTest, ManualCompaction) { Options options = CurrentOptions(); options.max_background_flushes = 0; CreateAndReopenWithCF({"pikachu"}, options); @@ -6502,7 +6499,7 @@ TEST(DBTest, ManualCompaction) { } -TEST(DBTest, ManualCompactionOutputPathId) { +TEST_F(DBTest, ManualCompactionOutputPathId) { Options options = CurrentOptions(); options.create_if_missing = true; options.db_paths.emplace_back(dbname_, 1000000000); @@ -6550,7 +6547,7 @@ TEST(DBTest, ManualCompactionOutputPathId) { .IsInvalidArgument()); } -TEST(DBTest, ManualLevelCompactionOutputPathId) { +TEST_F(DBTest, ManualLevelCompactionOutputPathId) { Options options = CurrentOptions(); options.db_paths.emplace_back(dbname_ + "_2", 2 * 10485760); options.db_paths.emplace_back(dbname_ + "_3", 100 * 10485760); @@ -6619,7 +6616,7 @@ TEST(DBTest, ManualLevelCompactionOutputPathId) { } } -TEST(DBTest, DBOpen_Options) { +TEST_F(DBTest, DBOpen_Options) { Options options = CurrentOptions(); std::string dbname = test::TmpDir(env_) + "/db_options_test"; ASSERT_OK(DestroyDB(dbname, options)); @@ -6658,7 +6655,7 @@ TEST(DBTest, DBOpen_Options) { db = nullptr; } -TEST(DBTest, DBOpen_Change_NumLevels) { +TEST_F(DBTest, DBOpen_Change_NumLevels) { Options options = CurrentOptions(); options.create_if_missing = true; options.max_background_flushes = 0; @@ -6678,7 +6675,7 @@ TEST(DBTest, DBOpen_Change_NumLevels) { ASSERT_TRUE(db_ == nullptr); } -TEST(DBTest, 
DestroyDBMetaDatabase) { +TEST_F(DBTest, DestroyDBMetaDatabase) { std::string dbname = test::TmpDir(env_) + "/db_meta"; ASSERT_OK(env_->CreateDirIfMissing(dbname)); std::string metadbname = MetaDatabaseName(dbname, 0); @@ -6715,7 +6712,7 @@ TEST(DBTest, DestroyDBMetaDatabase) { } // Check that number of files does not grow when writes are dropped -TEST(DBTest, DropWrites) { +TEST_F(DBTest, DropWrites) { do { Options options = CurrentOptions(); options.env = env_; @@ -6748,7 +6745,7 @@ TEST(DBTest, DropWrites) { } // Check background error counter bumped on flush failures. -TEST(DBTest, DropWritesFlush) { +TEST_F(DBTest, DropWritesFlush) { do { Options options = CurrentOptions(); options.env = env_; @@ -6775,7 +6772,7 @@ TEST(DBTest, DropWritesFlush) { // Check that CompactRange() returns failure if there is not enough space left // on device -TEST(DBTest, NoSpaceCompactRange) { +TEST_F(DBTest, NoSpaceCompactRange) { do { Options options = CurrentOptions(); options.env = env_; @@ -6798,7 +6795,7 @@ TEST(DBTest, NoSpaceCompactRange) { } while (ChangeCompactOptions()); } -TEST(DBTest, NonWritableFileSystem) { +TEST_F(DBTest, NonWritableFileSystem) { do { Options options = CurrentOptions(); options.write_buffer_size = 1000; @@ -6819,7 +6816,7 @@ TEST(DBTest, NonWritableFileSystem) { } while (ChangeCompactOptions()); } -TEST(DBTest, ManifestWriteError) { +TEST_F(DBTest, ManifestWriteError) { // Test for the following problem: // (a) Compaction produces file F // (b) Log record containing F is written to MANIFEST file, but Sync() fails @@ -6861,7 +6858,7 @@ TEST(DBTest, ManifestWriteError) { } } -TEST(DBTest, PutFailsParanoid) { +TEST_F(DBTest, PutFailsParanoid) { // Test the following: // (a) A random put fails in paranoid mode (simulate by sync fail) // (b) All other puts have to fail, even if writes would succeed @@ -6906,7 +6903,7 @@ TEST(DBTest, PutFailsParanoid) { ASSERT_TRUE(s.ok()); } -TEST(DBTest, FilesDeletedAfterCompaction) { +TEST_F(DBTest, FilesDeletedAfterCompaction) { do { CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); ASSERT_OK(Put(1, "foo", "v2")); @@ -6920,7 +6917,7 @@ TEST(DBTest, FilesDeletedAfterCompaction) { } while (ChangeCompactOptions()); } -TEST(DBTest, BloomFilter) { +TEST_F(DBTest, BloomFilter) { do { Options options = CurrentOptions(); env_->count_random_reads_ = true; @@ -6972,7 +6969,7 @@ TEST(DBTest, BloomFilter) { } while (ChangeCompactOptions()); } -TEST(DBTest, BloomFilterRate) { +TEST_F(DBTest, BloomFilterRate) { while (ChangeFilterOptions()) { Options options = CurrentOptions(); options.statistics = rocksdb::CreateDBStatistics(); @@ -7000,7 +6997,7 @@ TEST(DBTest, BloomFilterRate) { } } -TEST(DBTest, BloomFilterCompatibility) { +TEST_F(DBTest, BloomFilterCompatibility) { Options options = CurrentOptions(); options.statistics = rocksdb::CreateDBStatistics(); BlockBasedTableOptions table_options; @@ -7029,7 +7026,7 @@ TEST(DBTest, BloomFilterCompatibility) { ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 0); } -TEST(DBTest, BloomFilterReverseCompatibility) { +TEST_F(DBTest, BloomFilterReverseCompatibility) { Options options = CurrentOptions(); options.statistics = rocksdb::CreateDBStatistics(); BlockBasedTableOptions table_options; @@ -7097,7 +7094,7 @@ class WrappedBloom : public FilterPolicy { }; } // namespace -TEST(DBTest, BloomFilterWrapper) { +TEST_F(DBTest, BloomFilterWrapper) { Options options = CurrentOptions(); options.statistics = rocksdb::CreateDBStatistics(); @@ -7132,7 +7129,7 @@ TEST(DBTest, BloomFilterWrapper) { ASSERT_EQ(2U 
* maxKey, policy->GetCounter()); } -TEST(DBTest, SnapshotFiles) { +TEST_F(DBTest, SnapshotFiles) { do { Options options = CurrentOptions(); options.write_buffer_size = 100000000; // Large write buffer @@ -7260,7 +7257,7 @@ TEST(DBTest, SnapshotFiles) { } while (ChangeCompactOptions()); } -TEST(DBTest, CompactOnFlush) { +TEST_F(DBTest, CompactOnFlush) { anon::OptionsOverride options_override; options_override.skip_policy = kSkipNoSnapshot; do { @@ -7371,7 +7368,7 @@ std::vector ListTableFiles(Env* env, const std::string& path) { } } // namespace -TEST(DBTest, FlushOneColumnFamily) { +TEST_F(DBTest, FlushOneColumnFamily) { Options options = CurrentOptions(); CreateAndReopenWithCF({"pikachu", "ilya", "muromec", "dobrynia", "nikitich", "alyosha", "popovich"}, @@ -7398,7 +7395,7 @@ TEST(DBTest, FlushOneColumnFamily) { // memtable was flushed, even it was empty. Now it's changed: // we try to create the smallest number of table files by merging // updates from multiple logs -TEST(DBTest, RecoverCheckFileAmountWithSmallWriteBuffer) { +TEST_F(DBTest, RecoverCheckFileAmountWithSmallWriteBuffer) { Options options = CurrentOptions(); options.write_buffer_size = 5000000; CreateAndReopenWithCF({"pikachu", "dobrynia", "nikitich"}, options); @@ -7453,7 +7450,7 @@ TEST(DBTest, RecoverCheckFileAmountWithSmallWriteBuffer) { // memtable was flushed, even it wasn't empty. Now it's changed: // we try to create the smallest number of table files by merging // updates from multiple logs -TEST(DBTest, RecoverCheckFileAmount) { +TEST_F(DBTest, RecoverCheckFileAmount) { Options options = CurrentOptions(); options.write_buffer_size = 100000; CreateAndReopenWithCF({"pikachu", "dobrynia", "nikitich"}, options); @@ -7517,7 +7514,7 @@ TEST(DBTest, RecoverCheckFileAmount) { } } -TEST(DBTest, SharedWriteBuffer) { +TEST_F(DBTest, SharedWriteBuffer) { Options options = CurrentOptions(); options.db_write_buffer_size = 100000; // this is the real limit options.write_buffer_size = 500000; // this is never hit @@ -7597,7 +7594,7 @@ TEST(DBTest, SharedWriteBuffer) { } } -TEST(DBTest, PurgeInfoLogs) { +TEST_F(DBTest, PurgeInfoLogs) { Options options = CurrentOptions(); options.keep_log_file_num = 5; options.create_if_missing = true; @@ -7670,7 +7667,7 @@ void ExpectRecords( } } // namespace -TEST(DBTest, TransactionLogIterator) { +TEST_F(DBTest, TransactionLogIterator) { do { Options options = OptionsForLogIterTest(); DestroyAndReopen(options); @@ -7698,7 +7695,7 @@ TEST(DBTest, TransactionLogIterator) { } #ifndef NDEBUG // sync point is not included with DNDEBUG build -TEST(DBTest, TransactionLogIteratorRace) { +TEST_F(DBTest, TransactionLogIteratorRace) { static const int LOG_ITERATOR_RACE_TEST_COUNT = 2; static const char* sync_points[LOG_ITERATOR_RACE_TEST_COUNT][4] = { {"WalManager::GetSortedWalFiles:1", "WalManager::PurgeObsoleteFiles:1", @@ -7754,7 +7751,7 @@ TEST(DBTest, TransactionLogIteratorRace) { } #endif -TEST(DBTest, TransactionLogIteratorStallAtLastRecord) { +TEST_F(DBTest, TransactionLogIteratorStallAtLastRecord) { do { Options options = OptionsForLogIterTest(); DestroyAndReopen(options); @@ -7772,7 +7769,7 @@ TEST(DBTest, TransactionLogIteratorStallAtLastRecord) { } while (ChangeCompactOptions()); } -TEST(DBTest, TransactionLogIteratorCheckAfterRestart) { +TEST_F(DBTest, TransactionLogIteratorCheckAfterRestart) { do { Options options = OptionsForLogIterTest(); DestroyAndReopen(options); @@ -7785,7 +7782,7 @@ TEST(DBTest, TransactionLogIteratorCheckAfterRestart) { } while (ChangeCompactOptions()); } -TEST(DBTest, 
TransactionLogIteratorCorruptedLog) { +TEST_F(DBTest, TransactionLogIteratorCorruptedLog) { do { Options options = OptionsForLogIterTest(); DestroyAndReopen(options); @@ -7818,7 +7815,7 @@ TEST(DBTest, TransactionLogIteratorCorruptedLog) { } while (ChangeCompactOptions()); } -TEST(DBTest, TransactionLogIteratorBatchOperations) { +TEST_F(DBTest, TransactionLogIteratorBatchOperations) { do { Options options = OptionsForLogIterTest(); DestroyAndReopen(options); @@ -7838,7 +7835,7 @@ TEST(DBTest, TransactionLogIteratorBatchOperations) { } while (ChangeCompactOptions()); } -TEST(DBTest, TransactionLogIteratorBlobs) { +TEST_F(DBTest, TransactionLogIteratorBlobs) { Options options = OptionsForLogIterTest(); DestroyAndReopen(options); CreateAndReopenWithCF({"pikachu"}, options); @@ -7997,7 +7994,7 @@ static void MTThreadBody(void* arg) { } // namespace -TEST(DBTest, MultiThreaded) { +TEST_F(DBTest, MultiThreaded) { anon::OptionsOverride options_override; options_override.skip_policy = kSkipNoSnapshot; do { @@ -8064,7 +8061,7 @@ static void GCThreadBody(void* arg) { } // namespace -TEST(DBTest, GroupCommitTest) { +TEST_F(DBTest, GroupCommitTest) { do { Options options = CurrentOptions(); options.env = env_; @@ -8438,7 +8435,7 @@ static bool CompareIterators(int step, return ok; } -TEST(DBTest, Randomized) { +TEST_F(DBTest, Randomized) { anon::OptionsOverride options_override; options_override.skip_policy = kSkipNoSnapshot; Random rnd(test::RandomSeed()); @@ -8534,7 +8531,7 @@ TEST(DBTest, Randomized) { kSkipHashCuckoo)); } -TEST(DBTest, MultiGetSimple) { +TEST_F(DBTest, MultiGetSimple) { do { CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); ASSERT_OK(Put(1, "k1", "v1")); @@ -8566,7 +8563,7 @@ TEST(DBTest, MultiGetSimple) { } while (ChangeCompactOptions()); } -TEST(DBTest, MultiGetEmpty) { +TEST_F(DBTest, MultiGetEmpty) { do { CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); // Empty Key Set @@ -8647,7 +8644,7 @@ void PrefixScanInit(DBTest *dbtest) { } } // namespace -TEST(DBTest, PrefixScan) { +TEST_F(DBTest, PrefixScan) { XFUNC_TEST("", "dbtest_prefix", prefix_skip1, XFuncPoint::SetSkip, kSkipNoPrefix); while (ChangeFilterOptions()) { @@ -8696,7 +8693,7 @@ TEST(DBTest, PrefixScan) { XFUNC_TEST("", "dbtest_prefix", prefix_skip1, XFuncPoint::SetSkip, 0); } -TEST(DBTest, TailingIteratorSingle) { +TEST_F(DBTest, TailingIteratorSingle) { ReadOptions read_options; read_options.tailing = true; @@ -8714,7 +8711,7 @@ TEST(DBTest, TailingIteratorSingle) { ASSERT_TRUE(!iter->Valid()); } -TEST(DBTest, TailingIteratorKeepAdding) { +TEST_F(DBTest, TailingIteratorKeepAdding) { CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); ReadOptions read_options; read_options.tailing = true; @@ -8736,7 +8733,7 @@ TEST(DBTest, TailingIteratorKeepAdding) { } } -TEST(DBTest, TailingIteratorSeekToNext) { +TEST_F(DBTest, TailingIteratorSeekToNext) { CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); ReadOptions read_options; read_options.tailing = true; @@ -8783,7 +8780,7 @@ TEST(DBTest, TailingIteratorSeekToNext) { } } -TEST(DBTest, TailingIteratorDeletes) { +TEST_F(DBTest, TailingIteratorDeletes) { CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); ReadOptions read_options; read_options.tailing = true; @@ -8822,7 +8819,7 @@ TEST(DBTest, TailingIteratorDeletes) { ASSERT_EQ(count, num_records); } -TEST(DBTest, TailingIteratorPrefixSeek) { +TEST_F(DBTest, TailingIteratorPrefixSeek) { XFUNC_TEST("", "dbtest_prefix", prefix_skip1, XFuncPoint::SetSkip, kSkipNoPrefix); ReadOptions read_options; @@ -8857,7 +8854,7 
@@ TEST(DBTest, TailingIteratorPrefixSeek) { XFUNC_TEST("", "dbtest_prefix", prefix_skip1, XFuncPoint::SetSkip, 0); } -TEST(DBTest, TailingIteratorIncomplete) { +TEST_F(DBTest, TailingIteratorIncomplete) { CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); ReadOptions read_options; read_options.tailing = true; @@ -8879,7 +8876,7 @@ TEST(DBTest, TailingIteratorIncomplete) { ASSERT_TRUE(iter->Valid() || iter->status().IsIncomplete()); } -TEST(DBTest, TailingIteratorSeekToSame) { +TEST_F(DBTest, TailingIteratorSeekToSame) { Options options = CurrentOptions(); options.compaction_style = kCompactionStyleUniversal; options.write_buffer_size = 1000; @@ -8914,7 +8911,7 @@ TEST(DBTest, TailingIteratorSeekToSame) { ASSERT_EQ(found, iter->key().ToString()); } -TEST(DBTest, ManagedTailingIteratorSingle) { +TEST_F(DBTest, ManagedTailingIteratorSingle) { ReadOptions read_options; read_options.tailing = true; read_options.managed = true; @@ -8933,7 +8930,7 @@ TEST(DBTest, ManagedTailingIteratorSingle) { ASSERT_TRUE(!iter->Valid()); } -TEST(DBTest, ManagedTailingIteratorKeepAdding) { +TEST_F(DBTest, ManagedTailingIteratorKeepAdding) { CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); ReadOptions read_options; read_options.tailing = true; @@ -8956,7 +8953,7 @@ TEST(DBTest, ManagedTailingIteratorKeepAdding) { } } -TEST(DBTest, ManagedTailingIteratorSeekToNext) { +TEST_F(DBTest, ManagedTailingIteratorSeekToNext) { CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); ReadOptions read_options; read_options.tailing = true; @@ -9004,7 +9001,7 @@ TEST(DBTest, ManagedTailingIteratorSeekToNext) { } } -TEST(DBTest, ManagedTailingIteratorDeletes) { +TEST_F(DBTest, ManagedTailingIteratorDeletes) { CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); ReadOptions read_options; read_options.tailing = true; @@ -9045,7 +9042,7 @@ TEST(DBTest, ManagedTailingIteratorDeletes) { ASSERT_EQ(count, num_records); } -TEST(DBTest, ManagedTailingIteratorPrefixSeek) { +TEST_F(DBTest, ManagedTailingIteratorPrefixSeek) { XFUNC_TEST("", "dbtest_prefix", prefix_skip1, XFuncPoint::SetSkip, kSkipNoPrefix); ReadOptions read_options; @@ -9081,7 +9078,7 @@ TEST(DBTest, ManagedTailingIteratorPrefixSeek) { XFUNC_TEST("", "dbtest_prefix", prefix_skip1, XFuncPoint::SetSkip, 0); } -TEST(DBTest, ManagedTailingIteratorIncomplete) { +TEST_F(DBTest, ManagedTailingIteratorIncomplete) { CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); ReadOptions read_options; read_options.tailing = true; @@ -9104,7 +9101,7 @@ TEST(DBTest, ManagedTailingIteratorIncomplete) { ASSERT_TRUE(iter->Valid() || iter->status().IsIncomplete()); } -TEST(DBTest, ManagedTailingIteratorSeekToSame) { +TEST_F(DBTest, ManagedTailingIteratorSeekToSame) { Options options = CurrentOptions(); options.compaction_style = kCompactionStyleUniversal; options.write_buffer_size = 1000; @@ -9140,7 +9137,7 @@ TEST(DBTest, ManagedTailingIteratorSeekToSame) { ASSERT_EQ(found, iter->key().ToString()); } -TEST(DBTest, BlockBasedTablePrefixIndexTest) { +TEST_F(DBTest, BlockBasedTablePrefixIndexTest) { // create a DB with block prefix index BlockBasedTableOptions table_options; Options options = CurrentOptions(); @@ -9165,7 +9162,7 @@ TEST(DBTest, BlockBasedTablePrefixIndexTest) { ASSERT_EQ("v2", Get("k2")); } -TEST(DBTest, ChecksumTest) { +TEST_F(DBTest, ChecksumTest) { BlockBasedTableOptions table_options; Options options = CurrentOptions(); @@ -9200,7 +9197,7 @@ TEST(DBTest, ChecksumTest) { ASSERT_EQ("h", Get("g")); } -TEST(DBTest, FIFOCompactionTest) { +TEST_F(DBTest, FIFOCompactionTest) 
{ for (int iter = 0; iter < 2; ++iter) { // first iteration -- auto compaction // second iteration -- manual compaction @@ -9237,7 +9234,7 @@ TEST(DBTest, FIFOCompactionTest) { } } -TEST(DBTest, SimpleWriteTimeoutTest) { +TEST_F(DBTest, SimpleWriteTimeoutTest) { // Block compaction thread, which will also block the flushes because // max_background_flushes == 0, so flushes are getting executed by the // compaction thread @@ -9331,7 +9328,7 @@ static void RandomTimeoutWriter(void* arg) { state->done = true; } -TEST(DBTest, MTRandomTimeoutTest) { +TEST_F(DBTest, MTRandomTimeoutTest) { Options options; options.env = env_; options.create_if_missing = true; @@ -9366,7 +9363,7 @@ TEST(DBTest, MTRandomTimeoutTest) { } } -TEST(DBTest, Level0StopWritesTest) { +TEST_F(DBTest, Level0StopWritesTest) { Options options = CurrentOptions(); options.level0_slowdown_writes_trigger = 2; options.level0_stop_writes_trigger = 4; @@ -9391,7 +9388,7 @@ TEST(DBTest, Level0StopWritesTest) { /* * This test is not reliable enough as it heavily depends on disk behavior. */ -TEST(DBTest, RateLimitingTest) { +TEST_F(DBTest, RateLimitingTest) { Options options = CurrentOptions(); options.write_buffer_size = 1 << 20; // 1MB options.level0_file_num_compaction_trigger = 2; @@ -9543,7 +9540,7 @@ namespace { } } // namespace -TEST(DBTest, CompactFilesOnLevelCompaction) { +TEST_F(DBTest, CompactFilesOnLevelCompaction) { const int kTestKeySize = 16; const int kTestValueSize = 984; const int kEntrySize = kTestKeySize + kTestValueSize; @@ -9598,7 +9595,7 @@ TEST(DBTest, CompactFilesOnLevelCompaction) { } } -TEST(DBTest, CompactFilesOnUniversalCompaction) { +TEST_F(DBTest, CompactFilesOnUniversalCompaction) { const int kTestKeySize = 16; const int kTestValueSize = 984; const int kEntrySize = kTestKeySize + kTestValueSize; @@ -9667,7 +9664,7 @@ TEST(DBTest, CompactFilesOnUniversalCompaction) { ASSERT_EQ(cf_meta.levels[0].files.size(), 1U); } -TEST(DBTest, TableOptionsSanitizeTest) { +TEST_F(DBTest, TableOptionsSanitizeTest) { Options options = CurrentOptions(); options.create_if_missing = true; DestroyAndReopen(options); @@ -9690,7 +9687,7 @@ TEST(DBTest, TableOptionsSanitizeTest) { ASSERT_OK(TryReopen(options)); } -TEST(DBTest, SanitizeNumThreads) { +TEST_F(DBTest, SanitizeNumThreads) { for (int attempt = 0; attempt < 2; attempt++) { const size_t kTotalTasks = 8; SleepingBackgroundTask sleeping_tasks[kTotalTasks]; @@ -9729,7 +9726,7 @@ TEST(DBTest, SanitizeNumThreads) { } } -TEST(DBTest, DBIteratorBoundTest) { +TEST_F(DBTest, DBIteratorBoundTest) { Options options = CurrentOptions(); options.env = env_; options.create_if_missing = true; @@ -9870,7 +9867,7 @@ TEST(DBTest, DBIteratorBoundTest) { } } -TEST(DBTest, WriteSingleThreadEntry) { +TEST_F(DBTest, WriteSingleThreadEntry) { std::vector threads; dbfull()->TEST_LockMutex(); auto w = dbfull()->TEST_BeginWrite(); @@ -9888,7 +9885,7 @@ TEST(DBTest, WriteSingleThreadEntry) { } } -TEST(DBTest, DisableDataSyncTest) { +TEST_F(DBTest, DisableDataSyncTest) { env_->sync_counter_.store(0); // iter 0 -- no sync // iter 1 -- sync @@ -9912,7 +9909,7 @@ TEST(DBTest, DisableDataSyncTest) { } } -TEST(DBTest, DynamicMemtableOptions) { +TEST_F(DBTest, DynamicMemtableOptions) { const uint64_t k64KB = 1 << 16; const uint64_t k128KB = 1 << 17; const uint64_t k5KB = 5 * 1024; @@ -10045,7 +10042,7 @@ void VerifyOperationCount(Env* env, ThreadStatus::OperationType op_type, } } // namespace -TEST(DBTest, GetThreadStatus) { +TEST_F(DBTest, GetThreadStatus) { Options options; options.env = env_; 
options.enable_thread_tracking = true; @@ -10104,7 +10101,7 @@ TEST(DBTest, GetThreadStatus) { handles_, true); } -TEST(DBTest, DisableThreadStatus) { +TEST_F(DBTest, DisableThreadStatus) { Options options; options.env = env_; options.enable_thread_tracking = false; @@ -10115,7 +10112,7 @@ TEST(DBTest, DisableThreadStatus) { handles_, false); } -TEST(DBTest, ThreadStatusFlush) { +TEST_F(DBTest, ThreadStatusFlush) { Options options; options.env = env_; options.write_buffer_size = 100000; // Small write buffer @@ -10147,7 +10144,7 @@ TEST(DBTest, ThreadStatusFlush) { rocksdb::SyncPoint::GetInstance()->DisableProcessing(); } -TEST(DBTest, ThreadStatusSingleCompaction) { +TEST_F(DBTest, ThreadStatusSingleCompaction) { const int kTestKeySize = 16; const int kTestValueSize = 984; const int kEntrySize = kTestKeySize + kTestValueSize; @@ -10208,7 +10205,7 @@ TEST(DBTest, ThreadStatusSingleCompaction) { rocksdb::SyncPoint::GetInstance()->DisableProcessing(); } -TEST(DBTest, PreShutdownManualCompaction) { +TEST_F(DBTest, PreShutdownManualCompaction) { Options options = CurrentOptions(); options.max_background_flushes = 0; CreateAndReopenWithCF({"pikachu"}, options); @@ -10259,7 +10256,7 @@ TEST(DBTest, PreShutdownManualCompaction) { } } -TEST(DBTest, PreShutdownMultipleCompaction) { +TEST_F(DBTest, PreShutdownMultipleCompaction) { const int kTestKeySize = 16; const int kTestValueSize = 984; const int kEntrySize = kTestKeySize + kTestValueSize; @@ -10349,7 +10346,7 @@ TEST(DBTest, PreShutdownMultipleCompaction) { ASSERT_EQ(operation_count[ThreadStatus::OP_COMPACTION], 0); } -TEST(DBTest, PreShutdownCompactionMiddle) { +TEST_F(DBTest, PreShutdownCompactionMiddle) { const int kTestKeySize = 16; const int kTestValueSize = 984; const int kEntrySize = kTestKeySize + kTestValueSize; @@ -10439,7 +10436,7 @@ TEST(DBTest, PreShutdownCompactionMiddle) { #endif // ROCKSDB_USING_THREAD_STATUS -TEST(DBTest, DynamicLevelMaxBytesBase) { +TEST_F(DBTest, DynamicLevelMaxBytesBase) { // Use InMemoryEnv, or it would be too slow. 
unique_ptr env(new MockEnv(env_)); @@ -10534,7 +10531,7 @@ TEST(DBTest, DynamicLevelMaxBytesBase) { } // Test specific cases in dynamic max bytes -TEST(DBTest, DynamicLevelMaxBytesBase2) { +TEST_F(DBTest, DynamicLevelMaxBytesBase2) { Random rnd(301); int kMaxKey = 1000000; @@ -10677,7 +10674,7 @@ TEST(DBTest, DynamicLevelMaxBytesBase2) { ASSERT_EQ(1U, int_prop); } -TEST(DBTest, DynamicLevelCompressionPerLevel) { +TEST_F(DBTest, DynamicLevelCompressionPerLevel) { if (!SnappyCompressionSupported()) { return; } @@ -10752,7 +10749,7 @@ TEST(DBTest, DynamicLevelCompressionPerLevel) { ASSERT_GT(SizeAtLevel(0) + SizeAtLevel(3), num_keys * 4000U); } -TEST(DBTest, DynamicLevelCompressionPerLevel2) { +TEST_F(DBTest, DynamicLevelCompressionPerLevel2) { const int kNKeys = 500; int keys[kNKeys]; for (int i = 0; i < kNKeys; i++) { @@ -10845,7 +10842,7 @@ TEST(DBTest, DynamicLevelCompressionPerLevel2) { mock::MockTableBuilder::finish_cb_ = nullptr; } -TEST(DBTest, DynamicCompactionOptions) { +TEST_F(DBTest, DynamicCompactionOptions) { // minimum write buffer size is enforced at 64KB const uint64_t k32KB = 1 << 15; const uint64_t k64KB = 1 << 16; @@ -11139,7 +11136,7 @@ TEST(DBTest, DynamicCompactionOptions) { ASSERT_EQ(NumTableFilesAtLevel(2), 1); } -TEST(DBTest, FileCreationRandomFailure) { +TEST_F(DBTest, FileCreationRandomFailure) { Options options; options.env = env_; options.create_if_missing = true; @@ -11198,7 +11195,7 @@ TEST(DBTest, FileCreationRandomFailure) { } } -TEST(DBTest, PartialCompactionFailure) { +TEST_F(DBTest, PartialCompactionFailure) { Options options; const int kKeySize = 16; const int kKvSize = 1000; @@ -11280,7 +11277,7 @@ TEST(DBTest, PartialCompactionFailure) { } } -TEST(DBTest, DynamicMiscOptions) { +TEST_F(DBTest, DynamicMiscOptions) { // Test max_sequential_skip_in_iterations Options options; options.env = env_; @@ -11330,7 +11327,7 @@ TEST(DBTest, DynamicMiscOptions) { assert_reseek_count(300, 1); } -TEST(DBTest, DontDeletePendingOutputs) { +TEST_F(DBTest, DontDeletePendingOutputs) { Options options; options.env = env_; options.create_if_missing = true; @@ -11361,7 +11358,7 @@ TEST(DBTest, DontDeletePendingOutputs) { Compact("a", "b"); } -TEST(DBTest, DontDeleteMovedFile) { +TEST_F(DBTest, DontDeleteMovedFile) { // This test triggers move compaction and verifies that the file is not // deleted when it's part of move compaction Options options = CurrentOptions(); @@ -11391,7 +11388,7 @@ TEST(DBTest, DontDeleteMovedFile) { Reopen(options); } -TEST(DBTest, DeleteMovedFileAfterCompaction) { +TEST_F(DBTest, DeleteMovedFileAfterCompaction) { // iter 1 -- delete_obsolete_files_period_micros == 0 for (int iter = 0; iter < 2; ++iter) { // This test triggers move compaction and verifies that the file is not @@ -11465,7 +11462,7 @@ TEST(DBTest, DeleteMovedFileAfterCompaction) { } } -TEST(DBTest, OptimizeFiltersForHits) { +TEST_F(DBTest, OptimizeFiltersForHits) { Options options = CurrentOptions(); options.write_buffer_size = 256 * 1024; options.target_file_size_base = 256 * 1024; @@ -11517,7 +11514,7 @@ TEST(DBTest, OptimizeFiltersForHits) { } } -TEST(DBTest, L0L1L2AndUpHitCounter) { +TEST_F(DBTest, L0L1L2AndUpHitCounter) { Options options = CurrentOptions(); options.write_buffer_size = 32 * 1024; options.target_file_size_base = 32 * 1024; @@ -11555,7 +11552,7 @@ TEST(DBTest, L0L1L2AndUpHitCounter) { TestGetTickerCount(options, GET_HIT_L2_AND_UP)); } -TEST(DBTest, EncodeDecompressedBlockSizeTest) { +TEST_F(DBTest, EncodeDecompressedBlockSizeTest) { // iter 0 -- zlib // iter 1 -- 
bzip2 // iter 2 -- lz4 @@ -11597,7 +11594,7 @@ TEST(DBTest, EncodeDecompressedBlockSizeTest) { } } -TEST(DBTest, MutexWaitStats) { +TEST_F(DBTest, MutexWaitStats) { Options options = CurrentOptions(); options.create_if_missing = true; options.statistics = rocksdb::CreateDBStatistics(); @@ -11626,7 +11623,7 @@ TEST(DBTest, MutexWaitStats) { // 6. PurgeObsoleteFiles() tries to delete file 13, but this file is blocked by // pending outputs since compaction (1) is still running. It is not deleted and // it is not present in obsolete_files_ anymore. Therefore, we never delete it. -TEST(DBTest, DeleteObsoleteFilesPendingOutputs) { +TEST_F(DBTest, DeleteObsoleteFilesPendingOutputs) { Options options = CurrentOptions(); options.env = env_; options.write_buffer_size = 2 * 1024 * 1024; // 2 MB @@ -11701,7 +11698,7 @@ TEST(DBTest, DeleteObsoleteFilesPendingOutputs) { ASSERT_TRUE(!env_->FileExists(dbname_ + "/" + file_on_L2)); } -TEST(DBTest, CloseSpeedup) { +TEST_F(DBTest, CloseSpeedup) { Options options = CurrentOptions(); options.compaction_style = kCompactionStyleLevel; options.write_buffer_size = 100 << 10; // 100KB @@ -11757,5 +11754,6 @@ TEST(DBTest, CloseSpeedup) { } // namespace rocksdb int main(int argc, char** argv) { - return rocksdb::test::RunAllTests(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/db/dbformat_test.cc b/db/dbformat_test.cc index 332d7723c..56e292742 100644 --- a/db/dbformat_test.cc +++ b/db/dbformat_test.cc @@ -49,9 +49,9 @@ static void TestKey(const std::string& key, ASSERT_TRUE(!ParseInternalKey(Slice("bar"), &decoded)); } -class FormatTest { }; +class FormatTest : public testing::Test {}; -TEST(FormatTest, InternalKey_EncodeDecode) { +TEST_F(FormatTest, InternalKey_EncodeDecode) { const char* keys[] = { "", "k", "hello", "longggggggggggggggggggggg" }; const uint64_t seq[] = { 1, 2, 3, @@ -67,7 +67,7 @@ TEST(FormatTest, InternalKey_EncodeDecode) { } } -TEST(FormatTest, InternalKeyShortSeparator) { +TEST_F(FormatTest, InternalKeyShortSeparator) { // When user keys are same ASSERT_EQ(IKey("foo", 100, kTypeValue), Shorten(IKey("foo", 100, kTypeValue), @@ -103,14 +103,14 @@ TEST(FormatTest, InternalKeyShortSeparator) { IKey("foo", 200, kTypeValue))); } -TEST(FormatTest, InternalKeyShortestSuccessor) { +TEST_F(FormatTest, InternalKeyShortestSuccessor) { ASSERT_EQ(IKey("g", kMaxSequenceNumber, kValueTypeForSeek), ShortSuccessor(IKey("foo", 100, kTypeValue))); ASSERT_EQ(IKey("\xff\xff", 100, kTypeValue), ShortSuccessor(IKey("\xff\xff", 100, kTypeValue))); } -TEST(FormatTest, IterKeyOperation) { +TEST_F(FormatTest, IterKeyOperation) { IterKey k; const char p[] = "abcdefghijklmnopqrstuvwxyz"; const char q[] = "0123456789"; @@ -152,5 +152,6 @@ TEST(FormatTest, IterKeyOperation) { } // namespace rocksdb int main(int argc, char** argv) { - return rocksdb::test::RunAllTests(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/db/deletefile_test.cc b/db/deletefile_test.cc index dd55a02c4..4b39cbaa6 100644 --- a/db/deletefile_test.cc +++ b/db/deletefile_test.cc @@ -23,7 +23,7 @@ namespace rocksdb { -class DeleteFileTest { +class DeleteFileTest : public testing::Test { public: std::string dbname_; Options options_; @@ -146,7 +146,7 @@ class DeleteFileTest { }; -TEST(DeleteFileTest, AddKeysAndQueryLevels) { +TEST_F(DeleteFileTest, AddKeysAndQueryLevels) { CreateTwoLevels(); std::vector metadata; db_->GetLiveFilesMetaData(&metadata); @@ -192,7 +192,7 @@ TEST(DeleteFileTest, AddKeysAndQueryLevels) { CloseDB(); } 
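
The hunks above and below repeat one mechanical conversion: each test fixture gains testing::Test as a base class, every TEST(Suite, Name) becomes TEST_F(Suite, Name) so the fixture is instantiated for each test body, constructor-time or Status-returning setup moves into void SetUp()/TearDown() overrides (as in FaultInjectionTest further down), and each test binary's main() is rewritten to call ::testing::InitGoogleTest before RUN_ALL_TESTS(). A minimal sketch of the resulting shape is below; ExampleTest and its helper methods are hypothetical stand-ins for illustration, not code from this diff.

#include <gtest/gtest.h>

namespace rocksdb {

// Hypothetical fixture; stands in for ColumnFamilyTest, DBTest, LogTest, etc.
class ExampleTest : public testing::Test {
 protected:
  // Setup that previously ran in the constructor, or in a hand-rolled
  // SetUp() returning Status, now runs here and asserts directly.
  void SetUp() override { ASSERT_TRUE(OpenResources()); }
  void TearDown() override { ASSERT_TRUE(ReleaseResources()); }

  bool OpenResources() { return true; }     // placeholder for real setup
  bool ReleaseResources() { return true; }  // placeholder for real teardown
};

// TEST_F() runs the body on a fresh ExampleTest instance, so SetUp()
// has already succeeded and fixture members are in scope.
TEST_F(ExampleTest, DoesSomething) {
  ASSERT_EQ(1, 1);
}

}  // namespace rocksdb

int main(int argc, char** argv) {
  // Replaces rocksdb::test::RunAllTests(): gtest consumes its own flags
  // first, then RUN_ALL_TESTS() executes every registered TEST_F.
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}

Because InitGoogleTest strips only the flags it recognizes from argv, binaries with their own command-line handling (perf_context_test, prefix_test) keep their existing parsing after the added call, as the corresponding hunks in this patch show.
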
-TEST(DeleteFileTest, PurgeObsoleteFilesTest) { +TEST_F(DeleteFileTest, PurgeObsoleteFilesTest) { CreateTwoLevels(); // there should be only one (empty) log file because CreateTwoLevels() // flushes the memtables to disk @@ -220,7 +220,7 @@ TEST(DeleteFileTest, PurgeObsoleteFilesTest) { CloseDB(); } -TEST(DeleteFileTest, DeleteFileWithIterator) { +TEST_F(DeleteFileTest, DeleteFileWithIterator) { CreateTwoLevels(); ReadOptions options; Iterator* it = db_->NewIterator(options); @@ -251,7 +251,7 @@ TEST(DeleteFileTest, DeleteFileWithIterator) { CloseDB(); } -TEST(DeleteFileTest, DeleteLogFiles) { +TEST_F(DeleteFileTest, DeleteLogFiles) { AddKeys(10, 0); VectorLogPtr logfiles; db_->GetSortedWalFiles(logfiles); @@ -288,7 +288,7 @@ TEST(DeleteFileTest, DeleteLogFiles) { CloseDB(); } -TEST(DeleteFileTest, DeleteNonDefaultColumnFamily) { +TEST_F(DeleteFileTest, DeleteNonDefaultColumnFamily) { CloseDB(); DBOptions db_options; db_options.create_if_missing = true; @@ -360,6 +360,7 @@ TEST(DeleteFileTest, DeleteNonDefaultColumnFamily) { } //namespace rocksdb int main(int argc, char** argv) { - return rocksdb::test::RunAllTests(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/db/fault_injection_test.cc b/db/fault_injection_test.cc index 297238692..d7addfd10 100644 --- a/db/fault_injection_test.cc +++ b/db/fault_injection_test.cc @@ -406,7 +406,7 @@ Status TestWritableFile::Sync() { return Status::OK(); } -class FaultInjectionTest { +class FaultInjectionTest : public testing::Test { protected: enum OptionConfig { kDefault, @@ -447,11 +447,8 @@ class FaultInjectionTest { base_env_(nullptr), env_(NULL), db_(NULL) { - NewDB(); } - ~FaultInjectionTest() { EXPECT_OK(TearDown()); } - bool ChangeOptions() { option_config_++; if (option_config_ >= kEnd) { @@ -532,15 +529,9 @@ class FaultInjectionTest { return s; } - Status SetUp() { - Status s = TearDown(); - if (s.ok()) { - s = NewDB(); - } - return s; - } + void SetUp() override { ASSERT_OK(NewDB()); } - Status TearDown() { + void TearDown() override { CloseDB(); Status s = DestroyDB(dbname_, options_); @@ -550,7 +541,7 @@ class FaultInjectionTest { tiny_cache_.reset(); - return s; + ASSERT_OK(s); } void Build(const WriteOptions& write_options, int start_idx, int num_vals) { @@ -696,10 +687,9 @@ class FaultInjectionTest { } }; -TEST(FaultInjectionTest, FaultTest) { +TEST_F(FaultInjectionTest, FaultTest) { do { Random rnd(301); - ASSERT_OK(SetUp()); for (size_t idx = 0; idx < kNumIterations; idx++) { int num_pre_sync = rnd.Uniform(kMaxNumValues); @@ -739,5 +729,6 @@ TEST(FaultInjectionTest, FaultTest) { } // namespace rocksdb int main(int argc, char** argv) { - return rocksdb::test::RunAllTests(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/db/file_indexer_test.cc b/db/file_indexer_test.cc index f57df79a6..85e083546 100644 --- a/db/file_indexer_test.cc +++ b/db/file_indexer_test.cc @@ -41,7 +41,7 @@ class IntComparator : public Comparator { void FindShortSuccessor(std::string* key) const override {} }; -class FileIndexerTest { +class FileIndexerTest : public testing::Test { public: FileIndexerTest() : kNumLevels(4), files(new std::vector[kNumLevels]) {} @@ -90,7 +90,7 @@ class FileIndexerTest { }; // Case 0: Empty -TEST(FileIndexerTest, Empty) { +TEST_F(FileIndexerTest, Empty) { Arena arena; indexer = new FileIndexer(&ucmp); indexer->UpdateIndex(&arena, 0, files); @@ -98,7 +98,7 @@ TEST(FileIndexerTest, Empty) { } // Case 1: no overlap, files are on the left of next level files 
-TEST(FileIndexerTest, no_overlap_left) { +TEST_F(FileIndexerTest, no_overlap_left) { Arena arena; indexer = new FileIndexer(&ucmp); // level 1 @@ -138,7 +138,7 @@ TEST(FileIndexerTest, no_overlap_left) { } // Case 2: no overlap, files are on the right of next level files -TEST(FileIndexerTest, no_overlap_right) { +TEST_F(FileIndexerTest, no_overlap_right) { Arena arena; indexer = new FileIndexer(&ucmp); // level 1 @@ -180,7 +180,7 @@ TEST(FileIndexerTest, no_overlap_right) { } // Case 3: empty L2 -TEST(FileIndexerTest, empty_L2) { +TEST_F(FileIndexerTest, empty_L2) { Arena arena; indexer = new FileIndexer(&ucmp); for (uint32_t i = 1; i < kNumLevels; ++i) { @@ -220,7 +220,7 @@ TEST(FileIndexerTest, empty_L2) { } // Case 4: mixed -TEST(FileIndexerTest, mixed) { +TEST_F(FileIndexerTest, mixed) { Arena arena; indexer = new FileIndexer(&ucmp); // level 1 @@ -343,5 +343,6 @@ TEST(FileIndexerTest, mixed) { } // namespace rocksdb int main(int argc, char** argv) { - return rocksdb::test::RunAllTests(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/db/filename_test.cc b/db/filename_test.cc index 5a5f880e4..2eafd5230 100644 --- a/db/filename_test.cc +++ b/db/filename_test.cc @@ -16,9 +16,9 @@ namespace rocksdb { -class FileNameTest { }; +class FileNameTest : public testing::Test {}; -TEST(FileNameTest, Parse) { +TEST_F(FileNameTest, Parse) { Slice db; FileType type; uint64_t number; @@ -105,7 +105,7 @@ TEST(FileNameTest, Parse) { }; } -TEST(FileNameTest, InfoLogFileName) { +TEST_F(FileNameTest, InfoLogFileName) { std::string dbname = ("/data/rocksdb"); std::string db_absolute_path; Env::Default()->GetAbsolutePath(dbname, &db_absolute_path); @@ -121,7 +121,7 @@ TEST(FileNameTest, InfoLogFileName) { OldInfoLogFileName(dbname, 666u, db_absolute_path, "/data/rocksdb_log")); } -TEST(FileNameTest, Construction) { +TEST_F(FileNameTest, Construction) { uint64_t number; FileType type; std::string fname; @@ -175,5 +175,6 @@ TEST(FileNameTest, Construction) { } // namespace rocksdb int main(int argc, char** argv) { - return rocksdb::test::RunAllTests(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/db/flush_job_test.cc b/db/flush_job_test.cc index 9279d20a1..dce1e6df8 100644 --- a/db/flush_job_test.cc +++ b/db/flush_job_test.cc @@ -20,7 +20,7 @@ namespace rocksdb { // TODO(icanadi) Mock out everything else: // 1. VersionSet // 2. 
Memtable -class FlushJobTest { +class FlushJobTest : public testing::Test { public: FlushJobTest() : env_(Env::Default()), @@ -80,7 +80,7 @@ class FlushJobTest { std::shared_ptr mock_table_factory_; }; -TEST(FlushJobTest, Empty) { +TEST_F(FlushJobTest, Empty) { JobContext job_context(0); auto cfd = versions_->GetColumnFamilySet()->GetDefault(); EventLogger event_logger(db_options_.info_log.get()); @@ -93,7 +93,7 @@ TEST(FlushJobTest, Empty) { job_context.Clean(); } -TEST(FlushJobTest, NonEmpty) { +TEST_F(FlushJobTest, NonEmpty) { JobContext job_context(0); auto cfd = versions_->GetColumnFamilySet()->GetDefault(); auto new_mem = cfd->ConstructNewMemtable(*cfd->GetLatestMutableCFOptions()); @@ -123,4 +123,7 @@ TEST(FlushJobTest, NonEmpty) { } // namespace rocksdb -int main(int argc, char** argv) { return rocksdb::test::RunAllTests(); } +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/db/listener_test.cc b/db/listener_test.cc index 08a27ef64..0714b1938 100644 --- a/db/listener_test.cc +++ b/db/listener_test.cc @@ -35,7 +35,7 @@ namespace rocksdb { -class EventListenerTest { +class EventListenerTest : public testing::Test { public: EventListenerTest() { dbname_ = test::TmpDir() + "/listener_test"; @@ -157,7 +157,7 @@ class TestCompactionListener : public EventListener { std::vector compacted_dbs_; }; -TEST(EventListenerTest, OnSingleDBCompactionTest) { +TEST_F(EventListenerTest, OnSingleDBCompactionTest) { const int kTestKeySize = 16; const int kTestValueSize = 984; const int kEntrySize = kTestKeySize + kTestValueSize; @@ -226,7 +226,7 @@ class TestFlushListener : public EventListener { int stop_count; }; -TEST(EventListenerTest, OnSingleDBFlushTest) { +TEST_F(EventListenerTest, OnSingleDBFlushTest) { Options options; options.write_buffer_size = 100000; TestFlushListener* listener = new TestFlushListener(); @@ -257,7 +257,7 @@ TEST(EventListenerTest, OnSingleDBFlushTest) { } } -TEST(EventListenerTest, MultiCF) { +TEST_F(EventListenerTest, MultiCF) { Options options; options.write_buffer_size = 100000; TestFlushListener* listener = new TestFlushListener(); @@ -287,7 +287,7 @@ TEST(EventListenerTest, MultiCF) { } } -TEST(EventListenerTest, MultiDBMultiListeners) { +TEST_F(EventListenerTest, MultiDBMultiListeners) { std::vector listeners; const int kNumDBs = 5; const int kNumListeners = 10; @@ -363,7 +363,7 @@ TEST(EventListenerTest, MultiDBMultiListeners) { } } -TEST(EventListenerTest, DisableBGCompaction) { +TEST_F(EventListenerTest, DisableBGCompaction) { Options options; TestFlushListener* listener = new TestFlushListener(); const int kSlowdownTrigger = 5; @@ -396,6 +396,7 @@ TEST(EventListenerTest, DisableBGCompaction) { #endif // ROCKSDB_LITE int main(int argc, char** argv) { - return rocksdb::test::RunAllTests(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/db/log_test.cc b/db/log_test.cc index eaaeb1b19..816e38d1a 100644 --- a/db/log_test.cc +++ b/db/log_test.cc @@ -41,7 +41,7 @@ static std::string RandomSkewedString(int i, Random* rnd) { return BigString(NumberString(i), rnd->Skewed(17)); } -class LogTest { +class LogTest : public testing::Test { private: class StringDest : public WritableFile { public: @@ -329,12 +329,9 @@ uint64_t LogTest::initial_offset_last_record_offsets_[] = 2 * (kHeaderSize + 10000) + (2 * log::kBlockSize - 1000) + 3 * kHeaderSize}; +TEST_F(LogTest, Empty) { ASSERT_EQ("EOF", Read()); } -TEST(LogTest, Empty) { - ASSERT_EQ("EOF", Read()); -} - -TEST(LogTest, 
ReadWrite) { +TEST_F(LogTest, ReadWrite) { Write("foo"); Write("bar"); Write(""); @@ -347,7 +344,7 @@ TEST(LogTest, ReadWrite) { ASSERT_EQ("EOF", Read()); // Make sure reads at eof work } -TEST(LogTest, ManyBlocks) { +TEST_F(LogTest, ManyBlocks) { for (int i = 0; i < 100000; i++) { Write(NumberString(i)); } @@ -357,7 +354,7 @@ TEST(LogTest, ManyBlocks) { ASSERT_EQ("EOF", Read()); } -TEST(LogTest, Fragmentation) { +TEST_F(LogTest, Fragmentation) { Write("small"); Write(BigString("medium", 50000)); Write(BigString("large", 100000)); @@ -367,7 +364,7 @@ TEST(LogTest, Fragmentation) { ASSERT_EQ("EOF", Read()); } -TEST(LogTest, MarginalTrailer) { +TEST_F(LogTest, MarginalTrailer) { // Make a trailer that is exactly the same length as an empty record. const int n = kBlockSize - 2*kHeaderSize; Write(BigString("foo", n)); @@ -380,7 +377,7 @@ TEST(LogTest, MarginalTrailer) { ASSERT_EQ("EOF", Read()); } -TEST(LogTest, MarginalTrailer2) { +TEST_F(LogTest, MarginalTrailer2) { // Make a trailer that is exactly the same length as an empty record. const int n = kBlockSize - 2*kHeaderSize; Write(BigString("foo", n)); @@ -393,7 +390,7 @@ TEST(LogTest, MarginalTrailer2) { ASSERT_EQ("", ReportMessage()); } -TEST(LogTest, ShortTrailer) { +TEST_F(LogTest, ShortTrailer) { const int n = kBlockSize - 2*kHeaderSize + 4; Write(BigString("foo", n)); ASSERT_EQ((unsigned int)(kBlockSize - kHeaderSize + 4), WrittenBytes()); @@ -405,7 +402,7 @@ TEST(LogTest, ShortTrailer) { ASSERT_EQ("EOF", Read()); } -TEST(LogTest, AlignedEof) { +TEST_F(LogTest, AlignedEof) { const int n = kBlockSize - 2*kHeaderSize + 4; Write(BigString("foo", n)); ASSERT_EQ((unsigned int)(kBlockSize - kHeaderSize + 4), WrittenBytes()); @@ -413,7 +410,7 @@ TEST(LogTest, AlignedEof) { ASSERT_EQ("EOF", Read()); } -TEST(LogTest, RandomRead) { +TEST_F(LogTest, RandomRead) { const int N = 500; Random write_rnd(301); for (int i = 0; i < N; i++) { @@ -428,7 +425,7 @@ TEST(LogTest, RandomRead) { // Tests of all the error paths in log_reader.cc follow: -TEST(LogTest, ReadError) { +TEST_F(LogTest, ReadError) { Write("foo"); ForceError(); ASSERT_EQ("EOF", Read()); @@ -436,7 +433,7 @@ TEST(LogTest, ReadError) { ASSERT_EQ("OK", MatchError("read error")); } -TEST(LogTest, BadRecordType) { +TEST_F(LogTest, BadRecordType) { Write("foo"); // Type is stored in header[6] IncrementByte(6, 100); @@ -446,7 +443,7 @@ TEST(LogTest, BadRecordType) { ASSERT_EQ("OK", MatchError("unknown record type")); } -TEST(LogTest, TruncatedTrailingRecordIsIgnored) { +TEST_F(LogTest, TruncatedTrailingRecordIsIgnored) { Write("foo"); ShrinkSize(4); // Drop all payload as well as a header byte ASSERT_EQ("EOF", Read()); @@ -455,7 +452,7 @@ TEST(LogTest, TruncatedTrailingRecordIsIgnored) { ASSERT_EQ("", ReportMessage()); } -TEST(LogTest, BadLength) { +TEST_F(LogTest, BadLength) { const int kPayloadSize = kBlockSize - kHeaderSize; Write(BigString("bar", kPayloadSize)); Write("foo"); @@ -466,7 +463,7 @@ TEST(LogTest, BadLength) { ASSERT_EQ("OK", MatchError("bad record length")); } -TEST(LogTest, BadLengthAtEndIsIgnored) { +TEST_F(LogTest, BadLengthAtEndIsIgnored) { Write("foo"); ShrinkSize(1); ASSERT_EQ("EOF", Read()); @@ -474,7 +471,7 @@ TEST(LogTest, BadLengthAtEndIsIgnored) { ASSERT_EQ("", ReportMessage()); } -TEST(LogTest, ChecksumMismatch) { +TEST_F(LogTest, ChecksumMismatch) { Write("foo"); IncrementByte(0, 10); ASSERT_EQ("EOF", Read()); @@ -482,7 +479,7 @@ TEST(LogTest, ChecksumMismatch) { ASSERT_EQ("OK", MatchError("checksum mismatch")); } -TEST(LogTest, UnexpectedMiddleType) { 
+TEST_F(LogTest, UnexpectedMiddleType) { Write("foo"); SetByte(6, kMiddleType); FixChecksum(0, 3); @@ -491,7 +488,7 @@ TEST(LogTest, UnexpectedMiddleType) { ASSERT_EQ("OK", MatchError("missing start")); } -TEST(LogTest, UnexpectedLastType) { +TEST_F(LogTest, UnexpectedLastType) { Write("foo"); SetByte(6, kLastType); FixChecksum(0, 3); @@ -500,7 +497,7 @@ TEST(LogTest, UnexpectedLastType) { ASSERT_EQ("OK", MatchError("missing start")); } -TEST(LogTest, UnexpectedFullType) { +TEST_F(LogTest, UnexpectedFullType) { Write("foo"); Write("bar"); SetByte(6, kFirstType); @@ -511,7 +508,7 @@ TEST(LogTest, UnexpectedFullType) { ASSERT_EQ("OK", MatchError("partial record without end")); } -TEST(LogTest, UnexpectedFirstType) { +TEST_F(LogTest, UnexpectedFirstType) { Write("foo"); Write(BigString("bar", 100000)); SetByte(6, kFirstType); @@ -522,7 +519,7 @@ TEST(LogTest, UnexpectedFirstType) { ASSERT_EQ("OK", MatchError("partial record without end")); } -TEST(LogTest, MissingLastIsIgnored) { +TEST_F(LogTest, MissingLastIsIgnored) { Write(BigString("bar", kBlockSize)); // Remove the LAST block, including header. ShrinkSize(14); @@ -531,7 +528,7 @@ TEST(LogTest, MissingLastIsIgnored) { ASSERT_EQ(0U, DroppedBytes()); } -TEST(LogTest, PartialLastIsIgnored) { +TEST_F(LogTest, PartialLastIsIgnored) { Write(BigString("bar", kBlockSize)); // Cause a bad record length in the LAST block. ShrinkSize(1); @@ -540,7 +537,7 @@ TEST(LogTest, PartialLastIsIgnored) { ASSERT_EQ(0U, DroppedBytes()); } -TEST(LogTest, ErrorJoinsRecords) { +TEST_F(LogTest, ErrorJoinsRecords) { // Consider two fragmented records: // first(R1) last(R1) first(R2) last(R2) // where the middle two fragments disappear. We do not want @@ -563,61 +560,43 @@ TEST(LogTest, ErrorJoinsRecords) { ASSERT_GE(dropped, 2 * kBlockSize); } -TEST(LogTest, ReadStart) { - CheckInitialOffsetRecord(0, 0); -} +TEST_F(LogTest, ReadStart) { CheckInitialOffsetRecord(0, 0); } -TEST(LogTest, ReadSecondOneOff) { - CheckInitialOffsetRecord(1, 1); -} +TEST_F(LogTest, ReadSecondOneOff) { CheckInitialOffsetRecord(1, 1); } -TEST(LogTest, ReadSecondTenThousand) { - CheckInitialOffsetRecord(10000, 1); -} +TEST_F(LogTest, ReadSecondTenThousand) { CheckInitialOffsetRecord(10000, 1); } -TEST(LogTest, ReadSecondStart) { - CheckInitialOffsetRecord(10007, 1); -} +TEST_F(LogTest, ReadSecondStart) { CheckInitialOffsetRecord(10007, 1); } -TEST(LogTest, ReadThirdOneOff) { - CheckInitialOffsetRecord(10008, 2); -} +TEST_F(LogTest, ReadThirdOneOff) { CheckInitialOffsetRecord(10008, 2); } -TEST(LogTest, ReadThirdStart) { - CheckInitialOffsetRecord(20014, 2); -} +TEST_F(LogTest, ReadThirdStart) { CheckInitialOffsetRecord(20014, 2); } -TEST(LogTest, ReadFourthOneOff) { - CheckInitialOffsetRecord(20015, 3); -} +TEST_F(LogTest, ReadFourthOneOff) { CheckInitialOffsetRecord(20015, 3); } -TEST(LogTest, ReadFourthFirstBlockTrailer) { +TEST_F(LogTest, ReadFourthFirstBlockTrailer) { CheckInitialOffsetRecord(log::kBlockSize - 4, 3); } -TEST(LogTest, ReadFourthMiddleBlock) { +TEST_F(LogTest, ReadFourthMiddleBlock) { CheckInitialOffsetRecord(log::kBlockSize + 1, 3); } -TEST(LogTest, ReadFourthLastBlock) { +TEST_F(LogTest, ReadFourthLastBlock) { CheckInitialOffsetRecord(2 * log::kBlockSize + 1, 3); } -TEST(LogTest, ReadFourthStart) { +TEST_F(LogTest, ReadFourthStart) { CheckInitialOffsetRecord( 2 * (kHeaderSize + 1000) + (2 * log::kBlockSize - 1000) + 3 * kHeaderSize, 3); } -TEST(LogTest, ReadEnd) { - CheckOffsetPastEndReturnsNoRecords(0); -} +TEST_F(LogTest, ReadEnd) { CheckOffsetPastEndReturnsNoRecords(0); 
} -TEST(LogTest, ReadPastEnd) { - CheckOffsetPastEndReturnsNoRecords(5); -} +TEST_F(LogTest, ReadPastEnd) { CheckOffsetPastEndReturnsNoRecords(5); } -TEST(LogTest, ClearEofSingleBlock) { +TEST_F(LogTest, ClearEofSingleBlock) { Write("foo"); Write("bar"); ForceEOF(3 + kHeaderSize + 2); @@ -632,7 +611,7 @@ TEST(LogTest, ClearEofSingleBlock) { ASSERT_TRUE(IsEOF()); } -TEST(LogTest, ClearEofMultiBlock) { +TEST_F(LogTest, ClearEofMultiBlock) { size_t num_full_blocks = 5; size_t n = (kBlockSize - kHeaderSize) * num_full_blocks + 25; Write(BigString("foo", n)); @@ -649,7 +628,7 @@ TEST(LogTest, ClearEofMultiBlock) { ASSERT_TRUE(IsEOF()); } -TEST(LogTest, ClearEofError) { +TEST_F(LogTest, ClearEofError) { // If an error occurs during Read() in UnmarkEOF(), the records contained // in the buffer should be returned on subsequent calls of ReadRecord() // until no more full records are left, whereafter ReadRecord() should return @@ -667,7 +646,7 @@ TEST(LogTest, ClearEofError) { ASSERT_EQ("EOF", Read()); } -TEST(LogTest, ClearEofError2) { +TEST_F(LogTest, ClearEofError2) { Write("foo"); Write("bar"); UnmarkEOF(); @@ -685,5 +664,6 @@ TEST(LogTest, ClearEofError2) { } // namespace rocksdb int main(int argc, char** argv) { - return rocksdb::test::RunAllTests(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/db/perf_context_test.cc b/db/perf_context_test.cc index e91efe87a..143260cd4 100644 --- a/db/perf_context_test.cc +++ b/db/perf_context_test.cc @@ -59,9 +59,9 @@ std::shared_ptr OpenDb(bool read_only = false) { return std::shared_ptr(db); } -class PerfContextTest { }; +class PerfContextTest : public testing::Test {}; -TEST(PerfContextTest, SeekIntoDeletion) { +TEST_F(PerfContextTest, SeekIntoDeletion) { DestroyDB(kDbName, Options()); auto db = OpenDb(); WriteOptions write_options; @@ -144,7 +144,7 @@ TEST(PerfContextTest, SeekIntoDeletion) { std::cout << "Seek uesr key comparison: \n" << hist_seek.ToString(); } -TEST(PerfContextTest, StopWatchNanoOverhead) { +TEST_F(PerfContextTest, StopWatchNanoOverhead) { // profile the timer cost by itself! const int kTotalIterations = 1000000; std::vector timings(kTotalIterations); @@ -162,7 +162,7 @@ TEST(PerfContextTest, StopWatchNanoOverhead) { std::cout << histogram.ToString(); } -TEST(PerfContextTest, StopWatchOverhead) { +TEST_F(PerfContextTest, StopWatchOverhead) { // profile the timer cost by itself! const int kTotalIterations = 1000000; uint64_t elapsed = 0; @@ -425,7 +425,7 @@ void ProfileQueries(bool enabled_time = false) { } } -TEST(PerfContextTest, KeyComparisonCount) { +TEST_F(PerfContextTest, KeyComparisonCount) { SetPerfLevel(kEnableCount); ProfileQueries(); @@ -448,7 +448,7 @@ TEST(PerfContextTest, KeyComparisonCount) { // memtable. When there are two memtables, even the avg Seek Key comparison // starts to become linear to the input size. 
-TEST(PerfContextTest, SeekKeyComparison) { +TEST_F(PerfContextTest, SeekKeyComparison) { DestroyDB(kDbName, Options()); auto db = OpenDb(); WriteOptions write_options; @@ -517,6 +517,7 @@ TEST(PerfContextTest, SeekKeyComparison) { } int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); for (int i = 1; i < argc; i++) { int n; @@ -544,5 +545,5 @@ int main(int argc, char** argv) { std::cout << kDbName << "\n"; - return rocksdb::test::RunAllTests(); + return RUN_ALL_TESTS(); } diff --git a/db/plain_table_db_test.cc b/db/plain_table_db_test.cc index ad786eb04..72395609a 100644 --- a/db/plain_table_db_test.cc +++ b/db/plain_table_db_test.cc @@ -37,7 +37,7 @@ using std::unique_ptr; namespace rocksdb { -class PlainTableDBTest { +class PlainTableDBTest : public testing::Test { protected: private: std::string dbname_; @@ -182,7 +182,7 @@ class PlainTableDBTest { } }; -TEST(PlainTableDBTest, Empty) { +TEST_F(PlainTableDBTest, Empty) { ASSERT_TRUE(dbfull() != nullptr); ASSERT_EQ("NOT_FOUND", Get("0000000000000foo")); } @@ -302,7 +302,7 @@ class TestPlainTableFactory : public PlainTableFactory { bool* expect_bloom_not_match_; }; -TEST(PlainTableDBTest, Flush) { +TEST_F(PlainTableDBTest, Flush) { for (size_t huge_page_tlb_size = 0; huge_page_tlb_size <= 2 * 1024 * 1024; huge_page_tlb_size += 2 * 1024 * 1024) { for (EncodingType encoding_type : {kPlain, kPrefix}) { @@ -389,7 +389,7 @@ TEST(PlainTableDBTest, Flush) { } } -TEST(PlainTableDBTest, Flush2) { +TEST_F(PlainTableDBTest, Flush2) { for (size_t huge_page_tlb_size = 0; huge_page_tlb_size <= 2 * 1024 * 1024; huge_page_tlb_size += 2 * 1024 * 1024) { for (EncodingType encoding_type : {kPlain, kPrefix}) { @@ -469,7 +469,7 @@ TEST(PlainTableDBTest, Flush2) { } } -TEST(PlainTableDBTest, Iterator) { +TEST_F(PlainTableDBTest, Iterator) { for (size_t huge_page_tlb_size = 0; huge_page_tlb_size <= 2 * 1024 * 1024; huge_page_tlb_size += 2 * 1024 * 1024) { for (EncodingType encoding_type : {kPlain, kPrefix}) { @@ -603,7 +603,7 @@ std::string MakeLongKey(size_t length, char c) { } } // namespace -TEST(PlainTableDBTest, IteratorLargeKeys) { +TEST_F(PlainTableDBTest, IteratorLargeKeys) { Options options = CurrentOptions(); PlainTableOptions plain_table_options; @@ -653,7 +653,7 @@ std::string MakeLongKeyWithPrefix(size_t length, char c) { } } // namespace -TEST(PlainTableDBTest, IteratorLargeKeysWithPrefix) { +TEST_F(PlainTableDBTest, IteratorLargeKeysWithPrefix) { Options options = CurrentOptions(); PlainTableOptions plain_table_options; @@ -695,7 +695,7 @@ TEST(PlainTableDBTest, IteratorLargeKeysWithPrefix) { delete iter; } -TEST(PlainTableDBTest, IteratorReverseSuffixComparator) { +TEST_F(PlainTableDBTest, IteratorReverseSuffixComparator) { Options options = CurrentOptions(); options.create_if_missing = true; // Set only one bucket to force bucket conflict. 
@@ -764,7 +764,7 @@ TEST(PlainTableDBTest, IteratorReverseSuffixComparator) { delete iter; } -TEST(PlainTableDBTest, HashBucketConflict) { +TEST_F(PlainTableDBTest, HashBucketConflict) { for (size_t huge_page_tlb_size = 0; huge_page_tlb_size <= 2 * 1024 * 1024; huge_page_tlb_size += 2 * 1024 * 1024) { for (unsigned char i = 1; i <= 3; i++) { @@ -857,7 +857,7 @@ TEST(PlainTableDBTest, HashBucketConflict) { } } -TEST(PlainTableDBTest, HashBucketConflictReverseSuffixComparator) { +TEST_F(PlainTableDBTest, HashBucketConflictReverseSuffixComparator) { for (size_t huge_page_tlb_size = 0; huge_page_tlb_size <= 2 * 1024 * 1024; huge_page_tlb_size += 2 * 1024 * 1024) { for (unsigned char i = 1; i <= 3; i++) { @@ -950,7 +950,7 @@ TEST(PlainTableDBTest, HashBucketConflictReverseSuffixComparator) { } } -TEST(PlainTableDBTest, NonExistingKeyToNonEmptyBucket) { +TEST_F(PlainTableDBTest, NonExistingKeyToNonEmptyBucket) { Options options = CurrentOptions(); options.create_if_missing = true; // Set only one bucket to force bucket conflict. @@ -1006,7 +1006,7 @@ static std::string RandomString(Random* rnd, int len) { return r; } -TEST(PlainTableDBTest, CompactionTrigger) { +TEST_F(PlainTableDBTest, CompactionTrigger) { Options options = CurrentOptions(); options.write_buffer_size = 100 << 10; //100KB options.num_levels = 3; @@ -1040,7 +1040,7 @@ TEST(PlainTableDBTest, CompactionTrigger) { ASSERT_EQ(NumTableFilesAtLevel(1), 1); } -TEST(PlainTableDBTest, AdaptiveTable) { +TEST_F(PlainTableDBTest, AdaptiveTable) { Options options = CurrentOptions(); options.create_if_missing = true; @@ -1086,5 +1086,6 @@ TEST(PlainTableDBTest, AdaptiveTable) { } // namespace rocksdb int main(int argc, char** argv) { - return rocksdb::test::RunAllTests(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/db/prefix_test.cc b/db/prefix_test.cc index a95422243..bb15d69bc 100644 --- a/db/prefix_test.cc +++ b/db/prefix_test.cc @@ -144,7 +144,7 @@ std::string Get(DB* db, const ReadOptions& read_options, uint64_t prefix, } } // namespace -class PrefixTest { +class PrefixTest : public testing::Test { public: std::shared_ptr OpenDb() { DB* db; @@ -217,7 +217,7 @@ class PrefixTest { Options options; }; -TEST(PrefixTest, TestResult) { +TEST_F(PrefixTest, TestResult) { for (int num_buckets = 1; num_buckets <= 2; num_buckets++) { FirstOption(); while (NextOptions(num_buckets)) { @@ -390,7 +390,7 @@ TEST(PrefixTest, TestResult) { } } -TEST(PrefixTest, DynamicPrefixIterator) { +TEST_F(PrefixTest, DynamicPrefixIterator) { while (NextOptions(FLAGS_bucket_count)) { std::cout << "*** Mem table: " << options.memtable_factory->Name() << std::endl; @@ -492,10 +492,11 @@ TEST(PrefixTest, DynamicPrefixIterator) { } int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); ParseCommandLineFlags(&argc, &argv, true); std::cout << kDbName << "\n"; - return rocksdb::test::RunAllTests(); + return RUN_ALL_TESTS(); } #endif // GFLAGS diff --git a/db/skiplist_test.cc b/db/skiplist_test.cc index d8e113c66..3d1418625 100644 --- a/db/skiplist_test.cc +++ b/db/skiplist_test.cc @@ -31,9 +31,9 @@ struct TestComparator { } }; -class SkipTest { }; +class SkipTest : public testing::Test {}; -TEST(SkipTest, Empty) { +TEST_F(SkipTest, Empty) { Arena arena; TestComparator cmp; SkipList list(cmp, &arena); @@ -49,7 +49,7 @@ TEST(SkipTest, Empty) { ASSERT_TRUE(!iter.Valid()); } -TEST(SkipTest, InsertAndLookup) { +TEST_F(SkipTest, InsertAndLookup) { const int N = 2000; const int R = 5000; Random rnd(1000); @@ -287,7 +287,7 @@ 
const uint32_t ConcurrentTest::K; // Simple test that does single-threaded testing of the ConcurrentTest // scaffolding. -TEST(SkipTest, ConcurrentWithoutThreads) { +TEST_F(SkipTest, ConcurrentWithoutThreads) { ConcurrentTest test; Random rnd(test::RandomSeed()); for (int i = 0; i < 10000; i++) { @@ -364,14 +364,15 @@ static void RunConcurrent(int run) { } } -TEST(SkipTest, Concurrent1) { RunConcurrent(1); } -TEST(SkipTest, Concurrent2) { RunConcurrent(2); } -TEST(SkipTest, Concurrent3) { RunConcurrent(3); } -TEST(SkipTest, Concurrent4) { RunConcurrent(4); } -TEST(SkipTest, Concurrent5) { RunConcurrent(5); } +TEST_F(SkipTest, Concurrent1) { RunConcurrent(1); } +TEST_F(SkipTest, Concurrent2) { RunConcurrent(2); } +TEST_F(SkipTest, Concurrent3) { RunConcurrent(3); } +TEST_F(SkipTest, Concurrent4) { RunConcurrent(4); } +TEST_F(SkipTest, Concurrent5) { RunConcurrent(5); } } // namespace rocksdb int main(int argc, char** argv) { - return rocksdb::test::RunAllTests(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/db/table_properties_collector_test.cc b/db/table_properties_collector_test.cc index 333c47b62..7b0c99c71 100644 --- a/db/table_properties_collector_test.cc +++ b/db/table_properties_collector_test.cc @@ -22,8 +22,7 @@ namespace rocksdb { -class TablePropertiesTest { -}; +class TablePropertiesTest : public testing::Test {}; // TODO(kailiu) the following classes should be moved to some more general // places, so that other tests can also make use of them. @@ -195,7 +194,7 @@ void TestCustomizedTablePropertiesCollector( } } // namespace -TEST(TablePropertiesTest, CustomizedTablePropertiesCollector) { +TEST_F(TablePropertiesTest, CustomizedTablePropertiesCollector) { // Test properties collectors with internal keys or regular keys // for block based table for (bool encode_as_internal : { true, false }) { @@ -301,7 +300,7 @@ void TestInternalKeyPropertiesCollector( } } // namespace -TEST(TablePropertiesTest, InternalKeyPropertiesCollector) { +TEST_F(TablePropertiesTest, InternalKeyPropertiesCollector) { TestInternalKeyPropertiesCollector( kBlockBasedTableMagicNumber, true /* sanitize */, @@ -326,5 +325,6 @@ TEST(TablePropertiesTest, InternalKeyPropertiesCollector) { } // namespace rocksdb int main(int argc, char** argv) { - return rocksdb::test::RunAllTests(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/db/version_builder_test.cc b/db/version_builder_test.cc index 33d3fa269..c92cad20d 100644 --- a/db/version_builder_test.cc +++ b/db/version_builder_test.cc @@ -12,7 +12,7 @@ namespace rocksdb { -class VersionBuilderTest { +class VersionBuilderTest : public testing::Test { public: const Comparator* ucmp_; InternalKeyComparator icmp_; @@ -95,7 +95,7 @@ void UnrefFilesInVersion(VersionStorageInfo* new_vstorage) { } } -TEST(VersionBuilderTest, ApplyAndSaveTo) { +TEST_F(VersionBuilderTest, ApplyAndSaveTo) { Add(0, 1U, "150", "200", 100U); Add(1, 66U, "150", "200", 100U); @@ -131,7 +131,7 @@ TEST(VersionBuilderTest, ApplyAndSaveTo) { UnrefFilesInVersion(&new_vstorage); } -TEST(VersionBuilderTest, ApplyAndSaveToDynamic) { +TEST_F(VersionBuilderTest, ApplyAndSaveToDynamic) { ioptions_.level_compaction_dynamic_level_bytes = true; Add(0, 1U, "150", "200", 100U, 0, 200U, 200U, 0, 0, false, 200U, 200U); @@ -168,7 +168,7 @@ TEST(VersionBuilderTest, ApplyAndSaveToDynamic) { UnrefFilesInVersion(&new_vstorage); } -TEST(VersionBuilderTest, ApplyAndSaveToDynamic2) { +TEST_F(VersionBuilderTest, ApplyAndSaveToDynamic2) { 
ioptions_.level_compaction_dynamic_level_bytes = true; Add(0, 1U, "150", "200", 100U, 0, 200U, 200U, 0, 0, false, 200U, 200U); @@ -207,7 +207,7 @@ TEST(VersionBuilderTest, ApplyAndSaveToDynamic2) { UnrefFilesInVersion(&new_vstorage); } -TEST(VersionBuilderTest, ApplyMultipleAndSaveTo) { +TEST_F(VersionBuilderTest, ApplyMultipleAndSaveTo) { UpdateVersionStorageInfo(); VersionEdit version_edit; @@ -236,7 +236,7 @@ TEST(VersionBuilderTest, ApplyMultipleAndSaveTo) { UnrefFilesInVersion(&new_vstorage); } -TEST(VersionBuilderTest, ApplyDeleteAndSaveTo) { +TEST_F(VersionBuilderTest, ApplyDeleteAndSaveTo) { UpdateVersionStorageInfo(); EnvOptions env_options; @@ -273,7 +273,7 @@ TEST(VersionBuilderTest, ApplyDeleteAndSaveTo) { UnrefFilesInVersion(&new_vstorage); } -TEST(VersionBuilderTest, EstimatedActiveKeys) { +TEST_F(VersionBuilderTest, EstimatedActiveKeys) { const uint32_t kTotalSamples = 20; const uint32_t kNumLevels = 5; const uint32_t kFilesPerLevel = 8; @@ -297,4 +297,7 @@ TEST(VersionBuilderTest, EstimatedActiveKeys) { } // namespace rocksdb -int main(int argc, char** argv) { return rocksdb::test::RunAllTests(); } +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/db/version_edit_test.cc b/db/version_edit_test.cc index ec123d2c1..8b7b31bdd 100644 --- a/db/version_edit_test.cc +++ b/db/version_edit_test.cc @@ -22,9 +22,9 @@ static void TestEncodeDecode(const VersionEdit& edit) { ASSERT_EQ(encoded, encoded2); } -class VersionEditTest { }; +class VersionEditTest : public testing::Test {}; -TEST(VersionEditTest, EncodeDecode) { +TEST_F(VersionEditTest, EncodeDecode) { static const uint64_t kBig = 1ull << 50; static const uint32_t kBig32Bit = 1ull << 30; @@ -45,7 +45,7 @@ TEST(VersionEditTest, EncodeDecode) { TestEncodeDecode(edit); } -TEST(VersionEditTest, EncodeEmptyFile) { +TEST_F(VersionEditTest, EncodeEmptyFile) { VersionEdit edit; edit.AddFile(0, 0, 0, 0, InternalKey(), @@ -55,7 +55,7 @@ TEST(VersionEditTest, EncodeEmptyFile) { ASSERT_TRUE(!edit.EncodeTo(&buffer)); } -TEST(VersionEditTest, ColumnFamilyTest) { +TEST_F(VersionEditTest, ColumnFamilyTest) { VersionEdit edit; edit.SetColumnFamily(2); edit.AddColumnFamily("column_family"); @@ -71,5 +71,6 @@ TEST(VersionEditTest, ColumnFamilyTest) { } // namespace rocksdb int main(int argc, char** argv) { - return rocksdb::test::RunAllTests(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/db/version_set_test.cc b/db/version_set_test.cc index 11244dd64..954c72dc2 100644 --- a/db/version_set_test.cc +++ b/db/version_set_test.cc @@ -14,7 +14,7 @@ namespace rocksdb { -class GenerateLevelFilesBriefTest { +class GenerateLevelFilesBriefTest : public testing::Test { public: std::vector files_; LevelFilesBrief file_level_; @@ -49,21 +49,20 @@ class GenerateLevelFilesBriefTest { } }; -TEST(GenerateLevelFilesBriefTest, Empty) { +TEST_F(GenerateLevelFilesBriefTest, Empty) { DoGenerateLevelFilesBrief(&file_level_, files_, &arena_); ASSERT_EQ(0u, file_level_.num_files); ASSERT_EQ(0, Compare()); } -TEST(GenerateLevelFilesBriefTest, Single) { +TEST_F(GenerateLevelFilesBriefTest, Single) { Add("p", "q"); DoGenerateLevelFilesBrief(&file_level_, files_, &arena_); ASSERT_EQ(1u, file_level_.num_files); ASSERT_EQ(0, Compare()); } - -TEST(GenerateLevelFilesBriefTest, Multiple) { +TEST_F(GenerateLevelFilesBriefTest, Multiple) { Add("150", "200"); Add("200", "250"); Add("300", "350"); @@ -89,7 +88,7 @@ Options GetOptionsWithNumLevels(int num_levels, return opt; } 
-class VersionStorageInfoTest { +class VersionStorageInfoTest : public testing::Test { public: const Comparator* ucmp_; InternalKeyComparator icmp_; @@ -138,7 +137,7 @@ class VersionStorageInfoTest { } }; -TEST(VersionStorageInfoTest, MaxBytesForLevelStatic) { +TEST_F(VersionStorageInfoTest, MaxBytesForLevelStatic) { ioptions_.level_compaction_dynamic_level_bytes = false; mutable_cf_options_.max_bytes_for_level_base = 10; mutable_cf_options_.max_bytes_for_level_multiplier = 5; @@ -154,7 +153,7 @@ TEST(VersionStorageInfoTest, MaxBytesForLevelStatic) { ASSERT_EQ(0, logger_->log_count); } -TEST(VersionStorageInfoTest, MaxBytesForLevelDynamic) { +TEST_F(VersionStorageInfoTest, MaxBytesForLevelDynamic) { ioptions_.level_compaction_dynamic_level_bytes = true; mutable_cf_options_.max_bytes_for_level_base = 1000; mutable_cf_options_.max_bytes_for_level_multiplier = 5; @@ -196,7 +195,7 @@ TEST(VersionStorageInfoTest, MaxBytesForLevelDynamic) { ASSERT_EQ(vstorage_.base_level(), 1); } -TEST(VersionStorageInfoTest, MaxBytesForLevelDynamicLotsOfData) { +TEST_F(VersionStorageInfoTest, MaxBytesForLevelDynamicLotsOfData) { ioptions_.level_compaction_dynamic_level_bytes = true; mutable_cf_options_.max_bytes_for_level_base = 100; mutable_cf_options_.max_bytes_for_level_multiplier = 2; @@ -216,7 +215,7 @@ TEST(VersionStorageInfoTest, MaxBytesForLevelDynamicLotsOfData) { ASSERT_EQ(0, logger_->log_count); } -class FindLevelFileTest { +class FindLevelFileTest : public testing::Test { public: LevelFilesBrief file_level_; bool disjoint_sorted_files_; @@ -274,7 +273,7 @@ class FindLevelFileTest { } }; -TEST(FindLevelFileTest, LevelEmpty) { +TEST_F(FindLevelFileTest, LevelEmpty) { LevelFileInit(0); ASSERT_EQ(0, Find("foo")); @@ -284,7 +283,7 @@ TEST(FindLevelFileTest, LevelEmpty) { ASSERT_TRUE(! 
Overlaps(nullptr, nullptr)); } -TEST(FindLevelFileTest, LevelSingle) { +TEST_F(FindLevelFileTest, LevelSingle) { LevelFileInit(1); Add("p", "q"); @@ -316,7 +315,7 @@ TEST(FindLevelFileTest, LevelSingle) { ASSERT_TRUE(Overlaps(nullptr, nullptr)); } -TEST(FindLevelFileTest, LevelMultiple) { +TEST_F(FindLevelFileTest, LevelMultiple) { LevelFileInit(4); Add("150", "200"); @@ -356,7 +355,7 @@ TEST(FindLevelFileTest, LevelMultiple) { ASSERT_TRUE(Overlaps("450", "500")); } -TEST(FindLevelFileTest, LevelMultipleNullBoundaries) { +TEST_F(FindLevelFileTest, LevelMultipleNullBoundaries) { LevelFileInit(4); Add("150", "200"); @@ -378,7 +377,7 @@ TEST(FindLevelFileTest, LevelMultipleNullBoundaries) { ASSERT_TRUE(Overlaps("450", nullptr)); } -TEST(FindLevelFileTest, LevelOverlapSequenceChecks) { +TEST_F(FindLevelFileTest, LevelOverlapSequenceChecks) { LevelFileInit(1); Add("200", "200", 5000, 3000); @@ -389,7 +388,7 @@ TEST(FindLevelFileTest, LevelOverlapSequenceChecks) { ASSERT_TRUE(Overlaps("200", "210")); } -TEST(FindLevelFileTest, LevelOverlappingFiles) { +TEST_F(FindLevelFileTest, LevelOverlappingFiles) { LevelFileInit(2); Add("150", "600"); @@ -412,5 +411,6 @@ TEST(FindLevelFileTest, LevelOverlappingFiles) { } // namespace rocksdb int main(int argc, char** argv) { - return rocksdb::test::RunAllTests(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/db/wal_manager_test.cc b/db/wal_manager_test.cc index e3abfc1e0..ebde0dc5e 100644 --- a/db/wal_manager_test.cc +++ b/db/wal_manager_test.cc @@ -23,7 +23,7 @@ namespace rocksdb { // TODO(icanadi) mock out VersionSet // TODO(icanadi) move other WalManager-specific tests from db_test here -class WalManagerTest { +class WalManagerTest : public testing::Test { public: WalManagerTest() : env_(Env::Default()), @@ -104,7 +104,7 @@ class WalManagerTest { uint64_t current_log_number_; }; -TEST(WalManagerTest, ReadFirstRecordCache) { +TEST_F(WalManagerTest, ReadFirstRecordCache) { Init(); std::string path = dbname_ + "/000001.log"; unique_ptr file; @@ -192,7 +192,7 @@ int CountRecords(TransactionLogIterator* iter) { } } // namespace -TEST(WalManagerTest, WALArchivalSizeLimit) { +TEST_F(WalManagerTest, WALArchivalSizeLimit) { db_options_.WAL_ttl_seconds = 0; db_options_.WAL_size_limit_MB = 1000; Init(); @@ -230,7 +230,7 @@ TEST(WalManagerTest, WALArchivalSizeLimit) { ASSERT_TRUE(log_files.empty()); } -TEST(WalManagerTest, WALArchivalTtl) { +TEST_F(WalManagerTest, WALArchivalTtl) { db_options_.WAL_ttl_seconds = 1000; Init(); @@ -256,7 +256,7 @@ TEST(WalManagerTest, WALArchivalTtl) { ASSERT_TRUE(log_files.empty()); } -TEST(WalManagerTest, TransactionLogIteratorMoveOverZeroFiles) { +TEST_F(WalManagerTest, TransactionLogIteratorMoveOverZeroFiles) { Init(); RollTheLog(false); Put("key1", std::string(1024, 'a')); @@ -270,7 +270,7 @@ TEST(WalManagerTest, TransactionLogIteratorMoveOverZeroFiles) { ASSERT_EQ(2, CountRecords(iter.get())); } -TEST(WalManagerTest, TransactionLogIteratorJustEmptyFile) { +TEST_F(WalManagerTest, TransactionLogIteratorJustEmptyFile) { Init(); RollTheLog(false); auto iter = OpenTransactionLogIter(0); @@ -280,4 +280,7 @@ TEST(WalManagerTest, TransactionLogIteratorJustEmptyFile) { } // namespace rocksdb -int main(int argc, char** argv) { return rocksdb::test::RunAllTests(); } +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/db/write_batch_test.cc b/db/write_batch_test.cc index 0626d4554..7c138455f 100644 --- a/db/write_batch_test.cc +++ 
b/db/write_batch_test.cc @@ -82,16 +82,16 @@ static std::string PrintContents(WriteBatch* b) { return state; } -class WriteBatchTest { }; +class WriteBatchTest : public testing::Test {}; -TEST(WriteBatchTest, Empty) { +TEST_F(WriteBatchTest, Empty) { WriteBatch batch; ASSERT_EQ("", PrintContents(&batch)); ASSERT_EQ(0, WriteBatchInternal::Count(&batch)); ASSERT_EQ(0, batch.Count()); } -TEST(WriteBatchTest, Multiple) { +TEST_F(WriteBatchTest, Multiple) { WriteBatch batch; batch.Put(Slice("foo"), Slice("bar")); batch.Delete(Slice("box")); @@ -106,7 +106,7 @@ TEST(WriteBatchTest, Multiple) { ASSERT_EQ(3, batch.Count()); } -TEST(WriteBatchTest, Corruption) { +TEST_F(WriteBatchTest, Corruption) { WriteBatch batch; batch.Put(Slice("foo"), Slice("bar")); batch.Delete(Slice("box")); @@ -119,7 +119,7 @@ TEST(WriteBatchTest, Corruption) { PrintContents(&batch)); } -TEST(WriteBatchTest, Append) { +TEST_F(WriteBatchTest, Append) { WriteBatch b1, b2; WriteBatchInternal::SetSequence(&b1, 200); WriteBatchInternal::SetSequence(&b2, 300); @@ -188,7 +188,7 @@ namespace { }; } -TEST(WriteBatchTest, MergeNotImplemented) { +TEST_F(WriteBatchTest, MergeNotImplemented) { WriteBatch batch; batch.Merge(Slice("foo"), Slice("bar")); ASSERT_EQ(1, batch.Count()); @@ -199,7 +199,7 @@ TEST(WriteBatchTest, MergeNotImplemented) { ASSERT_OK(batch.Iterate(&handler)); } -TEST(WriteBatchTest, PutNotImplemented) { +TEST_F(WriteBatchTest, PutNotImplemented) { WriteBatch batch; batch.Put(Slice("k1"), Slice("v1")); ASSERT_EQ(1, batch.Count()); @@ -210,7 +210,7 @@ TEST(WriteBatchTest, PutNotImplemented) { ASSERT_OK(batch.Iterate(&handler)); } -TEST(WriteBatchTest, DeleteNotImplemented) { +TEST_F(WriteBatchTest, DeleteNotImplemented) { WriteBatch batch; batch.Delete(Slice("k2")); ASSERT_EQ(1, batch.Count()); @@ -221,7 +221,7 @@ TEST(WriteBatchTest, DeleteNotImplemented) { ASSERT_OK(batch.Iterate(&handler)); } -TEST(WriteBatchTest, Blob) { +TEST_F(WriteBatchTest, Blob) { WriteBatch batch; batch.Put(Slice("k1"), Slice("v1")); batch.Put(Slice("k2"), Slice("v2")); @@ -251,7 +251,7 @@ TEST(WriteBatchTest, Blob) { handler.seen); } -TEST(WriteBatchTest, Continue) { +TEST_F(WriteBatchTest, Continue) { WriteBatch batch; struct Handler : public TestHandler { @@ -293,7 +293,7 @@ TEST(WriteBatchTest, Continue) { handler.seen); } -TEST(WriteBatchTest, PutGatherSlices) { +TEST_F(WriteBatchTest, PutGatherSlices) { WriteBatch batch; batch.Put(Slice("foo"), Slice("bar")); @@ -336,7 +336,7 @@ class ColumnFamilyHandleImplDummy : public ColumnFamilyHandleImpl { }; } // namespace anonymous -TEST(WriteBatchTest, ColumnFamiliesBatchTest) { +TEST_F(WriteBatchTest, ColumnFamiliesBatchTest) { WriteBatch batch; ColumnFamilyHandleImplDummy zero(0), two(2), three(3), eight(8); batch.Put(&zero, Slice("foo"), Slice("bar")); @@ -360,7 +360,7 @@ TEST(WriteBatchTest, ColumnFamiliesBatchTest) { handler.seen); } -TEST(WriteBatchTest, ColumnFamiliesBatchWithIndexTest) { +TEST_F(WriteBatchTest, ColumnFamiliesBatchWithIndexTest) { WriteBatchWithIndex batch; ColumnFamilyHandleImplDummy zero(0), two(2), three(3), eight(8); batch.Put(&zero, Slice("foo"), Slice("bar")); @@ -445,5 +445,6 @@ TEST(WriteBatchTest, ColumnFamiliesBatchWithIndexTest) { } // namespace rocksdb int main(int argc, char** argv) { - return rocksdb::test::RunAllTests(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/db/write_controller_test.cc b/db/write_controller_test.cc index 1cec9658d..a24ebaacb 100644 --- a/db/write_controller_test.cc +++ 
b/db/write_controller_test.cc @@ -9,9 +9,9 @@ namespace rocksdb { -class WriteControllerTest {}; +class WriteControllerTest : public testing::Test {}; -TEST(WriteControllerTest, SanityTest) { +TEST_F(WriteControllerTest, SanityTest) { WriteController controller; auto stop_token_1 = controller.GetStopToken(); auto stop_token_2 = controller.GetStopToken(); @@ -37,4 +37,7 @@ TEST(WriteControllerTest, SanityTest) { } // namespace rocksdb -int main(int argc, char** argv) { return rocksdb::test::RunAllTests(); } +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/table/block_based_filter_block_test.cc b/table/block_based_filter_block_test.cc index 68ce5369e..017de5906 100644 --- a/table/block_based_filter_block_test.cc +++ b/table/block_based_filter_block_test.cc @@ -43,7 +43,7 @@ class TestHashFilter : public FilterPolicy { } }; -class FilterBlockTest { +class FilterBlockTest : public testing::Test { public: TestHashFilter policy_; BlockBasedTableOptions table_options_; @@ -53,7 +53,7 @@ class FilterBlockTest { } }; -TEST(FilterBlockTest, EmptyBuilder) { +TEST_F(FilterBlockTest, EmptyBuilder) { BlockBasedFilterBlockBuilder builder(nullptr, table_options_); BlockContents block(builder.Finish(), false, kNoCompression); ASSERT_EQ("\\x00\\x00\\x00\\x00\\x0b", EscapeString(block.data)); @@ -63,7 +63,7 @@ TEST(FilterBlockTest, EmptyBuilder) { ASSERT_TRUE(reader.KeyMayMatch("foo", 100000)); } -TEST(FilterBlockTest, SingleChunk) { +TEST_F(FilterBlockTest, SingleChunk) { BlockBasedFilterBlockBuilder builder(nullptr, table_options_); builder.StartBlock(100); builder.Add("foo"); @@ -85,7 +85,7 @@ TEST(FilterBlockTest, SingleChunk) { ASSERT_TRUE(!reader.KeyMayMatch("other", 100)); } -TEST(FilterBlockTest, MultiChunk) { +TEST_F(FilterBlockTest, MultiChunk) { BlockBasedFilterBlockBuilder builder(nullptr, table_options_); // First filter @@ -136,7 +136,7 @@ TEST(FilterBlockTest, MultiChunk) { // Test for block based filter block // use new interface in FilterPolicy to create filter builder/reader -class BlockBasedFilterBlockTest { +class BlockBasedFilterBlockTest : public testing::Test { public: BlockBasedTableOptions table_options_; @@ -147,7 +147,7 @@ class BlockBasedFilterBlockTest { ~BlockBasedFilterBlockTest() {} }; -TEST(BlockBasedFilterBlockTest, BlockBasedEmptyBuilder) { +TEST_F(BlockBasedFilterBlockTest, BlockBasedEmptyBuilder) { FilterBlockBuilder* builder = new BlockBasedFilterBlockBuilder( nullptr, table_options_); BlockContents block(builder->Finish(), false, kNoCompression); @@ -161,7 +161,7 @@ TEST(BlockBasedFilterBlockTest, BlockBasedEmptyBuilder) { delete reader; } -TEST(BlockBasedFilterBlockTest, BlockBasedSingleChunk) { +TEST_F(BlockBasedFilterBlockTest, BlockBasedSingleChunk) { FilterBlockBuilder* builder = new BlockBasedFilterBlockBuilder( nullptr, table_options_); builder->StartBlock(100); @@ -187,7 +187,7 @@ TEST(BlockBasedFilterBlockTest, BlockBasedSingleChunk) { delete reader; } -TEST(BlockBasedFilterBlockTest, BlockBasedMultiChunk) { +TEST_F(BlockBasedFilterBlockTest, BlockBasedMultiChunk) { FilterBlockBuilder* builder = new BlockBasedFilterBlockBuilder( nullptr, table_options_); @@ -242,4 +242,7 @@ TEST(BlockBasedFilterBlockTest, BlockBasedMultiChunk) { } // namespace rocksdb -int main(int argc, char** argv) { return rocksdb::test::RunAllTests(); } +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/table/block_hash_index_test.cc 
b/table/block_hash_index_test.cc index dc6a0a0ac..b001c203a 100644 --- a/table/block_hash_index_test.cc +++ b/table/block_hash_index_test.cc @@ -50,9 +50,9 @@ class MapIterator : public Iterator { Data::const_iterator pos_; }; -class BlockTest {}; +class BlockTest : public testing::Test {}; -TEST(BlockTest, BasicTest) { +TEST_F(BlockTest, BasicTest) { const size_t keys_per_block = 4; const size_t prefix_size = 2; std::vector keys = {/* block 1 */ @@ -114,4 +114,7 @@ TEST(BlockTest, BasicTest) { } // namespace rocksdb -int main(int argc, char** argv) { return rocksdb::test::RunAllTests(); } +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/table/block_test.cc b/table/block_test.cc index fa263bcbd..c86f38da5 100644 --- a/table/block_test.cc +++ b/table/block_test.cc @@ -65,10 +65,10 @@ void GenerateRandomKVs(std::vector *keys, } } -class BlockTest {}; +class BlockTest : public testing::Test {}; // block test -TEST(BlockTest, SimpleTest) { +TEST_F(BlockTest, SimpleTest) { Random rnd(301); Options options = Options(); std::unique_ptr ic; @@ -201,7 +201,7 @@ void CheckBlockContents(BlockContents contents, const int max_key, } // In this test case, no two key share same prefix. -TEST(BlockTest, SimpleIndexHash) { +TEST_F(BlockTest, SimpleIndexHash) { const int kMaxKey = 100000; std::vector keys; std::vector values; @@ -215,7 +215,7 @@ TEST(BlockTest, SimpleIndexHash) { CheckBlockContents(std::move(contents), kMaxKey, keys, values); } -TEST(BlockTest, IndexHashWithSharedPrefix) { +TEST_F(BlockTest, IndexHashWithSharedPrefix) { const int kMaxKey = 100000; // for each prefix, there will be 5 keys starts with it. const int kPrefixGroup = 5; @@ -236,6 +236,7 @@ TEST(BlockTest, IndexHashWithSharedPrefix) { } // namespace rocksdb -int main(int argc, char** argv) { - return rocksdb::test::RunAllTests(); +int main(int argc, char **argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/table/cuckoo_table_builder_test.cc b/table/cuckoo_table_builder_test.cc index ecd23aff5..cab5dafb0 100644 --- a/table/cuckoo_table_builder_test.cc +++ b/table/cuckoo_table_builder_test.cc @@ -25,7 +25,7 @@ uint64_t GetSliceHash(const Slice& s, uint32_t index, } } // namespace -class CuckooBuilderTest { +class CuckooBuilderTest : public testing::Test { public: CuckooBuilderTest() { env_ = Env::Default(); @@ -129,7 +129,7 @@ class CuckooBuilderTest { const double kHashTableRatio = 0.9; }; -TEST(CuckooBuilderTest, SuccessWithEmptyFile) { +TEST_F(CuckooBuilderTest, SuccessWithEmptyFile) { unique_ptr writable_file; fname = test::TmpDir() + "/EmptyFile"; ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_)); @@ -142,7 +142,7 @@ TEST(CuckooBuilderTest, SuccessWithEmptyFile) { CheckFileContents({}, {}, {}, "", 2, 2, false); } -TEST(CuckooBuilderTest, WriteSuccessNoCollisionFullKey) { +TEST_F(CuckooBuilderTest, WriteSuccessNoCollisionFullKey) { uint32_t num_hash_fun = 4; std::vector user_keys = {"key01", "key02", "key03", "key04"}; std::vector values = {"v01", "v02", "v03", "v04"}; @@ -182,7 +182,7 @@ TEST(CuckooBuilderTest, WriteSuccessNoCollisionFullKey) { expected_unused_bucket, expected_table_size, 2, false); } -TEST(CuckooBuilderTest, WriteSuccessWithCollisionFullKey) { +TEST_F(CuckooBuilderTest, WriteSuccessWithCollisionFullKey) { uint32_t num_hash_fun = 4; std::vector user_keys = {"key01", "key02", "key03", "key04"}; std::vector values = {"v01", "v02", "v03", "v04"}; @@ -222,7 +222,7 @@ 
TEST(CuckooBuilderTest, WriteSuccessWithCollisionFullKey) { expected_unused_bucket, expected_table_size, 4, false); } -TEST(CuckooBuilderTest, WriteSuccessWithCollisionAndCuckooBlock) { +TEST_F(CuckooBuilderTest, WriteSuccessWithCollisionAndCuckooBlock) { uint32_t num_hash_fun = 4; std::vector user_keys = {"key01", "key02", "key03", "key04"}; std::vector values = {"v01", "v02", "v03", "v04"}; @@ -264,7 +264,7 @@ TEST(CuckooBuilderTest, WriteSuccessWithCollisionAndCuckooBlock) { expected_unused_bucket, expected_table_size, 3, false, cuckoo_block_size); } -TEST(CuckooBuilderTest, WithCollisionPathFullKey) { +TEST_F(CuckooBuilderTest, WithCollisionPathFullKey) { // Have two hash functions. Insert elements with overlapping hashes. // Finally insert an element with hash value somewhere in the middle // so that it displaces all the elements after that. @@ -309,7 +309,7 @@ TEST(CuckooBuilderTest, WithCollisionPathFullKey) { expected_unused_bucket, expected_table_size, 2, false); } -TEST(CuckooBuilderTest, WithCollisionPathFullKeyAndCuckooBlock) { +TEST_F(CuckooBuilderTest, WithCollisionPathFullKeyAndCuckooBlock) { uint32_t num_hash_fun = 2; std::vector user_keys = {"key01", "key02", "key03", "key04", "key05"}; @@ -351,7 +351,7 @@ TEST(CuckooBuilderTest, WithCollisionPathFullKeyAndCuckooBlock) { expected_unused_bucket, expected_table_size, 2, false, 2); } -TEST(CuckooBuilderTest, WriteSuccessNoCollisionUserKey) { +TEST_F(CuckooBuilderTest, WriteSuccessNoCollisionUserKey) { uint32_t num_hash_fun = 4; std::vector user_keys = {"key01", "key02", "key03", "key04"}; std::vector values = {"v01", "v02", "v03", "v04"}; @@ -387,7 +387,7 @@ TEST(CuckooBuilderTest, WriteSuccessNoCollisionUserKey) { expected_unused_bucket, expected_table_size, 2, true); } -TEST(CuckooBuilderTest, WriteSuccessWithCollisionUserKey) { +TEST_F(CuckooBuilderTest, WriteSuccessWithCollisionUserKey) { uint32_t num_hash_fun = 4; std::vector user_keys = {"key01", "key02", "key03", "key04"}; std::vector values = {"v01", "v02", "v03", "v04"}; @@ -423,7 +423,7 @@ TEST(CuckooBuilderTest, WriteSuccessWithCollisionUserKey) { expected_unused_bucket, expected_table_size, 4, true); } -TEST(CuckooBuilderTest, WithCollisionPathUserKey) { +TEST_F(CuckooBuilderTest, WithCollisionPathUserKey) { uint32_t num_hash_fun = 2; std::vector user_keys = {"key01", "key02", "key03", "key04", "key05"}; @@ -461,7 +461,7 @@ TEST(CuckooBuilderTest, WithCollisionPathUserKey) { expected_unused_bucket, expected_table_size, 2, true); } -TEST(CuckooBuilderTest, FailWhenCollisionPathTooLong) { +TEST_F(CuckooBuilderTest, FailWhenCollisionPathTooLong) { // Have two hash functions. Insert elements with overlapping hashes. // Finally try inserting an element with hash value somewhere in the middle // and it should fail because the no. of elements to displace is too high. 
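Every file in this patch gets the same mechanical conversion: the test class gains testing::Test as a public base, and each TEST(Fixture, Name) registered with the project's own harness becomes a gtest TEST_F(Fixture, Name), which requires such a base. A minimal standalone sketch of the pattern, using a hypothetical FooTest fixture rather than a class from this patch:

#include <gtest/gtest.h>

// Legacy form, discovered and run by the old rocksdb::test::RunAllTests():
//   class FooTest {};
//   TEST(FooTest, Basic) { ASSERT_EQ(4, 2 + 2); }

// gtest form: TEST_F instantiates a subclass of the fixture, so the fixture
// must derive from testing::Test.
class FooTest : public testing::Test {};

TEST_F(FooTest, Basic) { ASSERT_EQ(4, 2 + 2); }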
@@ -491,7 +491,7 @@ TEST(CuckooBuilderTest, FailWhenCollisionPathTooLong) { ASSERT_OK(writable_file->Close()); } -TEST(CuckooBuilderTest, FailWhenSameKeyInserted) { +TEST_F(CuckooBuilderTest, FailWhenSameKeyInserted) { hash_map = {{"repeatedkey", {0, 1, 2, 3}}}; uint32_t num_hash_fun = 4; std::string user_key = "repeatedkey"; @@ -515,4 +515,7 @@ TEST(CuckooBuilderTest, FailWhenSameKeyInserted) { } } // namespace rocksdb -int main(int argc, char** argv) { return rocksdb::test::RunAllTests(); } +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/table/cuckoo_table_reader_test.cc b/table/cuckoo_table_reader_test.cc index ef62e7d5e..91bca7e46 100644 --- a/table/cuckoo_table_reader_test.cc +++ b/table/cuckoo_table_reader_test.cc @@ -64,8 +64,10 @@ uint64_t GetSliceHash(const Slice& s, uint32_t index, } // namespace -class CuckooReaderTest { +class CuckooReaderTest : public testing::Test { public: + using testing::Test::SetUp; + CuckooReaderTest() { options.allow_mmap_reads = true; env = options.env; @@ -208,7 +210,7 @@ class CuckooReaderTest { EnvOptions env_options; }; -TEST(CuckooReaderTest, WhenKeyExists) { +TEST_F(CuckooReaderTest, WhenKeyExists) { SetUp(kNumHashFunc); fname = test::TmpDir() + "/CuckooReader_WhenKeyExists"; for (uint64_t i = 0; i < num_items; i++) { @@ -235,7 +237,7 @@ TEST(CuckooReaderTest, WhenKeyExists) { CreateCuckooFileAndCheckReader(); } -TEST(CuckooReaderTest, WhenKeyExistsWithUint64Comparator) { +TEST_F(CuckooReaderTest, WhenKeyExistsWithUint64Comparator) { SetUp(kNumHashFunc); fname = test::TmpDir() + "/CuckooReaderUint64_WhenKeyExists"; for (uint64_t i = 0; i < num_items; i++) { @@ -263,7 +265,7 @@ TEST(CuckooReaderTest, WhenKeyExistsWithUint64Comparator) { CreateCuckooFileAndCheckReader(test::Uint64Comparator()); } -TEST(CuckooReaderTest, CheckIterator) { +TEST_F(CuckooReaderTest, CheckIterator) { SetUp(2*kNumHashFunc); fname = test::TmpDir() + "/CuckooReader_CheckIterator"; for (uint64_t i = 0; i < num_items; i++) { @@ -282,7 +284,7 @@ TEST(CuckooReaderTest, CheckIterator) { CheckIterator(); } -TEST(CuckooReaderTest, CheckIteratorUint64) { +TEST_F(CuckooReaderTest, CheckIteratorUint64) { SetUp(2*kNumHashFunc); fname = test::TmpDir() + "/CuckooReader_CheckIterator"; for (uint64_t i = 0; i < num_items; i++) { @@ -302,7 +304,7 @@ TEST(CuckooReaderTest, CheckIteratorUint64) { CheckIterator(test::Uint64Comparator()); } -TEST(CuckooReaderTest, WhenKeyNotFound) { +TEST_F(CuckooReaderTest, WhenKeyNotFound) { // Add keys with colliding hash values. SetUp(kNumHashFunc); fname = test::TmpDir() + "/CuckooReader_WhenKeyNotFound"; @@ -503,7 +505,7 @@ void ReadKeys(uint64_t num, uint32_t batch_size) { } } // namespace. -TEST(CuckooReaderTest, TestReadPerformance) { +TEST_F(CuckooReaderTest, TestReadPerformance) { if (!FLAGS_enable_perf) { return; } @@ -534,8 +536,9 @@ TEST(CuckooReaderTest, TestReadPerformance) { } // namespace rocksdb int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); ParseCommandLineFlags(&argc, &argv, true); - return rocksdb::test::RunAllTests(); + return RUN_ALL_TESTS(); } #endif // GFLAGS. 
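One non-mechanical detail shows up in cuckoo_table_reader_test.cc above: the fixture declares its own SetUp(int) overload (the tests call SetUp(kNumHashFunc)), which would hide the inherited virtual testing::Test::SetUp() once the class derives from testing::Test. The added using-declaration presumably exists to keep the no-argument overload visible and avoid name-hiding warnings. A reduced sketch with a hypothetical ReaderFixture, not the real class:

#include <gtest/gtest.h>

class ReaderFixture : public testing::Test {
 public:
  using testing::Test::SetUp;  // re-expose the inherited no-argument SetUp()

  // Fixture-specific overload; without the using-declaration above it would
  // hide testing::Test::SetUp() and can trip -Woverloaded-virtual style checks.
  void SetUp(int num_hash_fun) { num_hash_fun_ = num_hash_fun; }

 protected:
  int num_hash_fun_ = 0;
};

TEST_F(ReaderFixture, CallsTheOverloadExplicitly) {
  SetUp(4);  // mirrors SetUp(kNumHashFunc) in the real test
  ASSERT_EQ(4, num_hash_fun_);
}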
diff --git a/table/full_filter_block_test.cc b/table/full_filter_block_test.cc index d102b7d92..0275a6ca6 100644 --- a/table/full_filter_block_test.cc +++ b/table/full_filter_block_test.cc @@ -93,7 +93,7 @@ class TestHashFilter : public FilterPolicy { } }; -class PluginFullFilterBlockTest { +class PluginFullFilterBlockTest : public testing::Test { public: BlockBasedTableOptions table_options_; @@ -102,7 +102,7 @@ class PluginFullFilterBlockTest { } }; -TEST(PluginFullFilterBlockTest, PluginEmptyBuilder) { +TEST_F(PluginFullFilterBlockTest, PluginEmptyBuilder) { FullFilterBlockBuilder builder( nullptr, true, table_options_.filter_policy->GetFilterBitsBuilder()); Slice block = builder.Finish(); @@ -115,7 +115,7 @@ TEST(PluginFullFilterBlockTest, PluginEmptyBuilder) { ASSERT_TRUE(reader.KeyMayMatch("foo")); } -TEST(PluginFullFilterBlockTest, PluginSingleChunk) { +TEST_F(PluginFullFilterBlockTest, PluginSingleChunk) { FullFilterBlockBuilder builder( nullptr, true, table_options_.filter_policy->GetFilterBitsBuilder()); builder.Add("foo"); @@ -136,7 +136,7 @@ TEST(PluginFullFilterBlockTest, PluginSingleChunk) { ASSERT_TRUE(!reader.KeyMayMatch("other")); } -class FullFilterBlockTest { +class FullFilterBlockTest : public testing::Test { public: BlockBasedTableOptions table_options_; @@ -147,7 +147,7 @@ class FullFilterBlockTest { ~FullFilterBlockTest() {} }; -TEST(FullFilterBlockTest, EmptyBuilder) { +TEST_F(FullFilterBlockTest, EmptyBuilder) { FullFilterBlockBuilder builder( nullptr, true, table_options_.filter_policy->GetFilterBitsBuilder()); Slice block = builder.Finish(); @@ -160,7 +160,7 @@ TEST(FullFilterBlockTest, EmptyBuilder) { ASSERT_TRUE(reader.KeyMayMatch("foo")); } -TEST(FullFilterBlockTest, SingleChunk) { +TEST_F(FullFilterBlockTest, SingleChunk) { FullFilterBlockBuilder builder( nullptr, true, table_options_.filter_policy->GetFilterBitsBuilder()); builder.Add("foo"); @@ -183,4 +183,7 @@ TEST(FullFilterBlockTest, SingleChunk) { } // namespace rocksdb -int main(int argc, char** argv) { return rocksdb::test::RunAllTests(); } +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/table/merger_test.cc b/table/merger_test.cc index d8502029f..1085ce452 100644 --- a/table/merger_test.cc +++ b/table/merger_test.cc @@ -44,7 +44,7 @@ class VectorIterator : public Iterator { size_t current_; }; -class MergerTest { +class MergerTest : public testing::Test { public: MergerTest() : rnd_(3), merging_iterator_(nullptr), single_iterator_(nullptr) {} @@ -139,7 +139,7 @@ class MergerTest { std::vector all_keys_; }; -TEST(MergerTest, SeekToRandomNextTest) { +TEST_F(MergerTest, SeekToRandomNextTest) { Generate(1000, 50, 50); for (int i = 0; i < 10; ++i) { SeekToRandom(); @@ -148,7 +148,7 @@ TEST(MergerTest, SeekToRandomNextTest) { } } -TEST(MergerTest, SeekToRandomNextSmallStringsTest) { +TEST_F(MergerTest, SeekToRandomNextSmallStringsTest) { Generate(1000, 50, 2); for (int i = 0; i < 10; ++i) { SeekToRandom(); @@ -157,7 +157,7 @@ TEST(MergerTest, SeekToRandomNextSmallStringsTest) { } } -TEST(MergerTest, SeekToRandomPrevTest) { +TEST_F(MergerTest, SeekToRandomPrevTest) { Generate(1000, 50, 50); for (int i = 0; i < 10; ++i) { SeekToRandom(); @@ -166,7 +166,7 @@ TEST(MergerTest, SeekToRandomPrevTest) { } } -TEST(MergerTest, SeekToRandomRandomTest) { +TEST_F(MergerTest, SeekToRandomRandomTest) { Generate(200, 50, 50); for (int i = 0; i < 3; ++i) { SeekToRandom(); @@ -175,7 +175,7 @@ TEST(MergerTest, SeekToRandomRandomTest) { } } 
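Each test binary's main() changes the same way, and in the gflags-enabled binaries (cuckoo_table_reader_test, bloom_test, dynamic_bloom_test, options_test) InitGoogleTest is placed before ParseCommandLineFlags, presumably so gtest consumes its own --gtest_* arguments before the remaining flags are parsed. A sketch of the resulting entry point; the gflags call is left commented out since only some binaries use it:

#include <gtest/gtest.h>

int main(int argc, char** argv) {
  // Let gtest see argv first; it removes the --gtest_* flags it recognizes.
  ::testing::InitGoogleTest(&argc, argv);
  // ParseCommandLineFlags(&argc, &argv, true);  // gflags-enabled binaries only
  return RUN_ALL_TESTS();
}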
-TEST(MergerTest, SeekToFirstTest) { +TEST_F(MergerTest, SeekToFirstTest) { Generate(1000, 50, 50); for (int i = 0; i < 10; ++i) { SeekToFirst(); @@ -184,7 +184,7 @@ TEST(MergerTest, SeekToFirstTest) { } } -TEST(MergerTest, SeekToLastTest) { +TEST_F(MergerTest, SeekToLastTest) { Generate(1000, 50, 50); for (int i = 0; i < 10; ++i) { SeekToLast(); @@ -195,4 +195,7 @@ TEST(MergerTest, SeekToLastTest) { } // namespace rocksdb -int main(int argc, char** argv) { return rocksdb::test::RunAllTests(); } +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/table/table_test.cc b/table/table_test.cc index 411c129c8..e713f19bf 100644 --- a/table/table_test.cc +++ b/table/table_test.cc @@ -698,7 +698,7 @@ class FixedOrLessPrefixTransform : public SliceTransform { } }; -class HarnessTest { +class HarnessTest : public testing::Test { public: HarnessTest() : ioptions_(options_), @@ -1006,7 +1006,7 @@ static bool Between(uint64_t val, uint64_t low, uint64_t high) { } // Tests against all kinds of tables -class TableTest { +class TableTest : public testing::Test { public: const InternalKeyComparator& GetPlainInternalComparator( const Comparator* comp) { @@ -1024,11 +1024,11 @@ class TableTest { class GeneralTableTest : public TableTest {}; class BlockBasedTableTest : public TableTest {}; class PlainTableTest : public TableTest {}; -class TablePropertyTest {}; +class TablePropertyTest : public testing::Test {}; // This test serves as the living tutorial for the prefix scan of user collected // properties. -TEST(TablePropertyTest, PrefixScanTest) { +TEST_F(TablePropertyTest, PrefixScanTest) { UserCollectedProperties props{{"num.111.1", "1"}, {"num.111.2", "2"}, {"num.111.3", "3"}, @@ -1065,7 +1065,7 @@ TEST(TablePropertyTest, PrefixScanTest) { // This test include all the basic checks except those for index size and block // size, which will be conducted in separated unit tests. -TEST(BlockBasedTableTest, BasicBlockBasedTableProperties) { +TEST_F(BlockBasedTableTest, BasicBlockBasedTableProperties) { TableConstructor c(BytewiseComparator()); c.Add("a1", "val1"); @@ -1110,7 +1110,7 @@ TEST(BlockBasedTableTest, BasicBlockBasedTableProperties) { ASSERT_EQ(content.size() + kBlockTrailerSize, props.data_size); } -TEST(BlockBasedTableTest, FilterPolicyNameProperties) { +TEST_F(BlockBasedTableTest, FilterPolicyNameProperties) { TableConstructor c(BytewiseComparator(), true); c.Add("a1", "val1"); std::vector keys; @@ -1168,8 +1168,7 @@ void PrefetchRange(TableConstructor* c, Options* opt, AssertKeysInCache(table_reader, keys_in_cache, keys_not_in_cache); } - -TEST(BlockBasedTableTest, PrefetchTest) { +TEST_F(BlockBasedTableTest, PrefetchTest) { // The purpose of this test is to test the prefetching operation built into // BlockBasedTable. 
Options opt; @@ -1251,8 +1250,7 @@ TEST(BlockBasedTableTest, PrefetchTest) { Status::InvalidArgument(Slice("k06 "), Slice("k07"))); } - -TEST(BlockBasedTableTest, TotalOrderSeekOnHashIndex) { +TEST_F(BlockBasedTableTest, TotalOrderSeekOnHashIndex) { BlockBasedTableOptions table_options; for (int i = 0; i < 4; ++i) { Options options; @@ -1349,7 +1347,7 @@ void AddInternalKey(TableConstructor* c, const std::string& prefix, c->Add(k.Encode().ToString(), "v"); } -TEST(TableTest, HashIndexTest) { +TEST_F(TableTest, HashIndexTest) { TableConstructor c(BytewiseComparator()); // keys with prefix length 3, make sure the key/value is big enough to fill @@ -1462,7 +1460,7 @@ TEST(TableTest, HashIndexTest) { // It's very hard to figure out the index block size of a block accurately. // To make sure we get the index size, we just make sure as key number // grows, the filter block size also grows. -TEST(BlockBasedTableTest, IndexSizeStat) { +TEST_F(BlockBasedTableTest, IndexSizeStat) { uint64_t last_index_size = 0; // we need to use random keys since the pure human readable texts @@ -1500,7 +1498,7 @@ TEST(BlockBasedTableTest, IndexSizeStat) { } } -TEST(BlockBasedTableTest, NumBlockStat) { +TEST_F(BlockBasedTableTest, NumBlockStat) { Random rnd(test::RandomSeed()); TableConstructor c(BytewiseComparator()); Options options; @@ -1581,7 +1579,7 @@ class BlockCachePropertiesSnapshot { // Make sure, by default, index/filter blocks were pre-loaded (meaning we won't // use block cache to store them). -TEST(BlockBasedTableTest, BlockCacheDisabledTest) { +TEST_F(BlockBasedTableTest, BlockCacheDisabledTest) { Options options; options.create_if_missing = true; options.statistics = CreateDBStatistics(); @@ -1624,7 +1622,7 @@ TEST(BlockBasedTableTest, BlockCacheDisabledTest) { // Due to the difficulities of the intersaction between statistics, this test // only tests the case when "index block is put to block cache" -TEST(BlockBasedTableTest, FilterBlockInBlockCache) { +TEST_F(BlockBasedTableTest, FilterBlockInBlockCache) { // -- Table construction Options options; options.create_if_missing = true; @@ -1756,7 +1754,7 @@ TEST(BlockBasedTableTest, FilterBlockInBlockCache) { props.AssertFilterBlockStat(0, 0); } -TEST(BlockBasedTableTest, BlockCacheLeak) { +TEST_F(BlockBasedTableTest, BlockCacheLeak) { // Check that when we reopen a table we don't lose access to blocks already // in the cache. This test checks whether the Table actually makes use of the // unique ID from the file. 
@@ -1811,7 +1809,7 @@ TEST(BlockBasedTableTest, BlockCacheLeak) { } } -TEST(PlainTableTest, BasicPlainTableProperties) { +TEST_F(PlainTableTest, BasicPlainTableProperties) { PlainTableOptions plain_table_options; plain_table_options.user_key_len = 8; plain_table_options.bloom_bits_per_key = 8; @@ -1851,7 +1849,7 @@ TEST(PlainTableTest, BasicPlainTableProperties) { ASSERT_EQ(1ul, props->num_data_blocks); } -TEST(GeneralTableTest, ApproximateOffsetOfPlain) { +TEST_F(GeneralTableTest, ApproximateOffsetOfPlain) { TableConstructor c(BytewiseComparator()); c.Add("k01", "hello"); c.Add("k02", "hello2"); @@ -1910,7 +1908,7 @@ static void DoCompressionTest(CompressionType comp) { ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"), 4000, 6100)); } -TEST(GeneralTableTest, ApproximateOffsetOfCompressed) { +TEST_F(GeneralTableTest, ApproximateOffsetOfCompressed) { std::vector compression_state; if (!SnappyCompressionSupported()) { fprintf(stderr, "skipping snappy compression tests\n"); @@ -1950,7 +1948,7 @@ TEST(GeneralTableTest, ApproximateOffsetOfCompressed) { } } -TEST(HarnessTest, Randomized) { +TEST_F(HarnessTest, Randomized) { std::vector args = GenerateArgList(); for (unsigned int i = 0; i < args.size(); i++) { Init(args[i]); @@ -1971,7 +1969,7 @@ TEST(HarnessTest, Randomized) { } } -TEST(HarnessTest, RandomizedLongDB) { +TEST_F(HarnessTest, RandomizedLongDB) { Random rnd(test::RandomSeed()); TestArgs args = { DB_TEST, false, 16, kNoCompression, 0 }; Init(args); @@ -1995,9 +1993,9 @@ TEST(HarnessTest, RandomizedLongDB) { ASSERT_GT(files, 0); } -class MemTableTest { }; +class MemTableTest : public testing::Test {}; -TEST(MemTableTest, Simple) { +TEST_F(MemTableTest, Simple) { InternalKeyComparator cmp(BytewiseComparator()); auto table_factory = std::make_shared(); Options options; @@ -2030,7 +2028,7 @@ TEST(MemTableTest, Simple) { } // Test the empty key -TEST(HarnessTest, SimpleEmptyKey) { +TEST_F(HarnessTest, SimpleEmptyKey) { auto args = GenerateArgList(); for (const auto& arg : args) { Init(arg); @@ -2040,7 +2038,7 @@ TEST(HarnessTest, SimpleEmptyKey) { } } -TEST(HarnessTest, SimpleSingle) { +TEST_F(HarnessTest, SimpleSingle) { auto args = GenerateArgList(); for (const auto& arg : args) { Init(arg); @@ -2050,7 +2048,7 @@ TEST(HarnessTest, SimpleSingle) { } } -TEST(HarnessTest, SimpleMulti) { +TEST_F(HarnessTest, SimpleMulti) { auto args = GenerateArgList(); for (const auto& arg : args) { Init(arg); @@ -2062,7 +2060,7 @@ TEST(HarnessTest, SimpleMulti) { } } -TEST(HarnessTest, SimpleSpecialKey) { +TEST_F(HarnessTest, SimpleSpecialKey) { auto args = GenerateArgList(); for (const auto& arg : args) { Init(arg); @@ -2072,7 +2070,7 @@ TEST(HarnessTest, SimpleSpecialKey) { } } -TEST(HarnessTest, FooterTests) { +TEST_F(HarnessTest, FooterTests) { { // upconvert legacy block based std::string encoded; @@ -2175,5 +2173,6 @@ TEST(HarnessTest, FooterTests) { } // namespace rocksdb int main(int argc, char** argv) { - return rocksdb::test::RunAllTests(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/tools/reduce_levels_test.cc b/tools/reduce_levels_test.cc index 5d8d8d164..c21c6d713 100644 --- a/tools/reduce_levels_test.cc +++ b/tools/reduce_levels_test.cc @@ -13,7 +13,7 @@ namespace rocksdb { -class ReduceLevelTest { +class ReduceLevelTest : public testing::Test { public: ReduceLevelTest() { dbname_ = test::TmpDir() + "/db_reduce_levels_test"; @@ -94,7 +94,7 @@ bool ReduceLevelTest::ReduceLevels(int target_level) { return is_succeed; } -TEST(ReduceLevelTest, Last_Level) { 
+TEST_F(ReduceLevelTest, Last_Level) { // create files on all levels; ASSERT_OK(OpenDB(true, 4, 3)); ASSERT_OK(Put("aaaa", "11111")); @@ -113,7 +113,7 @@ TEST(ReduceLevelTest, Last_Level) { CloseDB(); } -TEST(ReduceLevelTest, Top_Level) { +TEST_F(ReduceLevelTest, Top_Level) { // create files on all levels; ASSERT_OK(OpenDB(true, 5, 0)); ASSERT_OK(Put("aaaa", "11111")); @@ -134,7 +134,7 @@ TEST(ReduceLevelTest, Top_Level) { CloseDB(); } -TEST(ReduceLevelTest, All_Levels) { +TEST_F(ReduceLevelTest, All_Levels) { // create files on all levels; ASSERT_OK(OpenDB(true, 5, 1)); ASSERT_OK(Put("a", "a11111")); @@ -194,5 +194,6 @@ TEST(ReduceLevelTest, All_Levels) { } int main(int argc, char** argv) { - return rocksdb::test::RunAllTests(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/util/arena_test.cc b/util/arena_test.cc index 7f55a7e53..a3b96bb1c 100644 --- a/util/arena_test.cc +++ b/util/arena_test.cc @@ -16,9 +16,9 @@ namespace rocksdb { namespace { const size_t kHugePageSize = 2 * 1024 * 1024; } // namespace -class ArenaTest {}; +class ArenaTest : public testing::Test {}; -TEST(ArenaTest, Empty) { Arena arena0; } +TEST_F(ArenaTest, Empty) { Arena arena0; } namespace { void MemoryAllocatedBytesTest(size_t huge_page_size) { @@ -166,20 +166,23 @@ static void SimpleTest(size_t huge_page_size) { } } // namespace -TEST(ArenaTest, MemoryAllocatedBytes) { +TEST_F(ArenaTest, MemoryAllocatedBytes) { MemoryAllocatedBytesTest(0); MemoryAllocatedBytesTest(kHugePageSize); } -TEST(ArenaTest, ApproximateMemoryUsage) { +TEST_F(ArenaTest, ApproximateMemoryUsage) { ApproximateMemoryUsageTest(0); ApproximateMemoryUsageTest(kHugePageSize); } -TEST(ArenaTest, Simple) { +TEST_F(ArenaTest, Simple) { SimpleTest(0); SimpleTest(kHugePageSize); } } // namespace rocksdb -int main(int argc, char** argv) { return rocksdb::test::RunAllTests(); } +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/util/auto_roll_logger_test.cc b/util/auto_roll_logger_test.cc index 4c0e5d8a5..6733a62a4 100644 --- a/util/auto_roll_logger_test.cc +++ b/util/auto_roll_logger_test.cc @@ -20,7 +20,7 @@ using namespace std; namespace rocksdb { -class AutoRollLoggerTest { +class AutoRollLoggerTest : public testing::Test { public: static void InitTestDb() { string deleteCmd = "rm -rf " + kTestDir; @@ -135,7 +135,7 @@ uint64_t AutoRollLoggerTest::RollLogFileByTimeTest( return expected_create_time; } -TEST(AutoRollLoggerTest, RollLogFileBySize) { +TEST_F(AutoRollLoggerTest, RollLogFileBySize) { InitTestDb(); size_t log_max_size = 1024 * 5; @@ -145,7 +145,7 @@ TEST(AutoRollLoggerTest, RollLogFileBySize) { kSampleMessage + ":RollLogFileBySize"); } -TEST(AutoRollLoggerTest, RollLogFileByTime) { +TEST_F(AutoRollLoggerTest, RollLogFileByTime) { size_t time = 2; size_t log_size = 1024 * 5; @@ -158,8 +158,7 @@ TEST(AutoRollLoggerTest, RollLogFileByTime) { RollLogFileByTimeTest(&logger, time, kSampleMessage + ":RollLogFileByTime"); } -TEST(AutoRollLoggerTest, - OpenLogFilesMultipleTimesWithOptionLog_max_size) { +TEST_F(AutoRollLoggerTest, OpenLogFilesMultipleTimesWithOptionLog_max_size) { // If only 'log_max_size' options is specified, then every time // when rocksdb is restarted, a new empty log file will be created. 
InitTestDb(); @@ -184,7 +183,7 @@ TEST(AutoRollLoggerTest, delete logger; } -TEST(AutoRollLoggerTest, CompositeRollByTimeAndSizeLogger) { +TEST_F(AutoRollLoggerTest, CompositeRollByTimeAndSizeLogger) { size_t time = 2, log_max_size = 1024 * 5; InitTestDb(); @@ -201,7 +200,7 @@ TEST(AutoRollLoggerTest, CompositeRollByTimeAndSizeLogger) { kSampleMessage + ":CompositeRollByTimeAndSizeLogger"); } -TEST(AutoRollLoggerTest, CreateLoggerFromOptions) { +TEST_F(AutoRollLoggerTest, CreateLoggerFromOptions) { DBOptions options; shared_ptr logger; @@ -246,7 +245,7 @@ TEST(AutoRollLoggerTest, CreateLoggerFromOptions) { kSampleMessage + ":CreateLoggerFromOptions - both"); } -TEST(AutoRollLoggerTest, InfoLogLevel) { +TEST_F(AutoRollLoggerTest, InfoLogLevel) { InitTestDb(); size_t log_size = 8192; @@ -325,7 +324,7 @@ static size_t GetLinesCount(const string& fname, const string& pattern) { return count; } -TEST(AutoRollLoggerTest, LogHeaderTest) { +TEST_F(AutoRollLoggerTest, LogHeaderTest) { static const size_t MAX_HEADERS = 10; static const size_t LOG_MAX_SIZE = 1024 * 5; static const std::string HEADER_STR = "Log header line"; @@ -368,7 +367,7 @@ TEST(AutoRollLoggerTest, LogHeaderTest) { } } -TEST(AutoRollLoggerTest, LogFileExistence) { +TEST_F(AutoRollLoggerTest, LogFileExistence) { rocksdb::DB* db; rocksdb::Options options; string deleteCmd = "rm -rf " + kTestDir; @@ -383,5 +382,6 @@ TEST(AutoRollLoggerTest, LogFileExistence) { } // namespace rocksdb int main(int argc, char** argv) { - return rocksdb::test::RunAllTests(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/util/autovector_test.cc b/util/autovector_test.cc index 4ff982250..5a347725a 100644 --- a/util/autovector_test.cc +++ b/util/autovector_test.cc @@ -16,10 +16,10 @@ namespace rocksdb { using namespace std; -class AutoVectorTest { }; +class AutoVectorTest : public testing::Test {}; const unsigned long kSize = 8; -TEST(AutoVectorTest, PushBackAndPopBack) { +TEST_F(AutoVectorTest, PushBackAndPopBack) { autovector vec; ASSERT_TRUE(vec.empty()); ASSERT_EQ(0ul, vec.size()); @@ -48,7 +48,7 @@ TEST(AutoVectorTest, PushBackAndPopBack) { ASSERT_TRUE(vec.empty()); } -TEST(AutoVectorTest, EmplaceBack) { +TEST_F(AutoVectorTest, EmplaceBack) { typedef std::pair ValType; autovector vec; @@ -71,7 +71,7 @@ TEST(AutoVectorTest, EmplaceBack) { ASSERT_TRUE(!vec.only_in_stack()); } -TEST(AutoVectorTest, Resize) { +TEST_F(AutoVectorTest, Resize) { autovector vec; vec.resize(kSize); @@ -105,7 +105,7 @@ void AssertEqual( } } // namespace -TEST(AutoVectorTest, CopyAndAssignment) { +TEST_F(AutoVectorTest, CopyAndAssignment) { // Test both heap-allocated and stack-allocated cases. for (auto size : { kSize / 2, kSize * 1000 }) { autovector vec; @@ -126,7 +126,7 @@ TEST(AutoVectorTest, CopyAndAssignment) { } } -TEST(AutoVectorTest, Iterators) { +TEST_F(AutoVectorTest, Iterators) { autovector vec; for (size_t i = 0; i < kSize * 1000; ++i) { vec.push_back(ToString(i)); @@ -246,7 +246,7 @@ size_t BenchmarkSequenceAccess(string name, size_t ops, size_t elem_size) { // This test case only reports the performance between std::vector // and autovector. We chose string for comparison because in most // o our use cases we used std::vector. -TEST(AutoVectorTest, PerfBench) { +TEST_F(AutoVectorTest, PerfBench) { // We run same operations for kOps times in order to get a more fair result. 
size_t kOps = 100000; @@ -313,5 +313,6 @@ TEST(AutoVectorTest, PerfBench) { } // namespace rocksdb int main(int argc, char** argv) { - return rocksdb::test::RunAllTests(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/util/bloom_test.cc b/util/bloom_test.cc index 3d8764b7e..237bf7d87 100644 --- a/util/bloom_test.cc +++ b/util/bloom_test.cc @@ -50,7 +50,7 @@ static int NextLength(int length) { return length; } -class BloomTest { +class BloomTest : public testing::Test { private: const FilterPolicy* policy_; std::string filter_; @@ -119,12 +119,12 @@ class BloomTest { } }; -TEST(BloomTest, EmptyFilter) { +TEST_F(BloomTest, EmptyFilter) { ASSERT_TRUE(! Matches("hello")); ASSERT_TRUE(! Matches("world")); } -TEST(BloomTest, Small) { +TEST_F(BloomTest, Small) { Add("hello"); Add("world"); ASSERT_TRUE(Matches("hello")); @@ -133,7 +133,7 @@ TEST(BloomTest, Small) { ASSERT_TRUE(! Matches("foo")); } -TEST(BloomTest, VaryingLengths) { +TEST_F(BloomTest, VaryingLengths) { char buffer[sizeof(int)]; // Count number of filters that significantly exceed the false positive rate @@ -174,7 +174,7 @@ TEST(BloomTest, VaryingLengths) { // Different bits-per-byte -class FullBloomTest { +class FullBloomTest : public testing::Test { private: const FilterPolicy* policy_; std::unique_ptr bits_builder_; @@ -233,13 +233,13 @@ class FullBloomTest { } }; -TEST(FullBloomTest, FullEmptyFilter) { +TEST_F(FullBloomTest, FullEmptyFilter) { // Empty filter is not match, at this level ASSERT_TRUE(!Matches("hello")); ASSERT_TRUE(!Matches("world")); } -TEST(FullBloomTest, FullSmall) { +TEST_F(FullBloomTest, FullSmall) { Add("hello"); Add("world"); ASSERT_TRUE(Matches("hello")); @@ -248,7 +248,7 @@ TEST(FullBloomTest, FullSmall) { ASSERT_TRUE(!Matches("foo")); } -TEST(FullBloomTest, FullVaryingLengths) { +TEST_F(FullBloomTest, FullVaryingLengths) { char buffer[sizeof(int)]; // Count number of filters that significantly exceed the false positive rate @@ -292,9 +292,10 @@ TEST(FullBloomTest, FullVaryingLengths) { } // namespace rocksdb int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); ParseCommandLineFlags(&argc, &argv, true); - return rocksdb::test::RunAllTests(); + return RUN_ALL_TESTS(); } #endif // GFLAGS diff --git a/util/cache_test.cc b/util/cache_test.cc index ea71124b2..72880ffa5 100644 --- a/util/cache_test.cc +++ b/util/cache_test.cc @@ -32,7 +32,7 @@ static int DecodeValue(void* v) { return static_cast(reinterpret_cast(v)); } -class CacheTest { +class CacheTest : public testing::Test { public: static CacheTest* current_; @@ -113,7 +113,7 @@ namespace { void dumbDeleter(const Slice& key, void* value) { } } // namespace -TEST(CacheTest, UsageTest) { +TEST_F(CacheTest, UsageTest) { // cache is shared_ptr and will be automatically cleaned up. 
const uint64_t kCapacity = 100000; auto cache = NewLRUCache(kCapacity, 8, 200); @@ -144,7 +144,7 @@ TEST(CacheTest, UsageTest) { ASSERT_LT(kCapacity * 0.95, cache->GetUsage()); } -TEST(CacheTest, HitAndMiss) { +TEST_F(CacheTest, HitAndMiss) { ASSERT_EQ(-1, Lookup(100)); Insert(100, 101); @@ -167,7 +167,7 @@ TEST(CacheTest, HitAndMiss) { ASSERT_EQ(101, deleted_values_[0]); } -TEST(CacheTest, Erase) { +TEST_F(CacheTest, Erase) { Erase(200); ASSERT_EQ(0U, deleted_keys_.size()); @@ -186,7 +186,7 @@ TEST(CacheTest, Erase) { ASSERT_EQ(1U, deleted_keys_.size()); } -TEST(CacheTest, EntriesArePinned) { +TEST_F(CacheTest, EntriesArePinned) { Insert(100, 101); Cache::Handle* h1 = cache_->Lookup(EncodeKey(100)); ASSERT_EQ(101, DecodeValue(cache_->Value(h1))); @@ -216,7 +216,7 @@ TEST(CacheTest, EntriesArePinned) { ASSERT_EQ(0U, cache_->GetUsage()); } -TEST(CacheTest, EvictionPolicy) { +TEST_F(CacheTest, EvictionPolicy) { Insert(100, 101); Insert(200, 201); @@ -230,7 +230,7 @@ TEST(CacheTest, EvictionPolicy) { ASSERT_EQ(-1, Lookup(200)); } -TEST(CacheTest, EvictionPolicyRef) { +TEST_F(CacheTest, EvictionPolicyRef) { Insert(100, 101); Insert(101, 102); Insert(102, 103); @@ -278,7 +278,7 @@ TEST(CacheTest, EvictionPolicyRef) { cache_->Release(h204); } -TEST(CacheTest, ErasedHandleState) { +TEST_F(CacheTest, ErasedHandleState) { // insert a key and get two handles Insert(100, 1000); Cache::Handle* h1 = cache_->Lookup(EncodeKey(100)); @@ -300,7 +300,7 @@ TEST(CacheTest, ErasedHandleState) { cache_->Release(h2); } -TEST(CacheTest, HeavyEntries) { +TEST_F(CacheTest, HeavyEntries) { // Add a bunch of light and heavy entries and then count the combined // size of items still in the cache, which must be approximately the // same as the total capacity. @@ -327,7 +327,7 @@ TEST(CacheTest, HeavyEntries) { ASSERT_LE(cached_weight, kCacheSize + kCacheSize/10); } -TEST(CacheTest, NewId) { +TEST_F(CacheTest, NewId) { uint64_t a = cache_->NewId(); uint64_t b = cache_->NewId(); ASSERT_NE(a, b); @@ -349,7 +349,7 @@ void deleter(const Slice& key, void* value) { } } // namespace -TEST(CacheTest, OverCapacity) { +TEST_F(CacheTest, OverCapacity) { size_t n = 10; // a LRUCache with n entries and one shard only @@ -403,7 +403,7 @@ void callback(void* entry, size_t charge) { } }; -TEST(CacheTest, ApplyToAllCacheEntiresTest) { +TEST_F(CacheTest, ApplyToAllCacheEntiresTest) { std::vector> inserted; callback_state.clear(); @@ -421,5 +421,6 @@ TEST(CacheTest, ApplyToAllCacheEntiresTest) { } // namespace rocksdb int main(int argc, char** argv) { - return rocksdb::test::RunAllTests(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/util/coding_test.cc b/util/coding_test.cc index 3dbe7befe..e3c265b69 100644 --- a/util/coding_test.cc +++ b/util/coding_test.cc @@ -199,5 +199,6 @@ TEST(Coding, Strings) { } // namespace rocksdb int main(int argc, char** argv) { - return rocksdb::test::RunAllTests(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/util/crc32c_test.cc b/util/crc32c_test.cc index 300c9d3c7..413302a24 100644 --- a/util/crc32c_test.cc +++ b/util/crc32c_test.cc @@ -73,5 +73,6 @@ TEST(CRC, Mask) { } // namespace rocksdb int main(int argc, char** argv) { - return rocksdb::test::RunAllTests(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/util/dynamic_bloom_test.cc b/util/dynamic_bloom_test.cc index 5e143551e..fb10d0974 100644 --- a/util/dynamic_bloom_test.cc +++ b/util/dynamic_bloom_test.cc @@ -40,10 +40,9 @@ static Slice 
Key(uint64_t i, char* buffer) { return Slice(buffer, sizeof(i)); } -class DynamicBloomTest { -}; +class DynamicBloomTest : public testing::Test {}; -TEST(DynamicBloomTest, EmptyFilter) { +TEST_F(DynamicBloomTest, EmptyFilter) { Arena arena; DynamicBloom bloom1(&arena, 100, 0, 2); ASSERT_TRUE(!bloom1.MayContain("hello")); @@ -54,7 +53,7 @@ TEST(DynamicBloomTest, EmptyFilter) { ASSERT_TRUE(!bloom2.MayContain("world")); } -TEST(DynamicBloomTest, Small) { +TEST_F(DynamicBloomTest, Small) { Arena arena; DynamicBloom bloom1(&arena, 100, 0, 2); bloom1.Add("hello"); @@ -86,7 +85,7 @@ static uint32_t NextNum(uint32_t num) { return num; } -TEST(DynamicBloomTest, VaryingLengths) { +TEST_F(DynamicBloomTest, VaryingLengths) { char buffer[sizeof(uint64_t)]; // Count number of filters that significantly exceed the false positive rate @@ -146,7 +145,7 @@ TEST(DynamicBloomTest, VaryingLengths) { } } -TEST(DynamicBloomTest, perf) { +TEST_F(DynamicBloomTest, perf) { StopWatchNano timer(Env::Default()); uint32_t num_probes = static_cast(FLAGS_num_probes); @@ -215,9 +214,10 @@ TEST(DynamicBloomTest, perf) { } // namespace rocksdb int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); ParseCommandLineFlags(&argc, &argv, true); - return rocksdb::test::RunAllTests(); + return RUN_ALL_TESTS(); } #endif // GFLAGS diff --git a/util/env_test.cc b/util/env_test.cc index d49fff77b..123dab07d 100644 --- a/util/env_test.cc +++ b/util/env_test.cc @@ -34,7 +34,7 @@ namespace rocksdb { static const int kDelayMicros = 100000; -class EnvPosixTest { +class EnvPosixTest : public testing::Test { private: port::Mutex mu_; std::string events_; @@ -89,14 +89,14 @@ class SleepingBackgroundTask { bool sleeping_; }; -TEST(EnvPosixTest, RunImmediately) { +TEST_F(EnvPosixTest, RunImmediately) { std::atomic called(false); env_->Schedule(&SetBool, &called); Env::Default()->SleepForMicroseconds(kDelayMicros); ASSERT_TRUE(called.load(std::memory_order_relaxed)); } -TEST(EnvPosixTest, UnSchedule) { +TEST_F(EnvPosixTest, UnSchedule) { std::atomic called(false); env_->SetBackgroundThreads(1, Env::LOW); @@ -124,7 +124,7 @@ TEST(EnvPosixTest, UnSchedule) { ASSERT_TRUE(called.load(std::memory_order_relaxed)); } -TEST(EnvPosixTest, RunMany) { +TEST_F(EnvPosixTest, RunMany) { std::atomic last_id(0); struct CB { @@ -170,7 +170,7 @@ static void ThreadBody(void* arg) { s->mu.Unlock(); } -TEST(EnvPosixTest, StartThread) { +TEST_F(EnvPosixTest, StartThread) { State state; state.val = 0; state.num_running = 3; @@ -189,8 +189,7 @@ TEST(EnvPosixTest, StartThread) { ASSERT_EQ(state.val, 3); } -TEST(EnvPosixTest, TwoPools) { - +TEST_F(EnvPosixTest, TwoPools) { class CB { public: CB(const std::string& pool_name, int pool_size) @@ -307,7 +306,7 @@ TEST(EnvPosixTest, TwoPools) { env_->SetBackgroundThreads(kHighPoolSize, Env::Priority::HIGH); } -TEST(EnvPosixTest, DecreaseNumBgThreads) { +TEST_F(EnvPosixTest, DecreaseNumBgThreads) { std::vector tasks(10); // Set number of thread to 1 first. @@ -502,7 +501,7 @@ std::string GetOnDiskTestDir() { } // namespace // Only works in linux platforms -TEST(EnvPosixTest, RandomAccessUniqueID) { +TEST_F(EnvPosixTest, RandomAccessUniqueID) { // Create file. 
const EnvOptions soptions; std::string fname = GetOnDiskTestDir() + "/" + "testfile"; @@ -543,7 +542,7 @@ TEST(EnvPosixTest, RandomAccessUniqueID) { // only works in linux platforms #ifdef ROCKSDB_FALLOCATE_PRESENT -TEST(EnvPosixTest, AllocateTest) { +TEST_F(EnvPosixTest, AllocateTest) { std::string fname = GetOnDiskTestDir() + "/preallocate_testfile"; // Try fallocate in a file to see whether the target file system supports it. @@ -622,7 +621,7 @@ bool HasPrefix(const std::unordered_set& ss) { } // Only works in linux platforms -TEST(EnvPosixTest, RandomAccessUniqueIDConcurrent) { +TEST_F(EnvPosixTest, RandomAccessUniqueIDConcurrent) { // Check whether a bunch of concurrently existing files have unique IDs. const EnvOptions soptions; @@ -661,7 +660,7 @@ TEST(EnvPosixTest, RandomAccessUniqueIDConcurrent) { } // Only works in linux platforms -TEST(EnvPosixTest, RandomAccessUniqueIDDeletes) { +TEST_F(EnvPosixTest, RandomAccessUniqueIDDeletes) { const EnvOptions soptions; std::string fname = GetOnDiskTestDir() + "/" + "testfile"; @@ -697,7 +696,7 @@ TEST(EnvPosixTest, RandomAccessUniqueIDDeletes) { } // Only works in linux platforms -TEST(EnvPosixTest, InvalidateCache) { +TEST_F(EnvPosixTest, InvalidateCache) { const EnvOptions soptions; std::string fname = test::TmpDir() + "/" + "testfile"; @@ -739,7 +738,7 @@ TEST(EnvPosixTest, InvalidateCache) { #endif // not TRAVIS #endif // OS_LINUX -TEST(EnvPosixTest, PosixRandomRWFileTest) { +TEST_F(EnvPosixTest, PosixRandomRWFileTest) { EnvOptions soptions; soptions.use_mmap_writes = soptions.use_mmap_reads = false; std::string fname = test::TmpDir() + "/" + "testfile"; @@ -797,7 +796,7 @@ class TestLogger : public Logger { int char_0_count; }; -TEST(EnvPosixTest, LogBufferTest) { +TEST_F(EnvPosixTest, LogBufferTest) { TestLogger test_logger; test_logger.SetInfoLogLevel(InfoLogLevel::INFO_LEVEL); test_logger.log_count = 0; @@ -855,7 +854,7 @@ class TestLogger2 : public Logger { size_t max_log_size_; }; -TEST(EnvPosixTest, LogBufferMaxSizeTest) { +TEST_F(EnvPosixTest, LogBufferMaxSizeTest) { char bytes9000[9000]; std::fill_n(bytes9000, sizeof(bytes9000), '1'); bytes9000[sizeof(bytes9000) - 1] = '\0'; @@ -870,7 +869,7 @@ TEST(EnvPosixTest, LogBufferMaxSizeTest) { } } -TEST(EnvPosixTest, Preallocation) { +TEST_F(EnvPosixTest, Preallocation) { const std::string src = test::TmpDir() + "/" + "testfile"; unique_ptr srcfile; const EnvOptions soptions; @@ -903,5 +902,6 @@ TEST(EnvPosixTest, Preallocation) { } // namespace rocksdb int main(int argc, char** argv) { - return rocksdb::test::RunAllTests(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/util/event_logger_test.cc b/util/event_logger_test.cc index 0d7985f61..1aad0acc2 100644 --- a/util/event_logger_test.cc +++ b/util/event_logger_test.cc @@ -10,7 +10,7 @@ namespace rocksdb { -class EventLoggerTest {}; +class EventLoggerTest : public testing::Test {}; class StringLogger : public Logger { public: @@ -24,7 +24,7 @@ class StringLogger : public Logger { char buffer_[1000]; }; -TEST(EventLoggerTest, SimpleTest) { +TEST_F(EventLoggerTest, SimpleTest) { StringLogger logger; EventLogger event_logger(&logger); event_logger.Log() << "id" << 5 << "event" @@ -38,5 +38,6 @@ TEST(EventLoggerTest, SimpleTest) { } // namespace rocksdb int main(int argc, char** argv) { - return rocksdb::test::RunAllTests(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/util/filelock_test.cc b/util/filelock_test.cc index a9e30a5d3..33362f8c7 100644 --- 
a/util/filelock_test.cc +++ b/util/filelock_test.cc @@ -12,7 +12,7 @@ namespace rocksdb { -class LockTest { +class LockTest : public testing::Test { public: static LockTest* current_; std::string file_; @@ -36,7 +36,7 @@ class LockTest { }; LockTest* LockTest::current_; -TEST(LockTest, LockBySameThread) { +TEST_F(LockTest, LockBySameThread) { FileLock* lock1; FileLock* lock2; @@ -54,5 +54,6 @@ TEST(LockTest, LockBySameThread) { } // namespace rocksdb int main(int argc, char** argv) { - return rocksdb::test::RunAllTests(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/util/histogram_test.cc b/util/histogram_test.cc index 065f9579a..edceffaaa 100644 --- a/util/histogram_test.cc +++ b/util/histogram_test.cc @@ -9,10 +9,9 @@ namespace rocksdb { -class HistogramTest { }; - -TEST(HistogramTest, BasicOperation) { +class HistogramTest : public testing::Test {}; +TEST_F(HistogramTest, BasicOperation) { HistogramImpl histogram; for (uint64_t i = 1; i <= 100; i++) { histogram.Add(i); @@ -37,14 +36,14 @@ TEST(HistogramTest, BasicOperation) { ASSERT_EQ(histogram.Average(), 50.5); // avg is acurately caluclated. } -TEST(HistogramTest, EmptyHistogram) { +TEST_F(HistogramTest, EmptyHistogram) { HistogramImpl histogram; ASSERT_EQ(histogram.Median(), 0.0); ASSERT_EQ(histogram.Percentile(85.0), 0.0); ASSERT_EQ(histogram.Average(), 0.0); } -TEST(HistogramTest, ClearHistogram) { +TEST_F(HistogramTest, ClearHistogram) { HistogramImpl histogram; for (uint64_t i = 1; i <= 100; i++) { histogram.Add(i); @@ -58,5 +57,6 @@ TEST(HistogramTest, ClearHistogram) { } // namespace rocksdb int main(int argc, char** argv) { - return rocksdb::test::RunAllTests(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/util/manual_compaction_test.cc b/util/manual_compaction_test.cc index 60f1290c9..6eedd0366 100644 --- a/util/manual_compaction_test.cc +++ b/util/manual_compaction_test.cc @@ -30,7 +30,7 @@ std::string Key2(int i) { return Key1(i) + "_xxx"; } -class ManualCompactionTest { +class ManualCompactionTest : public testing::Test { public: ManualCompactionTest() { // Get rid of any state from an old run. @@ -56,7 +56,7 @@ class DestroyAllCompactionFilter : public CompactionFilter { } }; -TEST(ManualCompactionTest, CompactTouchesAllKeys) { +TEST_F(ManualCompactionTest, CompactTouchesAllKeys) { for (int iter = 0; iter < 2; ++iter) { DB* db; Options options; @@ -92,8 +92,7 @@ TEST(ManualCompactionTest, CompactTouchesAllKeys) { } } -TEST(ManualCompactionTest, Test) { - +TEST_F(ManualCompactionTest, Test) { // Open database. Disable compression since it affects the creation // of layers and the code below is trying to test against a very // specific scenario. 
@@ -150,5 +149,6 @@ TEST(ManualCompactionTest, Test) { } // anonymous namespace int main(int argc, char** argv) { - return rocksdb::test::RunAllTests(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/util/memenv_test.cc b/util/memenv_test.cc index 6154893f0..9222dc618 100644 --- a/util/memenv_test.cc +++ b/util/memenv_test.cc @@ -12,7 +12,7 @@ namespace rocksdb { -class MemEnvTest { +class MemEnvTest : public testing::Test { public: Env* env_; const EnvOptions soptions_; @@ -25,7 +25,7 @@ class MemEnvTest { } }; -TEST(MemEnvTest, Basics) { +TEST_F(MemEnvTest, Basics) { uint64_t file_size; unique_ptr writable_file; std::vector children; @@ -86,7 +86,7 @@ TEST(MemEnvTest, Basics) { ASSERT_OK(env_->DeleteDir("/dir")); } -TEST(MemEnvTest, ReadWrite) { +TEST_F(MemEnvTest, ReadWrite) { unique_ptr writable_file; unique_ptr seq_file; unique_ptr rand_file; @@ -126,7 +126,7 @@ TEST(MemEnvTest, ReadWrite) { ASSERT_TRUE(!rand_file->Read(1000, 5, &result, scratch).ok()); } -TEST(MemEnvTest, Locks) { +TEST_F(MemEnvTest, Locks) { FileLock* lock; // These are no-ops, but we test they return success. @@ -134,7 +134,7 @@ TEST(MemEnvTest, Locks) { ASSERT_OK(env_->UnlockFile(lock)); } -TEST(MemEnvTest, Misc) { +TEST_F(MemEnvTest, Misc) { std::string test_dir; ASSERT_OK(env_->GetTestDirectory(&test_dir)); ASSERT_TRUE(!test_dir.empty()); @@ -149,7 +149,7 @@ TEST(MemEnvTest, Misc) { writable_file.reset(); } -TEST(MemEnvTest, LargeWrite) { +TEST_F(MemEnvTest, LargeWrite) { const size_t kWriteSize = 300 * 1024; char* scratch = new char[kWriteSize * 2]; @@ -181,7 +181,7 @@ TEST(MemEnvTest, LargeWrite) { delete [] scratch; } -TEST(MemEnvTest, DBTest) { +TEST_F(MemEnvTest, DBTest) { Options options; options.create_if_missing = true; options.env = env_; @@ -236,5 +236,6 @@ TEST(MemEnvTest, DBTest) { } // namespace rocksdb int main(int argc, char** argv) { - return rocksdb::test::RunAllTests(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/util/mock_env_test.cc b/util/mock_env_test.cc index 521f0fb1c..57835323a 100644 --- a/util/mock_env_test.cc +++ b/util/mock_env_test.cc @@ -13,7 +13,7 @@ namespace rocksdb { -class MockEnvTest { +class MockEnvTest : public testing::Test { public: Env* env_; const EnvOptions soptions_; @@ -26,7 +26,7 @@ class MockEnvTest { } }; -TEST(MockEnvTest, Basics) { +TEST_F(MockEnvTest, Basics) { uint64_t file_size; unique_ptr writable_file; std::vector children; @@ -87,7 +87,7 @@ TEST(MockEnvTest, Basics) { ASSERT_OK(env_->DeleteDir("/dir")); } -TEST(MockEnvTest, ReadWrite) { +TEST_F(MockEnvTest, ReadWrite) { unique_ptr writable_file; unique_ptr seq_file; unique_ptr rand_file; @@ -127,7 +127,7 @@ TEST(MockEnvTest, ReadWrite) { ASSERT_TRUE(!rand_file->Read(1000, 5, &result, scratch).ok()); } -TEST(MockEnvTest, Locks) { +TEST_F(MockEnvTest, Locks) { FileLock* lock; // These are no-ops, but we test they return success. 
@@ -135,7 +135,7 @@ TEST(MockEnvTest, Locks) { ASSERT_OK(env_->UnlockFile(lock)); } -TEST(MockEnvTest, Misc) { +TEST_F(MockEnvTest, Misc) { std::string test_dir; ASSERT_OK(env_->GetTestDirectory(&test_dir)); ASSERT_TRUE(!test_dir.empty()); @@ -150,7 +150,7 @@ TEST(MockEnvTest, Misc) { writable_file.reset(); } -TEST(MockEnvTest, LargeWrite) { +TEST_F(MockEnvTest, LargeWrite) { const size_t kWriteSize = 300 * 1024; char* scratch = new char[kWriteSize * 2]; @@ -182,7 +182,7 @@ TEST(MockEnvTest, LargeWrite) { delete [] scratch; } -TEST(MockEnvTest, Corrupt) { +TEST_F(MockEnvTest, Corrupt) { const std::string kGood = "this is a good string, synced to disk"; const std::string kCorrupted = "this part may be corrupted"; const std::string kFileName = "/dir/f"; @@ -221,7 +221,7 @@ TEST(MockEnvTest, Corrupt) { ASSERT_NE(result.compare(kCorrupted), 0); } -TEST(MockEnvTest, DBTest) { +TEST_F(MockEnvTest, DBTest) { Options options; options.create_if_missing = true; options.env = env_; @@ -267,5 +267,6 @@ TEST(MockEnvTest, DBTest) { } // namespace rocksdb int main(int argc, char** argv) { - return rocksdb::test::RunAllTests(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/util/options_test.cc b/util/options_test.cc index 717f9322e..3b41a8ec3 100644 --- a/util/options_test.cc +++ b/util/options_test.cc @@ -32,7 +32,7 @@ DEFINE_bool(enable_print, false, "Print options generated to console."); namespace rocksdb { -class OptionsTest {}; +class OptionsTest : public testing::Test {}; class StderrLogger : public Logger { public: @@ -68,7 +68,7 @@ Options PrintAndGetOptions(size_t total_write_buffer_limit, return options; } -TEST(OptionsTest, LooseCondition) { +TEST_F(OptionsTest, LooseCondition) { Options options; PrintAndGetOptions(static_cast(10) * 1024 * 1024 * 1024, 100, 100); @@ -90,7 +90,7 @@ TEST(OptionsTest, LooseCondition) { } #ifndef ROCKSDB_LITE // GetOptionsFromMap is not supported in ROCKSDB_LITE -TEST(OptionsTest, GetOptionsFromMapTest) { +TEST_F(OptionsTest, GetOptionsFromMapTest) { std::unordered_map cf_options_map = { {"write_buffer_size", "1"}, {"max_write_buffer_number", "2"}, @@ -284,7 +284,7 @@ TEST(OptionsTest, GetOptionsFromMapTest) { #ifndef ROCKSDB_LITE // GetColumnFamilyOptionsFromString is not supported in // ROCKSDB_LITE -TEST(OptionsTest, GetColumnFamilyOptionsFromStringTest) { +TEST_F(OptionsTest, GetColumnFamilyOptionsFromStringTest) { ColumnFamilyOptions base_cf_opt; ColumnFamilyOptions new_cf_opt; base_cf_opt.table_factory.reset(); @@ -413,7 +413,7 @@ TEST(OptionsTest, GetColumnFamilyOptionsFromStringTest) { #endif // !ROCKSDB_LITE #ifndef ROCKSDB_LITE // GetBlockBasedTableOptionsFromString is not supported -TEST(OptionsTest, GetBlockBasedTableOptionsFromString) { +TEST_F(OptionsTest, GetBlockBasedTableOptionsFromString) { BlockBasedTableOptions table_opt; BlockBasedTableOptions new_opt; // make sure default values are overwritten by something else @@ -468,7 +468,7 @@ TEST(OptionsTest, GetBlockBasedTableOptionsFromString) { #endif // !ROCKSDB_LITE #ifndef ROCKSDB_LITE // GetOptionsFromString is not supported in RocksDB Lite -TEST(OptionsTest, GetOptionsFromStringTest) { +TEST_F(OptionsTest, GetOptionsFromStringTest) { Options base_options, new_options; base_options.write_buffer_size = 20; base_options.min_write_buffer_number_to_merge = 15; @@ -505,7 +505,7 @@ Status StringToMap( std::unordered_map* opts_map); #ifndef ROCKSDB_LITE // StringToMap is not supported in ROCKSDB_LITE -TEST(OptionsTest, StringToMapTest) { +TEST_F(OptionsTest, 
StringToMapTest) { std::unordered_map opts_map; // Regular options ASSERT_OK(StringToMap("k1=v1;k2=v2;k3=v3", &opts_map)); @@ -624,7 +624,7 @@ TEST(OptionsTest, StringToMapTest) { #endif // ROCKSDB_LITE #ifndef ROCKSDB_LITE // StringToMap is not supported in ROCKSDB_LITE -TEST(OptionsTest, StringToMapRandomTest) { +TEST_F(OptionsTest, StringToMapRandomTest) { std::unordered_map opts_map; // Make sure segfault is not hit by semi-random strings @@ -670,7 +670,7 @@ TEST(OptionsTest, StringToMapRandomTest) { } #endif // !ROCKSDB_LITE -TEST(OptionsTest, ConvertOptionsTest) { +TEST_F(OptionsTest, ConvertOptionsTest) { LevelDBOptions leveldb_opt; Options converted_opt = ConvertOptions(leveldb_opt); @@ -701,8 +701,9 @@ TEST(OptionsTest, ConvertOptionsTest) { } // namespace rocksdb int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); #ifdef GFLAGS ParseCommandLineFlags(&argc, &argv, true); #endif // GFLAGS - return rocksdb::test::RunAllTests(); + return RUN_ALL_TESTS(); } diff --git a/util/rate_limiter_test.cc b/util/rate_limiter_test.cc index 269582ff1..ccce84dda 100644 --- a/util/rate_limiter_test.cc +++ b/util/rate_limiter_test.cc @@ -20,14 +20,13 @@ namespace rocksdb { -class RateLimiterTest { -}; +class RateLimiterTest : public testing::Test {}; -TEST(RateLimiterTest, StartStop) { +TEST_F(RateLimiterTest, StartStop) { std::unique_ptr limiter(new GenericRateLimiter(100, 100, 10)); } -TEST(RateLimiterTest, Rate) { +TEST_F(RateLimiterTest, Rate) { auto* env = Env::Default(); struct Arg { Arg(int32_t _target_rate, int _burst) @@ -80,5 +79,6 @@ TEST(RateLimiterTest, Rate) { } // namespace rocksdb int main(int argc, char** argv) { - return rocksdb::test::RunAllTests(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/util/slice_transform_test.cc b/util/slice_transform_test.cc index b5e0665e7..5b7c1b402 100644 --- a/util/slice_transform_test.cc +++ b/util/slice_transform_test.cc @@ -18,9 +18,9 @@ namespace rocksdb { -class SliceTransformTest {}; +class SliceTransformTest : public testing::Test {}; -TEST(SliceTransformTest, CapPrefixTransform) { +TEST_F(SliceTransformTest, CapPrefixTransform) { std::string s; s = "abcdefge"; @@ -45,7 +45,7 @@ TEST(SliceTransformTest, CapPrefixTransform) { ASSERT_EQ(transform->Transform("").ToString(), ""); } -class SliceTransformDBTest { +class SliceTransformDBTest : public testing::Test { private: std::string dbname_; Env* env_; @@ -96,7 +96,7 @@ uint64_t TestGetTickerCount(const Options& options, Tickers ticker_type) { } } // namespace -TEST(SliceTransformDBTest, CapPrefix) { +TEST_F(SliceTransformDBTest, CapPrefix) { last_options_.prefix_extractor.reset(NewCappedPrefixTransform(8)); last_options_.statistics = rocksdb::CreateDBStatistics(); BlockBasedTableOptions bbto; @@ -147,4 +147,7 @@ TEST(SliceTransformDBTest, CapPrefix) { } // namespace rocksdb -int main(int argc, char** argv) { return rocksdb::test::RunAllTests(); } +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/util/sst_dump_test.cc b/util/sst_dump_test.cc index 33c78c7a4..ece74cab9 100644 --- a/util/sst_dump_test.cc +++ b/util/sst_dump_test.cc @@ -74,7 +74,7 @@ void cleanup(const std::string& file_name) { } // namespace // Test for sst dump tool "raw" mode -class SSTDumpToolTest { +class SSTDumpToolTest : public testing::Test { public: BlockBasedTableOptions table_options_; @@ -83,7 +83,7 @@ class SSTDumpToolTest { ~SSTDumpToolTest() {} }; -TEST(SSTDumpToolTest, EmptyFilter) { 
+TEST_F(SSTDumpToolTest, EmptyFilter) { std::string file_name = "rocksdb_sst_test.sst"; createSST(file_name, table_options_); @@ -104,7 +104,7 @@ TEST(SSTDumpToolTest, EmptyFilter) { } } -TEST(SSTDumpToolTest, FilterBlock) { +TEST_F(SSTDumpToolTest, FilterBlock) { table_options_.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, true)); std::string file_name = "rocksdb_sst_test.sst"; createSST(file_name, table_options_); @@ -126,7 +126,7 @@ TEST(SSTDumpToolTest, FilterBlock) { } } -TEST(SSTDumpToolTest, FullFilterBlock) { +TEST_F(SSTDumpToolTest, FullFilterBlock) { table_options_.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, false)); std::string file_name = "rocksdb_sst_test.sst"; createSST(file_name, table_options_); @@ -148,7 +148,7 @@ TEST(SSTDumpToolTest, FullFilterBlock) { } } -TEST(SSTDumpToolTest, GetProperties) { +TEST_F(SSTDumpToolTest, GetProperties) { table_options_.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, false)); std::string file_name = "rocksdb_sst_test.sst"; createSST(file_name, table_options_); @@ -171,4 +171,7 @@ TEST(SSTDumpToolTest, GetProperties) { } } // namespace rocksdb -int main(int argc, char** argv) { return rocksdb::test::RunAllTests(); } +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/util/testharness.h b/util/testharness.h index 9610e5fad..866c88c10 100644 --- a/util/testharness.h +++ b/util/testharness.h @@ -9,6 +9,8 @@ #pragma once +#include + #include #include #include @@ -22,22 +24,6 @@ namespace rocksdb { namespace test { -// Run some of the tests registered by the TEST() macro. If the -// environment variable "ROCKSDB_TESTS" and "ROCKSDB_TESTS_FROM" -// are not set, runs all tests. Otherwise, run all tests after -// ROCKSDB_TESTS_FROM and those specified by ROCKSDB_TESTS. -// Partial name match also works for ROCKSDB_TESTS and -// ROCKSDB_TESTS_FROM. E.g., suppose the tests are: -// TEST(Foo, Hello) { ... } -// TEST(Foo, World) { ... } -// ROCKSDB_TESTS=Hello will run the first test -// ROCKSDB_TESTS=o will run both tests -// ROCKSDB_TESTS=Junk will run no tests -// -// Returns 0 if all tests pass. -// Dies or returns a non-zero value if some test fails. -extern int RunAllTests(); - // Return the directory to use for temporary storage. extern std::string TmpDir(Env* env = Env::Default()); @@ -46,166 +32,10 @@ extern std::string TmpDir(Env* env = Env::Default()); // runs may be able to vary the seed. extern int RandomSeed(); -class TesterHelper; - -// An instance of Tester is allocated to hold temporary state during -// the execution of an assertion. -class Tester { - friend class TesterHelper; - - private: - bool ok_; - std::stringstream ss_; - - public: - Tester() : ok_(true) {} - - Tester& Is(bool b, const char* msg) { - if (!b) { - ss_ << " Assertion failure " << msg; - ok_ = false; - } - return *this; - } - - Tester& IsOk(const Status& s) { - if (!s.ok()) { - ss_ << " " << s.ToString(); - ok_ = false; - } - return *this; - } - - Tester& IsNotOk(const Status& s) { - if (s.ok()) { - ss_ << " Error status expected"; - ok_ = false; - } - return *this; - } - -#define BINARY_OP(name,op) \ - template \ - Tester& name(const X& x, const Y& y) { \ - if (! 
(x op y)) { \ - ss_ << " failed: " << x << (" " #op " ") << y; \ - ok_ = false; \ - } \ - return *this; \ - } - - BINARY_OP(IsEq, ==) - BINARY_OP(IsNe, !=) - BINARY_OP(IsGe, >=) - BINARY_OP(IsGt, >) - BINARY_OP(IsLe, <=) - BINARY_OP(IsLt, <) -#undef BINARY_OP - - // Attach the specified value to the error message if an error has occurred - template - Tester& operator<<(const V& value) { - if (!ok_) { - ss_ << " " << value; - } - return *this; - } - - operator bool() const { return ok_; } -}; - -class TesterHelper { - private: - const char* fname_; - int line_; - - public: - TesterHelper(const char* f, int l) : fname_(f), line_(l) {} - - void operator=(const Tester& tester) { - fprintf(stderr, "%s:%d:%s\n", fname_, line_, tester.ss_.str().c_str()); - port::PrintStack(2); - exit(1); - } -}; - -// This is trying to solve: -// * Evaluate expression -// * Abort the test if the evaluation is not successful with the evaluation -// details. -// * Support operator << with ASSERT* for extra messages provided by the user -// code of ASSERT* -// -// For the third, we need to make sure that an expression at the end of macro -// supports << operator. But since we can have multiple of << we cannot abort -// inside implementation of operator <<, as we may miss some extra message. That -// is why there is TesterHelper with operator = which has lower precedence then -// operator <<, and it will be called after all messages from use code are -// accounted by <<. -// -// operator bool is added to Tester to make possible its declaration inside if -// statement and do not pollute its outer scope with the name tester. But in C++ -// we cannot do any other operations inside if statement besides declaration. -// Then in order to get inside if body there are two options: make operator -// Tester::bool return true if ok_ == false or put the body into else part. 
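The removed comment above documents the machinery the old macros needed just to keep "<<" message streaming working. With the gtest-backed replacements that this patch keeps (ASSERT_OK and friends, defined a few lines below), that behavior comes for free, since every gtest ASSERT_*/EXPECT_* macro already accepts streamed messages. A minimal sketch of the resulting usage, assuming the post-patch util/testharness.h and the standard two-line main() added throughout this patch; the StatusMacroTest fixture and the message strings are illustrative only, not taken from the patch:

#include "rocksdb/status.h"
#include "util/testharness.h"

class StatusMacroTest : public testing::Test {};  // hypothetical fixture

TEST_F(StatusMacroTest, StreamsExtraMessage) {
  rocksdb::Status ok = rocksdb::Status::OK();
  // ASSERT_OK(s) now expands to ASSERT_TRUE(((s).ok())), so extra context can
  // still be appended to the failure report with "<<".
  ASSERT_OK(ok) << "status should have been OK here";

  rocksdb::Status missing = rocksdb::Status::NotFound("no such key");
  ASSERT_NOK(missing) << "a non-OK status was expected";
}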
-#define TEST_EXPRESSION_(expression) \ - if (::rocksdb::test::Tester& tester = (expression)) \ - ; \ - else \ - ::rocksdb::test::TesterHelper(__FILE__, __LINE__) = tester - -#define ASSERT_TRUE(c) TEST_EXPRESSION_(::rocksdb::test::Tester().Is((c), #c)) -#define ASSERT_OK(s) TEST_EXPRESSION_(::rocksdb::test::Tester().IsOk((s))) -#define ASSERT_NOK(s) TEST_EXPRESSION_(::rocksdb::test::Tester().IsNotOk((s))) -#define ASSERT_EQ(a, b) \ - TEST_EXPRESSION_(::rocksdb::test::Tester().IsEq((a), (b))) -#define ASSERT_NE(a, b) \ - TEST_EXPRESSION_(::rocksdb::test::Tester().IsNe((a), (b))) -#define ASSERT_GE(a, b) \ - TEST_EXPRESSION_(::rocksdb::test::Tester().IsGe((a), (b))) -#define ASSERT_GT(a, b) \ - TEST_EXPRESSION_(::rocksdb::test::Tester().IsGt((a), (b))) -#define ASSERT_LE(a, b) \ - TEST_EXPRESSION_(::rocksdb::test::Tester().IsLe((a), (b))) -#define ASSERT_LT(a, b) \ - TEST_EXPRESSION_(::rocksdb::test::Tester().IsLt((a), (b))) - -#define EXPECT_TRUE(c) TEST_EXPRESSION_(::rocksdb::test::Tester().Is((c), #c)) -#define EXPECT_OK(s) TEST_EXPRESSION_(::rocksdb::test::Tester().IsOk((s))) -#define EXPECT_NOK(s) TEST_EXPRESSION_(::rocksdb::test::Tester().IsNotOk((s))) -#define EXPECT_EQ(a, b) \ - TEST_EXPRESSION_(::rocksdb::test::Tester().IsEq((a), (b))) -#define EXPECT_NE(a, b) \ - TEST_EXPRESSION_(::rocksdb::test::Tester().IsNe((a), (b))) -#define EXPECT_GE(a, b) \ - TEST_EXPRESSION_(::rocksdb::test::Tester().IsGe((a), (b))) -#define EXPECT_GT(a, b) \ - TEST_EXPRESSION_(::rocksdb::test::Tester().IsGt((a), (b))) -#define EXPECT_LE(a, b) \ - TEST_EXPRESSION_(::rocksdb::test::Tester().IsLe((a), (b))) -#define EXPECT_LT(a, b) \ - TEST_EXPRESSION_(::rocksdb::test::Tester().IsLt((a), (b))) - -#define TCONCAT(a, b) TCONCAT1(a, b) -#define TCONCAT1(a, b) a##b - -#define TEST(base, name) \ - class TCONCAT(_Test_, name) : public base { \ - public: \ - void _Run(); \ - static void _RunIt() { \ - TCONCAT(_Test_, name) t; \ - t._Run(); \ - } \ - }; \ - bool TCONCAT(_Test_ignored_, name) __attribute__((__unused__)) \ - = ::rocksdb::test::RegisterTest(#base, #name, \ - &TCONCAT(_Test_, name)::_RunIt); \ - void TCONCAT(_Test_, name)::_Run() - -// Register the specified test. Typically not used directly, but -// invoked via the macro expansion of TEST. -extern bool RegisterTest(const char* base, const char* name, void (*func)()); +#define ASSERT_OK(s) ASSERT_TRUE(((s).ok())) +#define ASSERT_NOK(s) ASSERT_FALSE(((s).ok())) +#define EXPECT_OK(s) EXPECT_TRUE(((s).ok())) +#define EXPECT_NOK(s) EXPECT_FALSE(((s).ok())) } // namespace test } // namespace rocksdb diff --git a/util/thread_list_test.cc b/util/thread_list_test.cc index 7a1bd22b8..eeb2b1688 100644 --- a/util/thread_list_test.cc +++ b/util/thread_list_test.cc @@ -88,13 +88,13 @@ class SimulatedBackgroundTask { std::atomic running_count_; }; -class ThreadListTest { +class ThreadListTest : public testing::Test { public: ThreadListTest() { } }; -TEST(ThreadListTest, GlobalTables) { +TEST_F(ThreadListTest, GlobalTables) { // verify the global tables for operations and states are properly indexed. 
for (int type = 0; type != ThreadStatus::NUM_OP_TYPES; ++type) { ASSERT_EQ(global_operation_table[type].type, type); @@ -118,7 +118,7 @@ TEST(ThreadListTest, GlobalTables) { } } -TEST(ThreadListTest, SimpleColumnFamilyInfoTest) { +TEST_F(ThreadListTest, SimpleColumnFamilyInfoTest) { Env* env = Env::Default(); const int kHighPriorityThreads = 3; const int kLowPriorityThreads = 5; @@ -211,7 +211,7 @@ namespace { } } // namespace -TEST(ThreadListTest, SimpleEventTest) { +TEST_F(ThreadListTest, SimpleEventTest) { Env* env = Env::Default(); // simulated tasks @@ -338,12 +338,14 @@ TEST(ThreadListTest, SimpleEventTest) { } // namespace rocksdb int main(int argc, char** argv) { - return rocksdb::test::RunAllTests(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } #else int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); return 0; } diff --git a/util/thread_local_test.cc b/util/thread_local_test.cc index 155ef243c..49e7775b8 100644 --- a/util/thread_local_test.cc +++ b/util/thread_local_test.cc @@ -14,7 +14,7 @@ namespace rocksdb { -class ThreadLocalTest { +class ThreadLocalTest : public testing::Test { public: ThreadLocalTest() : env_(Env::Default()) {} @@ -54,7 +54,7 @@ class IDChecker : public ThreadLocalPtr { } // anonymous namespace -TEST(ThreadLocalTest, UniqueIdTest) { +TEST_F(ThreadLocalTest, UniqueIdTest) { port::Mutex mu; port::CondVar cv(&mu); @@ -101,7 +101,7 @@ TEST(ThreadLocalTest, UniqueIdTest) { // 3, 1, 2, 0 } -TEST(ThreadLocalTest, SequentialReadWriteTest) { +TEST_F(ThreadLocalTest, SequentialReadWriteTest) { // global id list carries over 3, 1, 2, 0 ASSERT_EQ(IDChecker::PeekId(), 0u); @@ -145,7 +145,7 @@ TEST(ThreadLocalTest, SequentialReadWriteTest) { } } -TEST(ThreadLocalTest, ConcurrentReadWriteTest) { +TEST_F(ThreadLocalTest, ConcurrentReadWriteTest) { // global id list carries over 3, 1, 2, 0 ASSERT_EQ(IDChecker::PeekId(), 0u); @@ -229,7 +229,7 @@ TEST(ThreadLocalTest, ConcurrentReadWriteTest) { ASSERT_EQ(IDChecker::PeekId(), 3u); } -TEST(ThreadLocalTest, Unref) { +TEST_F(ThreadLocalTest, Unref) { ASSERT_EQ(IDChecker::PeekId(), 0u); auto unref = [](void* ptr) { @@ -372,7 +372,7 @@ TEST(ThreadLocalTest, Unref) { } } -TEST(ThreadLocalTest, Swap) { +TEST_F(ThreadLocalTest, Swap) { ThreadLocalPtr tls; tls.Reset(reinterpret_cast(1)); ASSERT_EQ(reinterpret_cast(tls.Swap(nullptr)), 1); @@ -381,7 +381,7 @@ TEST(ThreadLocalTest, Swap) { ASSERT_EQ(reinterpret_cast(tls.Swap(reinterpret_cast(3))), 2); } -TEST(ThreadLocalTest, Scrape) { +TEST_F(ThreadLocalTest, Scrape) { auto unref = [](void* ptr) { auto& p = *static_cast(ptr); p.mu->Lock(); @@ -449,7 +449,7 @@ TEST(ThreadLocalTest, Scrape) { } } -TEST(ThreadLocalTest, CompareAndSwap) { +TEST_F(ThreadLocalTest, CompareAndSwap) { ThreadLocalPtr tls; ASSERT_TRUE(tls.Swap(reinterpret_cast(1)) == nullptr); void* expected = reinterpret_cast(1); @@ -468,5 +468,6 @@ TEST(ThreadLocalTest, CompareAndSwap) { } // namespace rocksdb int main(int argc, char** argv) { - return rocksdb::test::RunAllTests(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/utilities/backupable/backupable_db_test.cc b/utilities/backupable/backupable_db_test.cc index 2b0174bca..658082c41 100644 --- a/utilities/backupable/backupable_db_test.cc +++ b/utilities/backupable/backupable_db_test.cc @@ -351,7 +351,7 @@ static void AssertEmpty(DB* db, int from, int to) { } } -class BackupableDBTest { +class BackupableDBTest : public testing::Test { public: BackupableDBTest() { // set up files @@ -495,7 
+495,7 @@ void AppendPath(const std::string& path, std::vector& v) { } // this will make sure that backup does not copy the same file twice -TEST(BackupableDBTest, NoDoubleCopy) { +TEST_F(BackupableDBTest, NoDoubleCopy) { OpenBackupableDB(true, true); // should write 5 DB files + LATEST_BACKUP + one meta file @@ -566,7 +566,7 @@ TEST(BackupableDBTest, NoDoubleCopy) { // fine // 4. Corrupted checksum value - if the checksum is not a valid uint32_t, // db open should fail, otherwise, it aborts during the restore process. -TEST(BackupableDBTest, CorruptionsTest) { +TEST_F(BackupableDBTest, CorruptionsTest) { const int keys_iteration = 5000; Random rnd(6); Status s; @@ -676,7 +676,7 @@ TEST(BackupableDBTest, CorruptionsTest) { // This test verifies we don't delete the latest backup when read-only option is // set -TEST(BackupableDBTest, NoDeleteWithReadOnly) { +TEST_F(BackupableDBTest, NoDeleteWithReadOnly) { const int keys_iteration = 5000; Random rnd(6); Status s; @@ -708,7 +708,7 @@ TEST(BackupableDBTest, NoDeleteWithReadOnly) { } // open DB, write, close DB, backup, restore, repeat -TEST(BackupableDBTest, OfflineIntegrationTest) { +TEST_F(BackupableDBTest, OfflineIntegrationTest) { // has to be a big number, so that it triggers the memtable flush const int keys_iteration = 5000; const int max_key = keys_iteration * 4 + 10; @@ -755,7 +755,7 @@ TEST(BackupableDBTest, OfflineIntegrationTest) { } // open DB, write, backup, write, backup, close, restore -TEST(BackupableDBTest, OnlineIntegrationTest) { +TEST_F(BackupableDBTest, OnlineIntegrationTest) { // has to be a big number, so that it triggers the memtable flush const int keys_iteration = 5000; const int max_key = keys_iteration * 4 + 10; @@ -818,7 +818,7 @@ TEST(BackupableDBTest, OnlineIntegrationTest) { CloseRestoreDB(); } -TEST(BackupableDBTest, FailOverwritingBackups) { +TEST_F(BackupableDBTest, FailOverwritingBackups) { options_.write_buffer_size = 1024 * 1024 * 1024; // 1GB // create backups 1, 2, 3, 4, 5 OpenBackupableDB(true); @@ -853,7 +853,7 @@ TEST(BackupableDBTest, FailOverwritingBackups) { CloseBackupableDB(); } -TEST(BackupableDBTest, NoShareTableFiles) { +TEST_F(BackupableDBTest, NoShareTableFiles) { const int keys_iteration = 5000; OpenBackupableDB(true, false, false); for (int i = 0; i < 5; ++i) { @@ -869,7 +869,7 @@ TEST(BackupableDBTest, NoShareTableFiles) { } // Verify that you can backup and restore with share_files_with_checksum on -TEST(BackupableDBTest, ShareTableFilesWithChecksums) { +TEST_F(BackupableDBTest, ShareTableFilesWithChecksums) { const int keys_iteration = 5000; OpenBackupableDB(true, false, true, true); for (int i = 0; i < 5; ++i) { @@ -886,7 +886,7 @@ TEST(BackupableDBTest, ShareTableFilesWithChecksums) { // Verify that you can backup and restore using share_files_with_checksum set to // false and then transition this option to true -TEST(BackupableDBTest, ShareTableFilesWithChecksumsTransition) { +TEST_F(BackupableDBTest, ShareTableFilesWithChecksumsTransition) { const int keys_iteration = 5000; // set share_files_with_checksum to false OpenBackupableDB(true, false, true, false); @@ -915,7 +915,7 @@ TEST(BackupableDBTest, ShareTableFilesWithChecksumsTransition) { } } -TEST(BackupableDBTest, DeleteTmpFiles) { +TEST_F(BackupableDBTest, DeleteTmpFiles) { OpenBackupableDB(); CloseBackupableDB(); std::string shared_tmp = backupdir_ + "/shared/00006.sst.tmp"; @@ -934,7 +934,7 @@ TEST(BackupableDBTest, DeleteTmpFiles) { ASSERT_EQ(false, file_manager_->FileExists(private_tmp_dir)); } -TEST(BackupableDBTest, 
KeepLogFiles) { +TEST_F(BackupableDBTest, KeepLogFiles) { backupable_options_->backup_log_files = false; // basically infinite options_.WAL_ttl_seconds = 24 * 60 * 60; @@ -955,7 +955,7 @@ TEST(BackupableDBTest, KeepLogFiles) { AssertBackupConsistency(0, 0, 500, 600, true); } -TEST(BackupableDBTest, RateLimiting) { +TEST_F(BackupableDBTest, RateLimiting) { uint64_t const KB = 1024 * 1024; size_t const kMicrosPerSec = 1000 * 1000LL; @@ -994,7 +994,7 @@ TEST(BackupableDBTest, RateLimiting) { } } -TEST(BackupableDBTest, ReadOnlyBackupEngine) { +TEST_F(BackupableDBTest, ReadOnlyBackupEngine) { DestroyDB(dbname_, Options()); OpenBackupableDB(true); FillDB(db_.get(), 0, 100); @@ -1031,5 +1031,6 @@ TEST(BackupableDBTest, ReadOnlyBackupEngine) { } // namespace rocksdb int main(int argc, char** argv) { - return rocksdb::test::RunAllTests(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/utilities/document/document_db_test.cc b/utilities/document/document_db_test.cc index ff25eefee..d02b58f8d 100644 --- a/utilities/document/document_db_test.cc +++ b/utilities/document/document_db_test.cc @@ -13,7 +13,7 @@ namespace rocksdb { -class DocumentDBTest { +class DocumentDBTest : public testing::Test { public: DocumentDBTest() { dbname_ = test::TmpDir() + "/document_db_test"; @@ -64,7 +64,7 @@ class DocumentDBTest { DocumentDB* db_; }; -TEST(DocumentDBTest, SimpleQueryTest) { +TEST_F(DocumentDBTest, SimpleQueryTest) { DocumentDBOptions options; DocumentDB::IndexDescriptor index; index.description = Parse("{\"name\": 1}"); @@ -136,7 +136,7 @@ TEST(DocumentDBTest, SimpleQueryTest) { } } -TEST(DocumentDBTest, ComplexQueryTest) { +TEST_F(DocumentDBTest, ComplexQueryTest) { DocumentDBOptions options; DocumentDB::IndexDescriptor priority_index; priority_index.description = Parse("{'priority': 1}"); @@ -318,4 +318,7 @@ TEST(DocumentDBTest, ComplexQueryTest) { } // namespace rocksdb -int main(int argc, char** argv) { return rocksdb::test::RunAllTests(); } +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/utilities/document/json_document_test.cc b/utilities/document/json_document_test.cc index 17646cb85..d15cd0cf6 100644 --- a/utilities/document/json_document_test.cc +++ b/utilities/document/json_document_test.cc @@ -48,7 +48,7 @@ void AssertField(const JSONDocument& json, const std::string& field, } } // namespace -class JSONDocumentTest { +class JSONDocumentTest : public testing::Test { public: JSONDocumentTest() : rnd_(101) @@ -104,14 +104,14 @@ class JSONDocumentTest { Random rnd_; }; -TEST(JSONDocumentTest, MakeNullTest) { +TEST_F(JSONDocumentTest, MakeNullTest) { JSONDocument x; ASSERT_TRUE(x.IsNull()); ASSERT_TRUE(x.IsOwner()); ASSERT_TRUE(!x.IsBool()); } -TEST(JSONDocumentTest, MakeBoolTest) { +TEST_F(JSONDocumentTest, MakeBoolTest) { { JSONDocument x(true); ASSERT_TRUE(x.IsOwner()); @@ -129,7 +129,7 @@ TEST(JSONDocumentTest, MakeBoolTest) { } } -TEST(JSONDocumentTest, MakeInt64Test) { +TEST_F(JSONDocumentTest, MakeInt64Test) { JSONDocument x(static_cast(16)); ASSERT_TRUE(x.IsInt64()); ASSERT_TRUE(x.IsInt64()); @@ -138,7 +138,7 @@ TEST(JSONDocumentTest, MakeInt64Test) { ASSERT_EQ(x.GetInt64(), 16); } -TEST(JSONDocumentTest, MakeStringTest) { +TEST_F(JSONDocumentTest, MakeStringTest) { JSONDocument x("string"); ASSERT_TRUE(x.IsOwner()); ASSERT_TRUE(x.IsString()); @@ -146,7 +146,7 @@ TEST(JSONDocumentTest, MakeStringTest) { ASSERT_EQ(x.GetString(), "string"); } -TEST(JSONDocumentTest, MakeDoubleTest) { 
+TEST_F(JSONDocumentTest, MakeDoubleTest) { JSONDocument x(5.6); ASSERT_TRUE(x.IsOwner()); ASSERT_TRUE(x.IsDouble()); @@ -154,7 +154,7 @@ TEST(JSONDocumentTest, MakeDoubleTest) { ASSERT_EQ(x.GetDouble(), 5.6); } -TEST(JSONDocumentTest, MakeByTypeTest) { +TEST_F(JSONDocumentTest, MakeByTypeTest) { { JSONDocument x(JSONDocument::kNull); ASSERT_TRUE(x.IsNull()); @@ -185,7 +185,7 @@ TEST(JSONDocumentTest, MakeByTypeTest) { } } -TEST(JSONDocumentTest, Parsing) { +TEST_F(JSONDocumentTest, Parsing) { std::unique_ptr parsed_json( JSONDocument::ParseJSON(kSampleJSON.c_str())); ASSERT_TRUE(parsed_json->IsOwner()); @@ -208,7 +208,7 @@ TEST(JSONDocumentTest, Parsing) { ASSERT_TRUE(JSONDocument::ParseJSON(kFaultyJSON.c_str()) == nullptr); } -TEST(JSONDocumentTest, Serialization) { +TEST_F(JSONDocumentTest, Serialization) { std::unique_ptr parsed_json( JSONDocument::ParseJSON(kSampleJSON.c_str())); ASSERT_TRUE(parsed_json != nullptr); @@ -226,7 +226,7 @@ TEST(JSONDocumentTest, Serialization) { Slice(serialized.data(), serialized.size() - 10)) == nullptr); } -TEST(JSONDocumentTest, OperatorEqualsTest) { +TEST_F(JSONDocumentTest, OperatorEqualsTest) { // kNull ASSERT_TRUE(JSONDocument() == JSONDocument()); @@ -274,7 +274,7 @@ TEST(JSONDocumentTest, OperatorEqualsTest) { ASSERT_TRUE(JSONDocument(15.) == JSONDocument(15.)); } -TEST(JSONDocumentTest, JSONDocumentBuilderTest) { +TEST_F(JSONDocumentTest, JSONDocumentBuilderTest) { unique_ptr parsedArray( JSONDocument::ParseJSON("[1, [123, \"a\", \"b\"], {\"b\":\"c\"}]")); ASSERT_TRUE(parsedArray != nullptr); @@ -298,7 +298,7 @@ TEST(JSONDocumentTest, JSONDocumentBuilderTest) { ASSERT_TRUE(*parsedArray == builder.GetJSONDocument()); } -TEST(JSONDocumentTest, OwnershipTest) { +TEST_F(JSONDocumentTest, OwnershipTest) { std::unique_ptr parsed( JSONDocument::ParseJSON(kSampleJSON.c_str())); ASSERT_TRUE(parsed != nullptr); @@ -323,4 +323,7 @@ TEST(JSONDocumentTest, OwnershipTest) { } // namespace rocksdb -int main(int argc, char** argv) { return rocksdb::test::RunAllTests(); } +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/utilities/geodb/geodb_test.cc b/utilities/geodb/geodb_test.cc index ba3d36d7e..93fa1e196 100644 --- a/utilities/geodb/geodb_test.cc +++ b/utilities/geodb/geodb_test.cc @@ -11,7 +11,7 @@ namespace rocksdb { -class GeoDBTest { +class GeoDBTest : public testing::Test { public: static const std::string kDefaultDbName; static Options options; @@ -39,7 +39,7 @@ const std::string GeoDBTest::kDefaultDbName = "/tmp/geodefault"; Options GeoDBTest::options = Options(); // Insert, Get and Remove -TEST(GeoDBTest, SimpleTest) { +TEST_F(GeoDBTest, SimpleTest) { GeoPosition pos1(100, 101); std::string id1("id1"); std::string value1("value1"); @@ -90,7 +90,7 @@ TEST(GeoDBTest, SimpleTest) { // Search. 
// Verify distances via http://www.stevemorse.org/nearest/distance.php -TEST(GeoDBTest, Search) { +TEST_F(GeoDBTest, Search) { GeoPosition pos1(45, 45); std::string id1("mid1"); std::string value1 = "midvalue1"; @@ -119,5 +119,6 @@ TEST(GeoDBTest, Search) { } // namespace rocksdb int main(int argc, char* argv[]) { - return rocksdb::test::RunAllTests(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/utilities/merge_operators/string_append/stringappend_test.cc b/utilities/merge_operators/string_append/stringappend_test.cc index 09704c2e4..a0d137c8e 100644 --- a/utilities/merge_operators/string_append/stringappend_test.cc +++ b/utilities/merge_operators/string_append/stringappend_test.cc @@ -106,7 +106,7 @@ class StringLists { // The class for unit-testing -class StringAppendOperatorTest { +class StringAppendOperatorTest : public testing::Test { public: StringAppendOperatorTest() { DestroyDB(kDbName, Options()); // Start each test with a fresh DB @@ -127,7 +127,7 @@ StringAppendOperatorTest::OpenFuncPtr StringAppendOperatorTest::OpenDb = nullptr // THE TEST CASES BEGIN HERE -TEST(StringAppendOperatorTest, IteratorTest) { +TEST_F(StringAppendOperatorTest, IteratorTest) { auto db_ = OpenDb(','); StringLists slists(db_); @@ -220,7 +220,7 @@ TEST(StringAppendOperatorTest, IteratorTest) { } -TEST(StringAppendOperatorTest, SimpleTest) { +TEST_F(StringAppendOperatorTest, SimpleTest) { auto db = OpenDb(','); StringLists slists(db); @@ -235,7 +235,7 @@ TEST(StringAppendOperatorTest, SimpleTest) { ASSERT_EQ(res, "v1,v2,v3"); } -TEST(StringAppendOperatorTest, SimpleDelimiterTest) { +TEST_F(StringAppendOperatorTest, SimpleDelimiterTest) { auto db = OpenDb('|'); StringLists slists(db); @@ -248,7 +248,7 @@ TEST(StringAppendOperatorTest, SimpleDelimiterTest) { ASSERT_EQ(res, "v1|v2|v3"); } -TEST(StringAppendOperatorTest, OneValueNoDelimiterTest) { +TEST_F(StringAppendOperatorTest, OneValueNoDelimiterTest) { auto db = OpenDb('!'); StringLists slists(db); @@ -259,7 +259,7 @@ TEST(StringAppendOperatorTest, OneValueNoDelimiterTest) { ASSERT_EQ(res, "single_val"); } -TEST(StringAppendOperatorTest, VariousKeys) { +TEST_F(StringAppendOperatorTest, VariousKeys) { auto db = OpenDb('\n'); StringLists slists(db); @@ -285,7 +285,7 @@ TEST(StringAppendOperatorTest, VariousKeys) { } // Generate semi random keys/words from a small distribution. 
-TEST(StringAppendOperatorTest, RandomMixGetAppend) { +TEST_F(StringAppendOperatorTest, RandomMixGetAppend) { auto db = OpenDb(' '); StringLists slists(db); @@ -336,7 +336,7 @@ TEST(StringAppendOperatorTest, RandomMixGetAppend) { } -TEST(StringAppendOperatorTest, BIGRandomMixGetAppend) { +TEST_F(StringAppendOperatorTest, BIGRandomMixGetAppend) { auto db = OpenDb(' '); StringLists slists(db); @@ -387,8 +387,7 @@ TEST(StringAppendOperatorTest, BIGRandomMixGetAppend) { } - -TEST(StringAppendOperatorTest, PersistentVariousKeys) { +TEST_F(StringAppendOperatorTest, PersistentVariousKeys) { // Perform the following operations in limited scope { auto db = OpenDb('\n'); @@ -455,7 +454,7 @@ TEST(StringAppendOperatorTest, PersistentVariousKeys) { } } -TEST(StringAppendOperatorTest, PersistentFlushAndCompaction) { +TEST_F(StringAppendOperatorTest, PersistentFlushAndCompaction) { // Perform the following operations in limited scope { auto db = OpenDb('\n'); @@ -551,7 +550,7 @@ TEST(StringAppendOperatorTest, PersistentFlushAndCompaction) { } } -TEST(StringAppendOperatorTest, SimpleTestNullDelimiter) { +TEST_F(StringAppendOperatorTest, SimpleTestNullDelimiter) { auto db = OpenDb('\0'); StringLists slists(db); @@ -577,19 +576,20 @@ TEST(StringAppendOperatorTest, SimpleTestNullDelimiter) { } // namespace rocksdb int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); // Run with regular database int result; { fprintf(stderr, "Running tests with regular db and operator.\n"); StringAppendOperatorTest::SetOpenDbFunction(&OpenNormalDb); - result = rocksdb::test::RunAllTests(); + result = RUN_ALL_TESTS(); } // Run with TTL { fprintf(stderr, "Running tests with ttl db and generic operator.\n"); StringAppendOperatorTest::SetOpenDbFunction(&OpenTtlDb); - result |= rocksdb::test::RunAllTests(); + result |= RUN_ALL_TESTS(); } return result; diff --git a/utilities/redis/redis_lists_test.cc b/utilities/redis/redis_lists_test.cc index 302f02d7c..14ed31631 100644 --- a/utilities/redis/redis_lists_test.cc +++ b/utilities/redis/redis_lists_test.cc @@ -28,7 +28,7 @@ using namespace std; namespace rocksdb { -class RedisListsTest { +class RedisListsTest : public testing::Test { public: static const string kDefaultDbName; static Options options; @@ -55,7 +55,7 @@ void AssertListEq(const std::vector& result, } // namespace // PushRight, Length, Index, Range -TEST(RedisListsTest, SimpleTest) { +TEST_F(RedisListsTest, SimpleTest) { RedisLists redis(kDefaultDbName, options, true); // Destructive string tempv; // Used below for all Index(), PopRight(), PopLeft() @@ -84,7 +84,7 @@ TEST(RedisListsTest, SimpleTest) { } // PushLeft, Length, Index, Range -TEST(RedisListsTest, SimpleTest2) { +TEST_F(RedisListsTest, SimpleTest2) { RedisLists redis(kDefaultDbName, options, true); // Destructive string tempv; // Used below for all Index(), PopRight(), PopLeft() @@ -113,7 +113,7 @@ TEST(RedisListsTest, SimpleTest2) { } // Exhaustive test of the Index() function -TEST(RedisListsTest, IndexTest) { +TEST_F(RedisListsTest, IndexTest) { RedisLists redis(kDefaultDbName, options, true); // Destructive string tempv; // Used below for all Index(), PopRight(), PopLeft() @@ -172,7 +172,7 @@ TEST(RedisListsTest, IndexTest) { // Exhaustive test of the Range() function -TEST(RedisListsTest, RangeTest) { +TEST_F(RedisListsTest, RangeTest) { RedisLists redis(kDefaultDbName, options, true); // Destructive string tempv; // Used below for all Index(), PopRight(), PopLeft() @@ -255,7 +255,7 @@ TEST(RedisListsTest, RangeTest) { } // 
Exhaustive test for InsertBefore(), and InsertAfter() -TEST(RedisListsTest, InsertTest) { +TEST_F(RedisListsTest, InsertTest) { RedisLists redis(kDefaultDbName, options, true); string tempv; // Used below for all Index(), PopRight(), PopLeft() @@ -339,7 +339,7 @@ TEST(RedisListsTest, InsertTest) { } // Exhaustive test of Set function -TEST(RedisListsTest, SetTest) { +TEST_F(RedisListsTest, SetTest) { RedisLists redis(kDefaultDbName, options, true); string tempv; // Used below for all Index(), PopRight(), PopLeft() @@ -435,7 +435,7 @@ TEST(RedisListsTest, SetTest) { } // Testing Insert, Push, and Set, in a mixed environment -TEST(RedisListsTest, InsertPushSetTest) { +TEST_F(RedisListsTest, InsertPushSetTest) { RedisLists redis(kDefaultDbName, options, true); // Destructive string tempv; // Used below for all Index(), PopRight(), PopLeft() @@ -527,7 +527,7 @@ TEST(RedisListsTest, InsertPushSetTest) { } // Testing Trim, Pop -TEST(RedisListsTest, TrimPopTest) { +TEST_F(RedisListsTest, TrimPopTest) { RedisLists redis(kDefaultDbName, options, true); // Destructive string tempv; // Used below for all Index(), PopRight(), PopLeft() @@ -597,7 +597,7 @@ TEST(RedisListsTest, TrimPopTest) { } // Testing Remove, RemoveFirst, RemoveLast -TEST(RedisListsTest, RemoveTest) { +TEST_F(RedisListsTest, RemoveTest) { RedisLists redis(kDefaultDbName, options, true); // Destructive string tempv; // Used below for all Index(), PopRight(), PopLeft() @@ -688,8 +688,7 @@ TEST(RedisListsTest, RemoveTest) { // Test Multiple keys and Persistence -TEST(RedisListsTest, PersistenceMultiKeyTest) { - +TEST_F(RedisListsTest, PersistenceMultiKeyTest) { string tempv; // Used below for all Index(), PopRight(), PopLeft() // Block one: populate a single key in the database @@ -874,11 +873,12 @@ bool found_arg(int argc, char* argv[], const char* want){ // However, if -m is specified, it will do user manual/interactive testing // -m -d is manual and destructive (will clear the database before use) int main(int argc, char* argv[]) { + ::testing::InitGoogleTest(&argc, argv); if (found_arg(argc, argv, "-m")) { bool destructive = found_arg(argc, argv, "-d"); return rocksdb::manual_redis_test(destructive); } else { - return rocksdb::test::RunAllTests(); + return RUN_ALL_TESTS(); } } diff --git a/utilities/spatialdb/spatial_db_test.cc b/utilities/spatialdb/spatial_db_test.cc index 0484f8c02..b304664d3 100644 --- a/utilities/spatialdb/spatial_db_test.cc +++ b/utilities/spatialdb/spatial_db_test.cc @@ -15,7 +15,7 @@ namespace rocksdb { namespace spatial { -class SpatialDBTest { +class SpatialDBTest : public testing::Test { public: SpatialDBTest() { dbname_ = test::TmpDir() + "/spatial_db_test"; @@ -46,7 +46,7 @@ class SpatialDBTest { SpatialDB* db_; }; -TEST(SpatialDBTest, FeatureSetSerializeTest) { +TEST_F(SpatialDBTest, FeatureSetSerializeTest) { FeatureSet fs; fs.Set("a", std::string("b")); @@ -93,7 +93,7 @@ TEST(SpatialDBTest, FeatureSetSerializeTest) { ASSERT_TRUE(!deserialized.Deserialize(serialized)); } -TEST(SpatialDBTest, TestNextID) { +TEST_F(SpatialDBTest, TestNextID) { ASSERT_OK(SpatialDB::Create( SpatialDBOptions(), dbname_, {SpatialIndexOptions("simple", BoundingBox(0, 0, 100, 100), 2)})); @@ -116,7 +116,7 @@ TEST(SpatialDBTest, TestNextID) { delete db_; } -TEST(SpatialDBTest, FeatureSetTest) { +TEST_F(SpatialDBTest, FeatureSetTest) { ASSERT_OK(SpatialDB::Create( SpatialDBOptions(), dbname_, {SpatialIndexOptions("simple", BoundingBox(0, 0, 100, 100), 2)})); @@ -150,7 +150,7 @@ TEST(SpatialDBTest, FeatureSetTest) { delete db_; 
} -TEST(SpatialDBTest, SimpleTest) { +TEST_F(SpatialDBTest, SimpleTest) { // iter 0 -- not read only // iter 1 -- read only for (int iter = 0; iter < 2; ++iter) { @@ -226,7 +226,7 @@ BoundingBox ScaleBB(BoundingBox b, double step) { } // namespace -TEST(SpatialDBTest, RandomizedTest) { +TEST_F(SpatialDBTest, RandomizedTest) { Random rnd(301); std::vector>> elements; @@ -268,4 +268,7 @@ TEST(SpatialDBTest, RandomizedTest) { } // namespace spatial } // namespace rocksdb -int main(int argc, char** argv) { return rocksdb::test::RunAllTests(); } +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/utilities/ttl/ttl_test.cc b/utilities/ttl/ttl_test.cc index 19e21f653..c970047bc 100644 --- a/utilities/ttl/ttl_test.cc +++ b/utilities/ttl/ttl_test.cc @@ -38,7 +38,7 @@ class SpecialTimeEnv : public EnvWrapper { int64_t current_time_; }; -class TtlTest { +class TtlTest : public testing::Test { public: TtlTest() { env_.reset(new SpecialTimeEnv(Env::Default())); @@ -372,7 +372,7 @@ class TtlTest { // This test opens the db 3 times with such default behavior and inserts a // bunch of kvs each time. All kvs should accumulate in the db till the end // Partitions the sample-size provided into 3 sets over boundary1 and boundary2 -TEST(TtlTest, NoEffect) { +TEST_F(TtlTest, NoEffect) { MakeKVMap(kSampleSize_); int64_t boundary1 = kSampleSize_ / 3; int64_t boundary2 = 2 * boundary1; @@ -394,7 +394,7 @@ TEST(TtlTest, NoEffect) { } // Puts a set of values and checks its presence using Get during ttl -TEST(TtlTest, PresentDuringTTL) { +TEST_F(TtlTest, PresentDuringTTL) { MakeKVMap(kSampleSize_); OpenTtl(2); // T=0:Open the db with ttl = 2 @@ -404,7 +404,7 @@ TEST(TtlTest, PresentDuringTTL) { } // Puts a set of values and checks its absence using Get after ttl -TEST(TtlTest, AbsentAfterTTL) { +TEST_F(TtlTest, AbsentAfterTTL) { MakeKVMap(kSampleSize_); OpenTtl(1); // T=0:Open the db with ttl = 2 @@ -415,7 +415,7 @@ TEST(TtlTest, AbsentAfterTTL) { // Resets the timestamp of a set of kvs by updating them and checks that they // are not deleted according to the old timestamp -TEST(TtlTest, ResetTimestamp) { +TEST_F(TtlTest, ResetTimestamp) { MakeKVMap(kSampleSize_); OpenTtl(3); @@ -427,7 +427,7 @@ TEST(TtlTest, ResetTimestamp) { } // Similar to PresentDuringTTL but uses Iterator -TEST(TtlTest, IterPresentDuringTTL) { +TEST_F(TtlTest, IterPresentDuringTTL) { MakeKVMap(kSampleSize_); OpenTtl(2); @@ -437,7 +437,7 @@ TEST(TtlTest, IterPresentDuringTTL) { } // Similar to AbsentAfterTTL but uses Iterator -TEST(TtlTest, IterAbsentAfterTTL) { +TEST_F(TtlTest, IterAbsentAfterTTL) { MakeKVMap(kSampleSize_); OpenTtl(1); @@ -448,7 +448,7 @@ TEST(TtlTest, IterAbsentAfterTTL) { // Checks presence while opening the same db more than once with the same ttl // Note: The second open will open the same db -TEST(TtlTest, MultiOpenSamePresent) { +TEST_F(TtlTest, MultiOpenSamePresent) { MakeKVMap(kSampleSize_); OpenTtl(2); @@ -462,7 +462,7 @@ TEST(TtlTest, MultiOpenSamePresent) { // Checks absence while opening the same db more than once with the same ttl // Note: The second open will open the same db -TEST(TtlTest, MultiOpenSameAbsent) { +TEST_F(TtlTest, MultiOpenSameAbsent) { MakeKVMap(kSampleSize_); OpenTtl(1); @@ -475,7 +475,7 @@ TEST(TtlTest, MultiOpenSameAbsent) { } // Checks presence while opening the same db more than once with bigger ttl -TEST(TtlTest, MultiOpenDifferent) { +TEST_F(TtlTest, MultiOpenDifferent) { MakeKVMap(kSampleSize_); OpenTtl(1); @@ -488,7 +488,7 
@@ TEST(TtlTest, MultiOpenDifferent) { } // Checks presence during ttl in read_only mode -TEST(TtlTest, ReadOnlyPresentForever) { +TEST_F(TtlTest, ReadOnlyPresentForever) { MakeKVMap(kSampleSize_); OpenTtl(1); // T=0:Open the db normally @@ -502,7 +502,7 @@ TEST(TtlTest, ReadOnlyPresentForever) { // Checks whether WriteBatch works well with TTL // Puts all kvs in kvmap_ in a batch and writes first, then deletes first half -TEST(TtlTest, WriteBatchTest) { +TEST_F(TtlTest, WriteBatchTest) { MakeKVMap(kSampleSize_); BatchOperation batch_ops[kSampleSize_]; for (int i = 0; i < kSampleSize_; i++) { @@ -521,7 +521,7 @@ TEST(TtlTest, WriteBatchTest) { } // Checks user's compaction filter for correctness with TTL logic -TEST(TtlTest, CompactionFilter) { +TEST_F(TtlTest, CompactionFilter) { MakeKVMap(kSampleSize_); OpenTtlWithTestCompaction(1); @@ -541,7 +541,7 @@ TEST(TtlTest, CompactionFilter) { // Insert some key-values which KeyMayExist should be able to get and check that // values returned are fine -TEST(TtlTest, KeyMayExist) { +TEST_F(TtlTest, KeyMayExist) { MakeKVMap(kSampleSize_); OpenTtl(); @@ -552,7 +552,7 @@ TEST(TtlTest, KeyMayExist) { CloseTtl(); } -TEST(TtlTest, MultiGetTest) { +TEST_F(TtlTest, MultiGetTest) { MakeKVMap(kSampleSize_); OpenTtl(); @@ -563,7 +563,7 @@ TEST(TtlTest, MultiGetTest) { CloseTtl(); } -TEST(TtlTest, ColumnFamiliesTest) { +TEST_F(TtlTest, ColumnFamiliesTest) { DB* db; Options options; options.create_if_missing = true; @@ -624,5 +624,6 @@ TEST(TtlTest, ColumnFamiliesTest) { // A black-box test for the ttl wrapper around rocksdb int main(int argc, char** argv) { - return rocksdb::test::RunAllTests(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/utilities/write_batch_with_index/write_batch_with_index_test.cc b/utilities/write_batch_with_index/write_batch_with_index_test.cc index f5d6a55a3..f31beaf9d 100644 --- a/utilities/write_batch_with_index/write_batch_with_index_test.cc +++ b/utilities/write_batch_with_index/write_batch_with_index_test.cc @@ -69,9 +69,9 @@ struct TestHandler : public WriteBatch::Handler { }; } // namespace anonymous -class WriteBatchWithIndexTest {}; +class WriteBatchWithIndexTest : public testing::Test {}; -TEST(WriteBatchWithIndexTest, TestValueAsSecondaryIndex) { +TEST_F(WriteBatchWithIndexTest, TestValueAsSecondaryIndex) { Entry entries[] = {{"aaa", "0005", kPutRecord}, {"b", "0002", kPutRecord}, {"cdd", "0002", kMergeRecord}, @@ -278,7 +278,7 @@ TEST(WriteBatchWithIndexTest, TestValueAsSecondaryIndex) { } } -TEST(WriteBatchWithIndexTest, TestComparatorForCF) { +TEST_F(WriteBatchWithIndexTest, TestComparatorForCF) { ColumnFamilyHandleImplDummy cf1(6, nullptr); ColumnFamilyHandleImplDummy reverse_cf(66, ReverseBytewiseComparator()); ColumnFamilyHandleImplDummy cf2(88, BytewiseComparator()); @@ -361,7 +361,7 @@ TEST(WriteBatchWithIndexTest, TestComparatorForCF) { } } -TEST(WriteBatchWithIndexTest, TestOverwriteKey) { +TEST_F(WriteBatchWithIndexTest, TestOverwriteKey) { ColumnFamilyHandleImplDummy cf1(6, nullptr); ColumnFamilyHandleImplDummy reverse_cf(66, ReverseBytewiseComparator()); ColumnFamilyHandleImplDummy cf2(88, BytewiseComparator()); @@ -515,7 +515,7 @@ void AssertItersEqual(Iterator* iter1, Iterator* iter2) { } } // namespace -TEST(WriteBatchWithIndexTest, TestRandomIteraratorWithBase) { +TEST_F(WriteBatchWithIndexTest, TestRandomIteraratorWithBase) { std::vector source_strings = {"a", "b", "c", "d", "e", "f", "g", "h", "i", "j"}; for (int rand_seed = 301; rand_seed < 366; rand_seed++) { @@ 
-628,7 +628,7 @@ TEST(WriteBatchWithIndexTest, TestRandomIteraratorWithBase) { } } -TEST(WriteBatchWithIndexTest, TestIteraratorWithBase) { +TEST_F(WriteBatchWithIndexTest, TestIteraratorWithBase) { ColumnFamilyHandleImplDummy cf1(6, BytewiseComparator()); ColumnFamilyHandleImplDummy cf2(2, BytewiseComparator()); WriteBatchWithIndex batch(BytewiseComparator(), 20, true); @@ -791,7 +791,7 @@ TEST(WriteBatchWithIndexTest, TestIteraratorWithBase) { } } -TEST(WriteBatchWithIndexTest, TestIteraratorWithBaseReverseCmp) { +TEST_F(WriteBatchWithIndexTest, TestIteraratorWithBaseReverseCmp) { ColumnFamilyHandleImplDummy cf1(6, ReverseBytewiseComparator()); ColumnFamilyHandleImplDummy cf2(2, ReverseBytewiseComparator()); WriteBatchWithIndex batch(BytewiseComparator(), 20, true); @@ -879,4 +879,7 @@ TEST(WriteBatchWithIndexTest, TestIteraratorWithBaseReverseCmp) { } // namespace -int main(int argc, char** argv) { return rocksdb::test::RunAllTests(); } +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +}
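Every file in this patch follows the same three-step conversion: the test class gains ": public testing::Test", each TEST(Fixture, Name) becomes TEST_F(Fixture, Name), and main() calls ::testing::InitGoogleTest before RUN_ALL_TESTS() instead of the removed rocksdb::test::RunAllTests(). A minimal sketch of what a test file looks like after the conversion; FooTest, counter_, and the file name foo_test.cc are illustrative stand-ins, not anything taken from the patch:

#include <gtest/gtest.h>

namespace rocksdb {

// The fixture must derive from testing::Test so that TEST_F can attach to it;
// its members (counter_ here) are visible inside each test body.
class FooTest : public testing::Test {
 public:
  FooTest() : counter_(0) {}
  int counter_;
};

TEST_F(FooTest, StartsAtZero) {
  ASSERT_EQ(counter_, 0);
}

TEST_F(FooTest, Increments) {
  ++counter_;  // each TEST_F gets a fresh FooTest instance
  ASSERT_EQ(counter_, 1);
}

}  // namespace rocksdb

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}

One practical difference to keep in mind: the ROCKSDB_TESTS / ROCKSDB_TESTS_FROM environment variables described in the comment removed from util/testharness.h no longer select tests; with gtest the equivalent is the --gtest_filter flag, e.g. ./foo_test --gtest_filter='FooTest.Increments' for the hypothetical binary above.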