Converted db/merge_test.cc to use gtest (#4114)

Summary:
Picked up a task to convert this to use the gtest framework.  It can't be this simple, can it?

It works, but should all the std::cout be removed?

```
[$] ~/git/rocksdb [gft !]: ./merge_test
[==========] Running 2 tests from 1 test case.
[----------] Global test environment set-up.
[----------] 2 tests from MergeTest
[ RUN      ] MergeTest.MergeDbTest
Test read-modify-write counters...
a: 3
1
2
a: 3
b: 1225
3
Compaction started ...
Compaction ended
a: 3
b: 1225
Test merge-based counters...
a: 3
1
2
a: 3
b: 1225
3
Test merge in memtable...
a: 3
1
2
a: 3
b: 1225
3
Test Partial-Merge
Test merge-operator not set after reopen
[       OK ] MergeTest.MergeDbTest (93 ms)
[ RUN      ] MergeTest.MergeDbTtlTest
Opening database with TTL
Test read-modify-write counters...
a: 3
1
2
a: 3
b: 1225
3
Compaction started ...
Compaction ended
a: 3
b: 1225
Test merge-based counters...
a: 3
1
2
a: 3
b: 1225
3
Test merge in memtable...
Opening database with TTL
a: 3
1
2
a: 3
b: 1225
3
Test Partial-Merge
Opening database with TTL
Opening database with TTL
Opening database with TTL
Opening database with TTL
Test merge-operator not set after reopen
[       OK ] MergeTest.MergeDbTtlTest (97 ms)
[----------] 2 tests from MergeTest (190 ms total)

[----------] Global test environment tear-down
[==========] 2 tests from 1 test case ran. (190 ms total)
[  PASSED  ] 2 tests.
```
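For reference, the conversion boils down to the standard gtest skeleton sketched below. This is a minimal, self-contained sketch that includes gtest directly; the real file pulls the macros in through util/testharness.h and keeps runTest() as the test body, as the diff further down shows.

```cpp
#include <gtest/gtest.h>

namespace rocksdb {

// An empty fixture is enough here; the tests only need something to hang TEST_F off.
class MergeTest : public testing::Test {};

TEST_F(MergeTest, MergeDbTest) {
  // In the real test this calls runTest(test::TmpDir() + "/merge_testdb").
}

}  // namespace rocksdb

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
```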
Pull Request resolved: https://github.com/facebook/rocksdb/pull/4114

Differential Revision: D8822886

Pulled By: gfosco

fbshipit-source-id: c299d008e883c3bb911d2b357a2e9e4423f8e91a
Branch: main
Author: Fosco Marotto (committed by Facebook Github Bot)
Parent: 537a233941
Commit: 8527012bb6
Changed files:
1. db/merge_test.cc (69 lines changed)
2. util/compression.h (2 lines changed)
3. utilities/transactions/transaction_test.cc (4 lines changed)

--- a/db/merge_test.cc
+++ b/db/merge_test.cc
@@ -20,15 +20,17 @@
 #include "utilities/merge_operators.h"
 #include "util/testharness.h"
 
-using namespace rocksdb;
+namespace rocksdb {
 
-namespace {
+bool use_compression;
+
+class MergeTest : public testing::Test {};
+
 size_t num_merge_operator_calls;
 void resetNumMergeOperatorCalls() { num_merge_operator_calls = 0; }
 
 size_t num_partial_merge_calls;
 void resetNumPartialMergeCalls() { num_partial_merge_calls = 0; }
-}
 
 class CountMergeOperator : public AssociativeMergeOperator {
  public:
@@ -74,7 +76,6 @@ class CountMergeOperator : public AssociativeMergeOperator {
   std::shared_ptr<MergeOperator> mergeOperator_;
 };
 
-namespace {
 std::shared_ptr<DB> OpenDb(const std::string& dbname, const bool ttl = false,
                            const size_t max_successive_merges = 0) {
   DB* db;
@@ -87,7 +88,6 @@ std::shared_ptr<DB> OpenDb(const std::string& dbname, const bool ttl = false,
   // DBWithTTL is not supported in ROCKSDB_LITE
 #ifndef ROCKSDB_LITE
   if (ttl) {
-    std::cout << "Opening database with TTL\n";
     DBWithTTL* db_with_ttl;
     s = DBWithTTL::Open(options, dbname, &db_with_ttl);
     db = db_with_ttl;
@@ -104,7 +104,6 @@ std::shared_ptr<DB> OpenDb(const std::string& dbname, const bool ttl = false,
   }
   return std::shared_ptr<DB>(db);
 }
-}  // namespace
 
 // Imagine we are maintaining a set of uint64 counters.
 // Each counter has a distinct name. And we would like
@@ -246,12 +245,11 @@ class MergeBasedCounters : public Counters {
   }
 };
 
-namespace {
 void dumpDb(DB* db) {
   auto it = unique_ptr<Iterator>(db->NewIterator(ReadOptions()));
   for (it->SeekToFirst(); it->Valid(); it->Next()) {
-    uint64_t value = DecodeFixed64(it->value().data());
-    std::cout << it->key().ToString() << ": " << value << std::endl;
+    //uint64_t value = DecodeFixed64(it->value().data());
+    //std::cout << it->key().ToString() << ": " << value << std::endl;
   }
   assert(it->status().ok());  // Check for any errors found during the scan
 }
@@ -281,8 +279,6 @@ void testCounters(Counters& counters, DB* db, bool test_compaction) {
 
   dumpDb(db);
 
-  std::cout << "1\n";
-
   // 1+...+49 = ?
   uint64_t sum = 0;
   for (int i = 1; i < 50; i++) {
@@ -291,17 +287,12 @@ void testCounters(Counters& counters, DB* db, bool test_compaction) {
   }
   assert(counters.assert_get("b") == sum);
 
-  std::cout << "2\n";
   dumpDb(db);
 
-  std::cout << "3\n";
-
   if (test_compaction) {
     db->Flush(o);
 
-    std::cout << "Compaction started ...\n";
     db->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-    std::cout << "Compaction ended\n";
 
     dumpDb(db);
@@ -411,44 +402,35 @@ void testSingleBatchSuccessiveMerge(DB* db, size_t max_num_merges,
                 static_cast<size_t>((num_merges % (max_num_merges + 1))));
 }
 
-void runTest(int argc, const std::string& dbname, const bool use_ttl = false) {
-  bool compact = false;
-  if (argc > 1) {
-    compact = true;
-    std::cout << "Turn on Compaction\n";
-  }
+void runTest(const std::string& dbname, const bool use_ttl = false) {
 
   {
     auto db = OpenDb(dbname, use_ttl);
 
     {
-      std::cout << "Test read-modify-write counters... \n";
       Counters counters(db, 0);
       testCounters(counters, db.get(), true);
     }
 
     {
-      std::cout << "Test merge-based counters... \n";
       MergeBasedCounters counters(db, 0);
-      testCounters(counters, db.get(), compact);
+      testCounters(counters, db.get(), use_compression);
     }
   }
 
   DestroyDB(dbname, Options());
 
   {
-    std::cout << "Test merge in memtable... \n";
     size_t max_merge = 5;
     auto db = OpenDb(dbname, use_ttl, max_merge);
     MergeBasedCounters counters(db, 0);
-    testCounters(counters, db.get(), compact);
+    testCounters(counters, db.get(), use_compression);
     testSuccessiveMerge(counters, max_merge, max_merge * 2);
     testSingleBatchSuccessiveMerge(db.get(), 5, 7);
     DestroyDB(dbname, Options());
   }
 
   {
-    std::cout << "Test Partial-Merge\n";
     size_t max_merge = 100;
     // Min merge is hard-coded to 2.
     uint32_t min_merge = 2;
@@ -468,7 +450,6 @@ void runTest(int argc, const std::string& dbname, const bool use_ttl = false) {
   }
 
   {
-    std::cout << "Test merge-operator not set after reopen\n";
     {
       auto db = OpenDb(dbname);
       MergeBasedCounters counters(db, 0);
@@ -502,16 +483,28 @@ void runTest(int argc, const std::string& dbname, const bool use_ttl = false) {
   }
   */
 }
 
-}  // namespace
-
-int main(int argc, char* /*argv*/ []) {
-  //TODO: Make this test like a general rocksdb unit-test
-  rocksdb::port::InstallStackTraceHandler();
-  runTest(argc, test::TmpDir() + "/merge_testdb");
+TEST_F(MergeTest, MergeDbTest) {
+  runTest(test::TmpDir() + "/merge_testdb");
+}
+
+// DBWithTTL is not supported in ROCKSDB_LITE
 #ifndef ROCKSDB_LITE
-  runTest(argc, test::TmpDir() + "/merge_testdbttl", true); // Run test on TTL database
+TEST_F(MergeTest, MergeDbTtlTest) {
+  runTest(test::TmpDir() + "/merge_testdbttl", true);  // Run test on TTL database
+}
 #endif  // !ROCKSDB_LITE
-  printf("Passed all tests!\n");
-  return 0;
+
+}  // namespace rocksdb
+
+int main(int argc, char** argv) {
+  rocksdb::use_compression = false;
+  if (argc > 1) {
+    rocksdb::use_compression = true;
+  }
+
+  rocksdb::port::InstallStackTraceHandler();
+  ::testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
 }
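One behavioral detail of the converted harness: the old binary decided whether testCounters() runs its compaction pass from argc inside runTest(), while the new file hoists that decision into the namespace-level use_compression flag set once in main() before the tests run. A self-contained sketch of that pattern (FlagExample is a made-up test name, not part of the diff):

```cpp
#include <gtest/gtest.h>

namespace rocksdb {

// Set from main() before RUN_ALL_TESTS(); tests read it instead of parsing
// argc/argv themselves.
bool use_compression;

TEST(FlagExample, ReadsGlobalFlag) {
  if (use_compression) {
    SUCCEED() << "extra compaction pass enabled";
  } else {
    SUCCEED() << "running the plain counter tests only";
  }
}

}  // namespace rocksdb

int main(int argc, char** argv) {
  rocksdb::use_compression = false;
  if (argc > 1) {
    rocksdb::use_compression = true;  // any extra argument turns the flag on
  }
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
```

Note that because argc is checked before InitGoogleTest() strips gtest's own flags, any extra argument (including something like --gtest_filter=...) also flips the flag on.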

--- a/util/compression.h
+++ b/util/compression.h
@@ -121,6 +121,8 @@ class ZSTDUncompressCachedData {
   int64_t GetCacheIndex() const { return -1; }
   void CreateIfNeeded() {}
   void InitFromCache(const ZSTDUncompressCachedData&, int64_t) {}
+ private:
+  void ignore_padding__() { padding = nullptr; }
 };
 }  // namespace rocksdb
 #endif
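The util/compression.h hunk is there to keep the otherwise-unused padding member of the no-ZSTD stub from tripping warnings such as clang's -Wunused-private-field: a private helper that is never called but does reference the field counts as a use. A generic sketch of the same trick (DummyCachedData and padding_ are made-up names, not RocksDB's):

```cpp
#include <cstdint>

class DummyCachedData {
 public:
  int64_t GetCacheIndex() const { return -1; }

 private:
  // Unused on purpose, e.g. so this stub keeps the same size as the full class.
  void* padding_;
  // Never called; merely referencing padding_ here silences the warning.
  void ignore_padding__() { padding_ = nullptr; }
};
```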

--- a/utilities/transactions/transaction_test.cc
+++ b/utilities/transactions/transaction_test.cc
@@ -3211,7 +3211,7 @@ TEST_P(TransactionTest, LockLimitTest) {
   ASSERT_OK(s);
 
   // Create a txn and verify we can only lock up to 3 keys
-  Transaction* txn = db->BeginTransaction(write_options);
+  Transaction* txn = db->BeginTransaction(write_options, txn_options);
   ASSERT_TRUE(txn);
 
   s = txn->Put("X", "x");
@@ -3244,7 +3244,7 @@ TEST_P(TransactionTest, LockLimitTest) {
   s = txn->Get(read_options, "W", &value);
   ASSERT_TRUE(s.IsNotFound());
 
-  Transaction* txn2 = db->BeginTransaction(write_options);
+  Transaction* txn2 = db->BeginTransaction(write_options, txn_options);
   ASSERT_TRUE(txn2);
 
   // "X" currently locked
