Divide WriteCallbackTest.WriteWithCallbackTest (#7037)

Summary:
WriteCallbackTest.WriteWithCallbackTest has deeply nested for-loops and in some cases runs very long. Parameterized it.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/7037
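
For context, the diff below uses googletest's value-parameterization pattern: the seven nested bool loops become a seven-bool tuple parameter expanded with ::testing::Combine, so each combination runs as its own test case. A minimal sketch of that pattern, reduced to two flags with illustrative names (TwoFlagPTest, flag_a_, flag_b_ are not from the diff):

    #include <tuple>

    #include "gtest/gtest.h"

    // Illustrative fixture: the real change derives from WriteCallbackTest and
    // unpacks seven bools; here two flags stand in for the full tuple.
    class TwoFlagPTest
        : public ::testing::TestWithParam<std::tuple<bool, bool>> {
     public:
      TwoFlagPTest() { std::tie(flag_a_, flag_b_) = GetParam(); }

     protected:
      bool flag_a_;
      bool flag_b_;
    };

    TEST_P(TwoFlagPTest, Example) {
      // One (flag_a_, flag_b_) combination per test case, replacing one
      // iteration of the old nested for-loops.
      SUCCEED();
    }

    // Combine(Bool(), Bool()) enumerates every combination of the two flags.
    INSTANTIATE_TEST_CASE_P(TwoFlagPTest, TwoFlagPTest,
                            ::testing::Combine(::testing::Bool(),
                                               ::testing::Bool()));

A practical side effect is that individual combinations can be selected with the standard --gtest_filter flag and are reported as separate tests.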

Test Plan: Run the test and see that it passes.

Reviewed By: ltamasi

Differential Revision: D22269259

fbshipit-source-id: a1b6687b5bf4609754833d14cf383d68bc7ab27a
Branch: main
Author: sdong (committed by Facebook GitHub Bot)
Parent: 2d1d51d385
Commit: 80b107a0a9

Changed files:
  1. db/db_impl/db_impl.h       (2 lines changed)
  2. db/write_callback_test.cc  (489 lines changed)

diff --git a/db/db_impl/db_impl.h b/db/db_impl/db_impl.h
@@ -1199,7 +1199,7 @@ class DBImpl : public DB {
   friend class StatsHistoryTest_PersistentStatsCreateColumnFamilies_Test;
 #ifndef NDEBUG
   friend class DBTest2_ReadCallbackTest_Test;
-  friend class WriteCallbackTest_WriteWithCallbackTest_Test;
+  friend class WriteCallbackPTest_WriteWithCallbackTest_Test;
   friend class XFTransactionWriteHandler;
   friend class DBBlobIndexTest;
   friend class WriteUnpreparedTransactionTest_RecoveryTest_Test;

diff --git a/db/write_callback_test.cc b/db/write_callback_test.cc
@@ -84,7 +84,28 @@ class MockWriteCallback : public WriteCallback {
   bool AllowWriteBatching() override { return allow_batching_; }
 };
 
-TEST_F(WriteCallbackTest, WriteWithCallbackTest) {
+class WriteCallbackPTest
+    : public WriteCallbackTest,
+      public ::testing::WithParamInterface<
+          std::tuple<bool, bool, bool, bool, bool, bool, bool>> {
+ public:
+  WriteCallbackPTest() {
+    std::tie(unordered_write_, seq_per_batch_, two_queues_, allow_parallel_,
+             allow_batching_, enable_WAL_, enable_pipelined_write_) =
+        GetParam();
+  }
+
+ protected:
+  bool unordered_write_;
+  bool seq_per_batch_;
+  bool two_queues_;
+  bool allow_parallel_;
+  bool allow_batching_;
+  bool enable_WAL_;
+  bool enable_pipelined_write_;
+};
+
+TEST_P(WriteCallbackPTest, WriteWithCallbackTest) {
   struct WriteOP {
     WriteOP(bool should_fail = false) { callback_.should_fail_ = should_fail; }
@@ -124,254 +145,238 @@ TEST_F(WriteCallbackTest, WriteWithCallbackTest) {
       {false, false, true, false, true},
   };
 
-  for (auto& unordered_write : {true, false}) {
-    for (auto& seq_per_batch : {true, false}) {
-      for (auto& two_queues : {true, false}) {
-        for (auto& allow_parallel : {true, false}) {
-          for (auto& allow_batching : {true, false}) {
-            for (auto& enable_WAL : {true, false}) {
-              for (auto& enable_pipelined_write : {true, false}) {
-                for (auto& write_group : write_scenarios) {
-                  Options options;
-                  options.create_if_missing = true;
-                  options.unordered_write = unordered_write;
-                  options.allow_concurrent_memtable_write = allow_parallel;
-                  options.enable_pipelined_write = enable_pipelined_write;
-                  options.two_write_queues = two_queues;
-
-                  // Skip unsupported combinations
-                  if (options.enable_pipelined_write && seq_per_batch) {
-                    continue;
-                  }
-                  if (options.enable_pipelined_write && options.two_write_queues) {
-                    continue;
-                  }
-                  if (options.unordered_write &&
-                      !options.allow_concurrent_memtable_write) {
-                    continue;
-                  }
-                  if (options.unordered_write && options.enable_pipelined_write) {
-                    continue;
-                  }
-
-                  ReadOptions read_options;
-                  DB* db;
-                  DBImpl* db_impl;
-
-                  DestroyDB(dbname, options);
-
-                  DBOptions db_options(options);
-                  ColumnFamilyOptions cf_options(options);
-                  std::vector<ColumnFamilyDescriptor> column_families;
-                  column_families.push_back(
-                      ColumnFamilyDescriptor(kDefaultColumnFamilyName, cf_options));
-                  std::vector<ColumnFamilyHandle*> handles;
-                  auto open_s =
-                      DBImpl::Open(db_options, dbname, column_families, &handles,
-                                   &db, seq_per_batch, true /* batch_per_txn */);
-                  ASSERT_OK(open_s);
-                  assert(handles.size() == 1);
-                  delete handles[0];
-
-                  db_impl = dynamic_cast<DBImpl*>(db);
-                  ASSERT_TRUE(db_impl);
-
-                  // Writers that have called JoinBatchGroup.
-                  std::atomic<uint64_t> threads_joining(0);
-                  // Writers that have linked to the queue
-                  std::atomic<uint64_t> threads_linked(0);
-                  // Writers that pass WriteThread::JoinBatchGroup:Wait sync-point.
-                  std::atomic<uint64_t> threads_verified(0);
-
-                  std::atomic<uint64_t> seq(db_impl->GetLatestSequenceNumber());
-                  ASSERT_EQ(db_impl->GetLatestSequenceNumber(), 0);
-
-                  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
-                      "WriteThread::JoinBatchGroup:Start", [&](void*) {
-                        uint64_t cur_threads_joining = threads_joining.fetch_add(1);
-                        // Wait for the last joined writer to link to the queue.
-                        // In this way the writers link to the queue one by one.
-                        // This allows us to confidently detect the first writer
-                        // who increases threads_linked as the leader.
-                        while (threads_linked.load() < cur_threads_joining) {
-                        }
-                      });
-
-                  // Verification once writers call JoinBatchGroup.
-                  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
-                      "WriteThread::JoinBatchGroup:Wait", [&](void* arg) {
-                        uint64_t cur_threads_linked = threads_linked.fetch_add(1);
-                        bool is_leader = false;
-                        bool is_last = false;
-
-                        // who am i
-                        is_leader = (cur_threads_linked == 0);
-                        is_last = (cur_threads_linked == write_group.size() - 1);
-
-                        // check my state
-                        auto* writer = reinterpret_cast<WriteThread::Writer*>(arg);
-
-                        if (is_leader) {
-                          ASSERT_TRUE(writer->state ==
-                                      WriteThread::State::STATE_GROUP_LEADER);
-                        } else {
-                          ASSERT_TRUE(writer->state ==
-                                      WriteThread::State::STATE_INIT);
-                        }
-
-                        // (meta test) the first WriteOP should indeed be the first
-                        // and the last should be the last (all others can be out of
-                        // order)
-                        if (is_leader) {
-                          ASSERT_TRUE(writer->callback->Callback(nullptr).ok() ==
-                                      !write_group.front().callback_.should_fail_);
-                        } else if (is_last) {
-                          ASSERT_TRUE(writer->callback->Callback(nullptr).ok() ==
-                                      !write_group.back().callback_.should_fail_);
-                        }
-
-                        threads_verified.fetch_add(1);
-                        // Wait here until all verification in this sync-point
-                        // callback finish for all writers.
-                        while (threads_verified.load() < write_group.size()) {
-                        }
-                      });
-
-                  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
-                      "WriteThread::JoinBatchGroup:DoneWaiting", [&](void* arg) {
-                        // check my state
-                        auto* writer = reinterpret_cast<WriteThread::Writer*>(arg);
-
-                        if (!allow_batching) {
-                          // no batching so everyone should be a leader
-                          ASSERT_TRUE(writer->state ==
-                                      WriteThread::State::STATE_GROUP_LEADER);
-                        } else if (!allow_parallel) {
-                          ASSERT_TRUE(writer->state ==
-                                          WriteThread::State::STATE_COMPLETED ||
-                                      (enable_pipelined_write &&
-                                       writer->state ==
-                                           WriteThread::State::
-                                               STATE_MEMTABLE_WRITER_LEADER));
-                        }
-                      });
-
-                  std::atomic<uint32_t> thread_num(0);
-                  std::atomic<char> dummy_key(0);
-
-                  // Each write thread create a random write batch and write to DB
-                  // with a write callback.
-                  std::function<void()> write_with_callback_func = [&]() {
-                    uint32_t i = thread_num.fetch_add(1);
-                    Random rnd(i);
-
-                    // leaders gotta lead
-                    while (i > 0 && threads_verified.load() < 1) {
-                    }
-
-                    // loser has to lose
-                    while (i == write_group.size() - 1 &&
-                           threads_verified.load() < write_group.size() - 1) {
-                    }
-
-                    auto& write_op = write_group.at(i);
-                    write_op.Clear();
-                    write_op.callback_.allow_batching_ = allow_batching;
-
-                    // insert some keys
-                    for (uint32_t j = 0; j < rnd.Next() % 50; j++) {
-                      // grab unique key
-                      char my_key = dummy_key.fetch_add(1);
-
-                      string skey(5, my_key);
-                      string sval(10, my_key);
-                      write_op.Put(skey, sval);
-
-                      if (!write_op.callback_.should_fail_ && !seq_per_batch) {
-                        seq.fetch_add(1);
-                      }
-                    }
-                    if (!write_op.callback_.should_fail_ && seq_per_batch) {
-                      seq.fetch_add(1);
-                    }
-
-                    WriteOptions woptions;
-                    woptions.disableWAL = !enable_WAL;
-                    woptions.sync = enable_WAL;
-                    Status s;
-                    if (seq_per_batch) {
-                      class PublishSeqCallback : public PreReleaseCallback {
-                       public:
-                        PublishSeqCallback(DBImpl* db_impl_in)
-                            : db_impl_(db_impl_in) {}
-                        Status Callback(SequenceNumber last_seq, bool /*not used*/,
-                                        uint64_t, size_t /*index*/,
-                                        size_t /*total*/) override {
-                          db_impl_->SetLastPublishedSequence(last_seq);
-                          return Status::OK();
-                        }
-                        DBImpl* db_impl_;
-                      } publish_seq_callback(db_impl);
-                      // seq_per_batch requires a natural batch separator or Noop
-                      WriteBatchInternal::InsertNoop(&write_op.write_batch_);
-                      const size_t ONE_BATCH = 1;
-                      s = db_impl->WriteImpl(
-                          woptions, &write_op.write_batch_, &write_op.callback_,
-                          nullptr, 0, false, nullptr, ONE_BATCH,
-                          two_queues ? &publish_seq_callback : nullptr);
-                    } else {
-                      s = db_impl->WriteWithCallback(
-                          woptions, &write_op.write_batch_, &write_op.callback_);
-                    }
-
-                    if (write_op.callback_.should_fail_) {
-                      ASSERT_TRUE(s.IsBusy());
-                    } else {
-                      ASSERT_OK(s);
-                    }
-                  };
-
-                  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
-
-                  // do all the writes
-                  std::vector<port::Thread> threads;
-                  for (uint32_t i = 0; i < write_group.size(); i++) {
-                    threads.emplace_back(write_with_callback_func);
-                  }
-                  for (auto& t : threads) {
-                    t.join();
-                  }
-
-                  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
-
-                  // check for keys
-                  string value;
-                  for (auto& w : write_group) {
-                    ASSERT_TRUE(w.callback_.was_called_.load());
-                    for (auto& kvp : w.kvs_) {
-                      if (w.callback_.should_fail_) {
-                        ASSERT_TRUE(
-                            db->Get(read_options, kvp.first, &value).IsNotFound());
-                      } else {
-                        ASSERT_OK(db->Get(read_options, kvp.first, &value));
-                        ASSERT_EQ(value, kvp.second);
-                      }
-                    }
-                  }
-
-                  ASSERT_EQ(seq.load(), db_impl->TEST_GetLastVisibleSequence());
-
-                  delete db;
-                  DestroyDB(dbname, options);
-                }
-              }
-            }
-          }
-        }
-      }
-    }
-  }
-}
+  for (auto& write_group : write_scenarios) {
+    Options options;
+    options.create_if_missing = true;
+    options.unordered_write = unordered_write_;
+    options.allow_concurrent_memtable_write = allow_parallel_;
+    options.enable_pipelined_write = enable_pipelined_write_;
+    options.two_write_queues = two_queues_;
+
+    // Skip unsupported combinations
+    if (options.enable_pipelined_write && seq_per_batch_) {
+      continue;
+    }
+    if (options.enable_pipelined_write && options.two_write_queues) {
+      continue;
+    }
+    if (options.unordered_write && !options.allow_concurrent_memtable_write) {
+      continue;
+    }
+    if (options.unordered_write && options.enable_pipelined_write) {
+      continue;
+    }
+
+    ReadOptions read_options;
+    DB* db;
+    DBImpl* db_impl;
+
+    DestroyDB(dbname, options);
+
+    DBOptions db_options(options);
+    ColumnFamilyOptions cf_options(options);
+    std::vector<ColumnFamilyDescriptor> column_families;
+    column_families.push_back(
+        ColumnFamilyDescriptor(kDefaultColumnFamilyName, cf_options));
+    std::vector<ColumnFamilyHandle*> handles;
+    auto open_s = DBImpl::Open(db_options, dbname, column_families, &handles,
+                               &db, seq_per_batch_, true /* batch_per_txn */);
+    ASSERT_OK(open_s);
+    assert(handles.size() == 1);
+    delete handles[0];
+
+    db_impl = dynamic_cast<DBImpl*>(db);
+    ASSERT_TRUE(db_impl);
+
+    // Writers that have called JoinBatchGroup.
+    std::atomic<uint64_t> threads_joining(0);
+    // Writers that have linked to the queue
+    std::atomic<uint64_t> threads_linked(0);
+    // Writers that pass WriteThread::JoinBatchGroup:Wait sync-point.
+    std::atomic<uint64_t> threads_verified(0);
+
+    std::atomic<uint64_t> seq(db_impl->GetLatestSequenceNumber());
+    ASSERT_EQ(db_impl->GetLatestSequenceNumber(), 0);
+
+    ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
+        "WriteThread::JoinBatchGroup:Start", [&](void*) {
+          uint64_t cur_threads_joining = threads_joining.fetch_add(1);
+          // Wait for the last joined writer to link to the queue.
+          // In this way the writers link to the queue one by one.
+          // This allows us to confidently detect the first writer
+          // who increases threads_linked as the leader.
+          while (threads_linked.load() < cur_threads_joining) {
+          }
+        });
+
+    // Verification once writers call JoinBatchGroup.
+    ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
+        "WriteThread::JoinBatchGroup:Wait", [&](void* arg) {
+          uint64_t cur_threads_linked = threads_linked.fetch_add(1);
+          bool is_leader = false;
+          bool is_last = false;
+
+          // who am i
+          is_leader = (cur_threads_linked == 0);
+          is_last = (cur_threads_linked == write_group.size() - 1);
+
+          // check my state
+          auto* writer = reinterpret_cast<WriteThread::Writer*>(arg);
+
+          if (is_leader) {
+            ASSERT_TRUE(writer->state ==
+                        WriteThread::State::STATE_GROUP_LEADER);
+          } else {
+            ASSERT_TRUE(writer->state == WriteThread::State::STATE_INIT);
+          }
+
+          // (meta test) the first WriteOP should indeed be the first
+          // and the last should be the last (all others can be out of
+          // order)
+          if (is_leader) {
+            ASSERT_TRUE(writer->callback->Callback(nullptr).ok() ==
+                        !write_group.front().callback_.should_fail_);
+          } else if (is_last) {
+            ASSERT_TRUE(writer->callback->Callback(nullptr).ok() ==
+                        !write_group.back().callback_.should_fail_);
+          }
+
+          threads_verified.fetch_add(1);
+          // Wait here until all verification in this sync-point
+          // callback finish for all writers.
+          while (threads_verified.load() < write_group.size()) {
+          }
+        });
+
+    ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
+        "WriteThread::JoinBatchGroup:DoneWaiting", [&](void* arg) {
+          // check my state
+          auto* writer = reinterpret_cast<WriteThread::Writer*>(arg);
+
+          if (!allow_batching_) {
+            // no batching so everyone should be a leader
+            ASSERT_TRUE(writer->state ==
+                        WriteThread::State::STATE_GROUP_LEADER);
+          } else if (!allow_parallel_) {
+            ASSERT_TRUE(writer->state == WriteThread::State::STATE_COMPLETED ||
+                        (enable_pipelined_write_ &&
+                         writer->state ==
+                             WriteThread::State::STATE_MEMTABLE_WRITER_LEADER));
+          }
+        });
+
+    std::atomic<uint32_t> thread_num(0);
+    std::atomic<char> dummy_key(0);
+
+    // Each write thread create a random write batch and write to DB
+    // with a write callback.
+    std::function<void()> write_with_callback_func = [&]() {
+      uint32_t i = thread_num.fetch_add(1);
+      Random rnd(i);
+
+      // leaders gotta lead
+      while (i > 0 && threads_verified.load() < 1) {
+      }
+
+      // loser has to lose
+      while (i == write_group.size() - 1 &&
+             threads_verified.load() < write_group.size() - 1) {
+      }
+
+      auto& write_op = write_group.at(i);
+      write_op.Clear();
+      write_op.callback_.allow_batching_ = allow_batching_;
+
+      // insert some keys
+      for (uint32_t j = 0; j < rnd.Next() % 50; j++) {
+        // grab unique key
+        char my_key = dummy_key.fetch_add(1);
+
+        string skey(5, my_key);
+        string sval(10, my_key);
+        write_op.Put(skey, sval);
+
+        if (!write_op.callback_.should_fail_ && !seq_per_batch_) {
+          seq.fetch_add(1);
+        }
+      }
+      if (!write_op.callback_.should_fail_ && seq_per_batch_) {
+        seq.fetch_add(1);
+      }
+
+      WriteOptions woptions;
+      woptions.disableWAL = !enable_WAL_;
+      woptions.sync = enable_WAL_;
+      Status s;
+      if (seq_per_batch_) {
+        class PublishSeqCallback : public PreReleaseCallback {
+         public:
+          PublishSeqCallback(DBImpl* db_impl_in) : db_impl_(db_impl_in) {}
+          Status Callback(SequenceNumber last_seq, bool /*not used*/, uint64_t,
+                          size_t /*index*/, size_t /*total*/) override {
+            db_impl_->SetLastPublishedSequence(last_seq);
+            return Status::OK();
+          }
+          DBImpl* db_impl_;
+        } publish_seq_callback(db_impl);
+        // seq_per_batch_ requires a natural batch separator or Noop
+        WriteBatchInternal::InsertNoop(&write_op.write_batch_);
+        const size_t ONE_BATCH = 1;
+        s = db_impl->WriteImpl(woptions, &write_op.write_batch_,
+                               &write_op.callback_, nullptr, 0, false, nullptr,
+                               ONE_BATCH,
+                               two_queues_ ? &publish_seq_callback : nullptr);
+      } else {
+        s = db_impl->WriteWithCallback(woptions, &write_op.write_batch_,
+                                       &write_op.callback_);
+      }
+
+      if (write_op.callback_.should_fail_) {
+        ASSERT_TRUE(s.IsBusy());
+      } else {
+        ASSERT_OK(s);
+      }
+    };
+
+    ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
+
+    // do all the writes
+    std::vector<port::Thread> threads;
+    for (uint32_t i = 0; i < write_group.size(); i++) {
+      threads.emplace_back(write_with_callback_func);
+    }
+    for (auto& t : threads) {
+      t.join();
+    }
+
+    ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
+
+    // check for keys
+    string value;
+    for (auto& w : write_group) {
+      ASSERT_TRUE(w.callback_.was_called_.load());
+      for (auto& kvp : w.kvs_) {
+        if (w.callback_.should_fail_) {
+          ASSERT_TRUE(db->Get(read_options, kvp.first, &value).IsNotFound());
+        } else {
+          ASSERT_OK(db->Get(read_options, kvp.first, &value));
+          ASSERT_EQ(value, kvp.second);
+        }
+      }
+    }
+
+    ASSERT_EQ(seq.load(), db_impl->TEST_GetLastVisibleSequence());
+
+    delete db;
+    DestroyDB(dbname, options);
+  }
+}
+
+INSTANTIATE_TEST_CASE_P(WriteCallbackPTest, WriteCallbackPTest,
+                        ::testing::Combine(::testing::Bool(), ::testing::Bool(),
+                                           ::testing::Bool(), ::testing::Bool(),
+                                           ::testing::Bool(), ::testing::Bool(),
+                                           ::testing::Bool()));
 
 TEST_F(WriteCallbackTest, WriteCallBackTest) {
   Options options;
   WriteOptions write_options;
