From d2346c2cf0490de4823fd449c2082cd82a871cc2 Mon Sep 17 00:00:00 2001
From: Venkatesh Radhakrishnan
Date: Fri, 1 May 2015 15:41:50 -0700
Subject: [PATCH] Fix hang with large write batches and column families.

Summary: This diff fixes a hang reported by a Github user.
https://github.com/facebook/rocksdb/issues/595#issuecomment-96983273
Multiple large write batches with column families cause a hang.
The issue was caused by not doing flushes/compaction when the write
controller was stopped.

Test Plan: Create a DBTest from the user's test case

Reviewers: igor

Reviewed By: igor

Subscribers: dhruba, leveldb

Differential Revision: https://reviews.facebook.net/D37929
---
 db/db_impl.cc |  5 +++++
 db/db_test.cc | 32 ++++++++++++++++++++++++++++++++
 2 files changed, 37 insertions(+)

diff --git a/db/db_impl.cc b/db/db_impl.cc
index 146db6133..61f107428 100644
--- a/db/db_impl.cc
+++ b/db/db_impl.cc
@@ -3202,6 +3202,11 @@ Status DBImpl::Write(const WriteOptions& write_options, WriteBatch* my_batch) {
   if (UNLIKELY(status.ok()) &&
       (write_controller_.IsStopped() ||
        write_controller_.GetDelay() > 0)) {
+    // If writer is stopped, we need to get it going,
+    // so schedule flushes/compactions
+    if (context.schedule_bg_work_) {
+      MaybeScheduleFlushOrCompaction();
+    }
     status = DelayWrite(expiration_time);
   }

diff --git a/db/db_test.cc b/db/db_test.cc
index 10f504eb3..1e7cbd243 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -12714,6 +12714,38 @@ TEST_F(DBTest, HugeNumberOfLevels) {
   ASSERT_OK(db_->CompactRange(nullptr, nullptr));
 }

+// Github issue #595
+// Large write batch with column families
+TEST_F(DBTest, LargeBatchWithColumnFamilies) {
+  Options options;
+  options.env = env_;
+  options = CurrentOptions(options);
+  options.write_buffer_size = 100000;  // Small write buffer
+  CreateAndReopenWithCF({"pikachu"}, options);
+  int64_t j = 0;
+  for (int i = 0; i < 5; i++) {
+    for (int pass = 1; pass <= 3; pass++) {
+      WriteBatch batch;
+      size_t write_size = 1024 * 1024 * (5 + i);
+      fprintf(stderr, "prepare: %ld MB, pass:%d\n", (write_size / 1024 / 1024),
+              pass);
+      for (;;) {
+        std::string data(3000, j++ % 127 + 20);
+        data += std::to_string(j);
+        batch.Put(handles_[0], Slice(data), Slice(data));
+        if (batch.GetDataSize() > write_size) {
+          break;
+        }
+      }
+      fprintf(stderr, "write: %ld MB\n", (batch.GetDataSize() / 1024 / 1024));
+      ASSERT_OK(dbfull()->Write(WriteOptions(), &batch));
+      fprintf(stderr, "done\n");
+    }
+  }
+  // make sure we can re-open it.
+  ASSERT_OK(TryReopenWithColumnFamilies({"default", "pikachu"}, options));
+}
+
 } // namespace rocksdb

 int main(int argc, char** argv) {