diff --git a/db/column_family.cc b/db/column_family.cc
index 1611125cd..05e0966bf 100644
--- a/db/column_family.cc
+++ b/db/column_family.cc
@@ -160,6 +160,11 @@ Status CheckConcurrentWritesSupported(const ColumnFamilyOptions& cf_options) {
     return Status::InvalidArgument(
         "Memtable doesn't concurrent writes (allow_concurrent_memtable_write)");
   }
+  if (cf_options.max_successive_merges != 0) {
+    return Status::InvalidArgument(
+        "max_successive_merges > 0 is incompatible "
+        "with concurrent writes (allow_concurrent_memtable_write)");
+  }
   return Status::OK();
 }
 
diff --git a/db/db_merge_operator_test.cc b/db/db_merge_operator_test.cc
index 8358ddb56..114c7e811 100644
--- a/db/db_merge_operator_test.cc
+++ b/db/db_merge_operator_test.cc
@@ -150,6 +150,8 @@ TEST_F(DBMergeOperatorTest, MergeErrorOnWrite) {
   options.create_if_missing = true;
   options.merge_operator.reset(new TestPutOperator());
   options.max_successive_merges = 3;
+  // allow_concurrent_memtable_write is incompatible with max_successive_merges
+  options.allow_concurrent_memtable_write = false;
   options.env = env_;
   Reopen(options);
   ASSERT_OK(Merge("k1", "v1"));
diff --git a/db/db_test2.cc b/db/db_test2.cc
index b30dac6b5..464aa23a9 100644
--- a/db/db_test2.cc
+++ b/db/db_test2.cc
@@ -153,6 +153,8 @@ TEST_F(DBTest2, MaxSuccessiveMergesChangeWithDBRecovery) {
   options.create_if_missing = true;
   options.statistics = rocksdb::CreateDBStatistics();
   options.max_successive_merges = 3;
+  // allow_concurrent_memtable_write is incompatible with max_successive_merges
+  options.allow_concurrent_memtable_write = false;
   options.merge_operator = MergeOperators::CreatePutOperator();
   options.disable_auto_compactions = true;
   DestroyAndReopen(options);
diff --git a/db/merge_test.cc b/db/merge_test.cc
index 2965045d9..228cb5e30 100644
--- a/db/merge_test.cc
+++ b/db/merge_test.cc
@@ -78,6 +78,9 @@ std::shared_ptr<DB> OpenDb(const std::string& dbname, const bool ttl = false,
   options.create_if_missing = true;
   options.merge_operator = std::make_shared<CountMergeOperator>();
   options.max_successive_merges = max_successive_merges;
+  if (max_successive_merges > 0) {
+    options.allow_concurrent_memtable_write = false;
+  }
   Status s;
   DestroyDB(dbname, Options());
   // DBWithTTL is not supported in ROCKSDB_LITE
diff --git a/db_stress_tool/db_stress_test_base.cc b/db_stress_tool/db_stress_test_base.cc
index c0ecb926a..3e070ee26 100644
--- a/db_stress_tool/db_stress_test_base.cc
+++ b/db_stress_tool/db_stress_test_base.cc
@@ -1946,6 +1946,7 @@ void StressTest::Open() {
       }
       s = TransactionDB::Open(options_, txn_db_options, FLAGS_db,
                               cf_descriptors, &column_families_, &txn_db_);
+      assert(s.ok());
       db_ = txn_db_;
       // after a crash, rollback to commit recovered transactions
       std::vector<Transaction*> trans;
diff --git a/test_util/testutil.cc b/test_util/testutil.cc
index e309df088..a4be935f9 100644
--- a/test_util/testutil.cc
+++ b/test_util/testutil.cc
@@ -351,7 +351,8 @@ void RandomInitCFOptions(ColumnFamilyOptions* cf_opt, DBOptions& db_options,
   // size_t options
   cf_opt->arena_block_size = rnd->Uniform(10000);
   cf_opt->inplace_update_num_locks = rnd->Uniform(10000);
-  cf_opt->max_successive_merges = rnd->Uniform(10000);
+  cf_opt->max_successive_merges =
+      db_options.allow_concurrent_memtable_write ? 0 : rnd->Uniform(10000);
   cf_opt->memtable_huge_page_size = rnd->Uniform(10000);
   cf_opt->write_buffer_size = rnd->Uniform(10000);
 
diff --git a/utilities/transactions/transaction_test.cc b/utilities/transactions/transaction_test.cc
index da1edd185..72fd0b546 100644
--- a/utilities/transactions/transaction_test.cc
+++ b/utilities/transactions/transaction_test.cc
@@ -5820,7 +5820,7 @@ TEST_P(TransactionTest, DuplicateKeys) {
     }  // do_rollback
   }    // do_prepare
 
-  if (!options.unordered_write) {
+  if (!options.unordered_write && !options.allow_concurrent_memtable_write) {
     // Also test with max_successive_merges > 0. max_successive_merges will not
     // affect our algorithm for duplicate key insertion but we add the test to
     // verify that.
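
Note (illustration, not part of the diff): with the new check in CheckConcurrentWritesSupported(), a column family that sets max_successive_merges > 0 can only be opened when concurrent memtable writes are disabled. A minimal caller-side sketch follows, assuming the public RocksDB C++ API; the helper name is hypothetical and the merge operator factory is the same in-tree helper the tests above use.

// Sketch only: OpenWithMergeLimit is a hypothetical helper, not part of this change.
#include <string>

#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "utilities/merge_operators.h"  // in-tree factory used by db_test2.cc above

rocksdb::Status OpenWithMergeLimit(const std::string& path, rocksdb::DB** db) {
  rocksdb::Options options;
  options.create_if_missing = true;
  options.merge_operator = rocksdb::MergeOperators::CreatePutOperator();
  options.max_successive_merges = 3;
  // allow_concurrent_memtable_write defaults to true; leaving it enabled would
  // now make Open() fail with Status::InvalidArgument from the check added in
  // db/column_family.cc above.
  options.allow_concurrent_memtable_write = false;
  return rocksdb::DB::Open(options, path, db);
}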