unordered_write incompatible with max_successive_merges (#6284)

Summary:
unordered_write is incompatible with a non-zero max_successive_merges. Although we check this at runtime, we currently don't prevent the user from setting this combination in the options. This has led stress tests to fail when this combination is tried via ::SetOptions.
The patch fixes that, and also reverts the changes made in https://github.com/facebook/rocksdb/pull/6254, in which max_successive_merges was mistakenly declared incompatible with allow_concurrent_memtable_write.
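
For illustration only (this sketch is not part of the patch), here is roughly how the rejected combination surfaces once the options are validated up front; the database path and the main() harness are hypothetical:

#include <iostream>

#include "rocksdb/db.h"
#include "rocksdb/options.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  // The incompatible combination: unordered_write together with a
  // non-zero max_successive_merges.
  options.unordered_write = true;
  options.max_successive_merges = 3;

  rocksdb::DB* db = nullptr;
  // With the ValidateOptions check in place, Open is expected to return
  // InvalidArgument instead of letting the combination fail later at runtime.
  rocksdb::Status s =
      rocksdb::DB::Open(options, "/tmp/unordered_write_demo", &db);
  std::cout << s.ToString() << std::endl;  // e.g. "Invalid argument: ..."
  delete db;
  return 0;
}

Before this change the same combination was only caught at runtime, which is what the stress test tripped over when it set max_successive_merges through SetOptions.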
Pull Request resolved: https://github.com/facebook/rocksdb/pull/6284

Differential Revision: D19356115

Pulled By: maysamyabandeh

fbshipit-source-id: f06dadec777622bd75f267361c022735cf8cecb6
Maysam Yabandeh (committed by Facebook Github Bot)
parent 687119aeaf
commit eff5e076f5
 db/column_family.cc                        | 10
 db/db_merge_operator_test.cc               |  2
 db/db_test2.cc                             |  2
 db/merge_test.cc                           |  3
 test_util/testutil.cc                      |  3
 utilities/transactions/transaction_test.cc |  2

db/column_family.cc

@@ -160,11 +160,6 @@ Status CheckConcurrentWritesSupported(const ColumnFamilyOptions& cf_options) {
     return Status::InvalidArgument(
         "Memtable doesn't concurrent writes (allow_concurrent_memtable_write)");
   }
-  if (cf_options.max_successive_merges != 0) {
-    return Status::InvalidArgument(
-        "max_successive_merges > 0 is incompatible "
-        "with concurrent writes (allow_concurrent_memtable_write)");
-  }
   return Status::OK();
 }
@@ -1252,6 +1247,11 @@ Status ColumnFamilyData::ValidateOptions(
   if (s.ok() && db_options.allow_concurrent_memtable_write) {
     s = CheckConcurrentWritesSupported(cf_options);
   }
+  if (s.ok() && db_options.unordered_write &&
+      cf_options.max_successive_merges != 0) {
+    s = Status::InvalidArgument(
+        "max_successive_merges > 0 is incompatible with unordered_write");
+  }
   if (s.ok()) {
     s = CheckCFPathsSupported(db_options, cf_options);
   }

db/db_merge_operator_test.cc

@@ -150,8 +150,6 @@ TEST_F(DBMergeOperatorTest, MergeErrorOnWrite) {
   options.create_if_missing = true;
   options.merge_operator.reset(new TestPutOperator());
   options.max_successive_merges = 3;
-  // allow_concurrent_memtable_write is incompatible with max_successive_merges
-  options.allow_concurrent_memtable_write = false;
   options.env = env_;
   Reopen(options);
   ASSERT_OK(Merge("k1", "v1"));

db/db_test2.cc

@@ -153,8 +153,6 @@ TEST_F(DBTest2, MaxSuccessiveMergesChangeWithDBRecovery) {
   options.create_if_missing = true;
   options.statistics = rocksdb::CreateDBStatistics();
   options.max_successive_merges = 3;
-  // allow_concurrent_memtable_write is incompatible with max_successive_merges
-  options.allow_concurrent_memtable_write = false;
   options.merge_operator = MergeOperators::CreatePutOperator();
   options.disable_auto_compactions = true;
   DestroyAndReopen(options);

db/merge_test.cc

@@ -78,9 +78,6 @@ std::shared_ptr<DB> OpenDb(const std::string& dbname, const bool ttl = false,
   options.create_if_missing = true;
   options.merge_operator = std::make_shared<CountMergeOperator>();
   options.max_successive_merges = max_successive_merges;
-  if (max_successive_merges > 0) {
-    options.allow_concurrent_memtable_write = false;
-  }
   Status s;
   DestroyDB(dbname, Options());
   // DBWithTTL is not supported in ROCKSDB_LITE

test_util/testutil.cc

@@ -351,8 +351,7 @@ void RandomInitCFOptions(ColumnFamilyOptions* cf_opt, DBOptions& db_options,
   // size_t options
   cf_opt->arena_block_size = rnd->Uniform(10000);
   cf_opt->inplace_update_num_locks = rnd->Uniform(10000);
-  cf_opt->max_successive_merges =
-      db_options.allow_concurrent_memtable_write ? 0 : rnd->Uniform(10000);
+  cf_opt->max_successive_merges = rnd->Uniform(10000);
   cf_opt->memtable_huge_page_size = rnd->Uniform(10000);
   cf_opt->write_buffer_size = rnd->Uniform(10000);

utilities/transactions/transaction_test.cc

@@ -5820,7 +5820,7 @@ TEST_P(TransactionTest, DuplicateKeys) {
     }  // do_rollback
   }  // do_prepare
-  if (!options.unordered_write && !options.allow_concurrent_memtable_write) {
+  if (!options.unordered_write) {
     // Also test with max_successive_merges > 0. max_successive_merges will not
     // affect our algorithm for duplicate key insertion but we add the test to
     // verify that.
