From 34f9da1ceff202921509ff055711cdc6e34ec577 Mon Sep 17 00:00:00 2001
From: Yueh-Hsuan Chiang
Date: Tue, 25 Mar 2014 11:50:09 -0700
Subject: [PATCH 1/4] Fix the failure of stringappend_test caused by PartialMergeMulti.

Summary: Fix a bug where PartialMergeMulti would try to merge the first
operand with an empty slice.

Test Plan: run stringappend_test and merge_test.

Reviewers: haobo, igor

Reviewed By: haobo

CC: leveldb

Differential Revision: https://reviews.facebook.net/D17157
---
 db/merge_operator.cc | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/db/merge_operator.cc b/db/merge_operator.cc
index 43a8df371..d96b165a6 100644
--- a/db/merge_operator.cc
+++ b/db/merge_operator.cc
@@ -20,8 +20,10 @@ bool MergeOperator::PartialMergeMulti(const Slice& key,
                                        Logger* logger) const {
   // Simply loop through the operands
   std::string temp_value;
-  Slice temp_slice;
-  for (const auto& operand : operand_list) {
+  Slice temp_slice(operand_list[0]);
+
+  for (int i = 1; i < operand_list.size(); ++i) {
+    auto& operand = operand_list[i];
     if (!PartialMerge(key, temp_slice, operand, &temp_value, logger)) {
       return false;
     }
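
The fixed loop is a left fold over the operand list: the running value is
seeded with operand_list[0], and PartialMerge is then applied to operands
[1, n), so the first operand is never merged against an empty left-hand
slice. Below is a minimal standalone sketch of the same idea, not RocksDB
code; the ToyPartialMerge/ToyPartialMergeMulti names and the ','-delimited
concatenation are purely illustrative assumptions.

    #include <cassert>
    #include <deque>
    #include <string>

    // Toy stand-in for MergeOperator::PartialMerge: joins two serialized
    // operands with a ',' delimiter.
    static bool ToyPartialMerge(const std::string& left, const std::string& right,
                                std::string* result) {
      *result = left + "," + right;
      return true;
    }

    // Mirrors the fixed loop: seed the running value with operand 0, then
    // fold operands [1, n) into it one at a time.
    static bool ToyPartialMergeMulti(const std::deque<std::string>& operands,
                                     std::string* new_value) {
      assert(!operands.empty());
      std::string temp = operands[0];
      for (size_t i = 1; i < operands.size(); ++i) {
        std::string merged;
        if (!ToyPartialMerge(temp, operands[i], &merged)) {
          return false;
        }
        temp.swap(merged);
      }
      new_value->swap(temp);
      return true;
    }

With operands {"a", "b", "c"} this sketch yields "a,b,c"; folding from an
empty initial value, as the old loop did, would produce ",a,b,c" here,
illustrating how merging the first operand against an empty slice corrupts
the result.
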
From d9ca83df28c5df9b1f0d402f089b867c7c457938 Mon Sep 17 00:00:00 2001
From: Danny Guo
Date: Tue, 25 Mar 2014 11:34:31 -0700
Subject: [PATCH 2/4] [rocksdb] make init prefix more robust

Summary: Currently, if a client uses kNullString as the prefix, it confuses
compaction filter v2. This diff adds a bool to indicate whether the prefix
has been initialized, and a unit test to cover this case and make sure the
new code path is hit.

Test Plan: db_test

Reviewers: igor, haobo

Reviewed By: igor

CC: leveldb

Differential Revision: https://reviews.facebook.net/D17151
---
 db/db_impl.cc |  4 +++-
 db/db_test.cc | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 53 insertions(+), 1 deletion(-)

diff --git a/db/db_impl.cc b/db/db_impl.cc
index 95d228ccf..d1003bfdc 100644
--- a/db/db_impl.cc
+++ b/db/db_impl.cc
@@ -2898,6 +2898,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
   compact->CleanupBatchBuffer();
   compact->CleanupMergedBuffer();
   compact->cur_prefix_ = kNullString;
+  bool prefix_initialized = false;
 
   int64_t imm_micros = 0;  // Micros spent doing imm_ compactions
   Log(options_.info_log,
@@ -2986,8 +2987,9 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
       const SliceTransform* transformer =
           options_.compaction_filter_factory_v2->GetPrefixExtractor();
       std::string key_prefix = transformer->Transform(key).ToString();
-      if (compact->cur_prefix_ == kNullString) {
+      if (!prefix_initialized) {
         compact->cur_prefix_ = key_prefix;
+        prefix_initialized = true;
       }
       if (!ParseInternalKey(key, &ikey)) {
         // log error
diff --git a/db/db_test.cc b/db/db_test.cc
index f881aed98..d3bbf8382 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -3816,6 +3816,56 @@ TEST(DBTest, CompactionFilterV2WithValueChange) {
   }
 }
 
+TEST(DBTest, CompactionFilterV2NULLPrefix) {
+  Options options = CurrentOptions();
+  options.num_levels = 3;
+  options.max_mem_compaction_level = 0;
+  auto prefix_extractor = NewFixedPrefixTransform(8);
+  options.compaction_filter_factory_v2 =
+      std::make_shared<ChangeFilterFactoryV2>(prefix_extractor);
+  // In a testing environment, we can only flush the application
+  // compaction filter buffer using universal compaction
+  option_config_ = kUniversalCompaction;
+  options.compaction_style = (rocksdb::CompactionStyle)1;
+  Reopen(&options);
+
+  // Write 100K+1 keys, these are written to a few files
+  // in L0. We do this so that the current snapshot points
+  // to the 100001 key. The compaction filter is not invoked
+  // on keys that are visible via a snapshot because we
+  // anyways cannot delete it.
+  const std::string value(10, 'x');
+  char first_key[100];
+  snprintf(first_key, sizeof(first_key), "%s0000%010d", "NULL", 1);
+  Put(first_key, value);
+  for (int i = 1; i < 100000; i++) {
+    char key[100];
+    snprintf(key, sizeof(key), "%08d%010d", i, i);
+    Put(key, value);
+  }
+
+  char last_key[100];
+  snprintf(last_key, sizeof(last_key), "%s0000%010d", "NULL", 2);
+  Put(last_key, value);
+
+  // push all files to lower levels
+  dbfull()->TEST_FlushMemTable();
+  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
+
+  // verify that all keys now have the new value that
+  // was set by the compaction process.
+  std::string newvalue = Get(first_key);
+  ASSERT_EQ(newvalue.compare(NEW_VALUE), 0);
+  newvalue = Get(last_key);
+  ASSERT_EQ(newvalue.compare(NEW_VALUE), 0);
+  for (int i = 1; i < 100000; i++) {
+    char key[100];
+    snprintf(key, sizeof(key), "%08d%010d", i, i);
+    std::string newvalue = Get(key);
+    ASSERT_EQ(newvalue.compare(NEW_VALUE), 0);
+  }
+}
+
 TEST(DBTest, SparseMerge) {
   do {
     Options options = CurrentOptions();

From 5c44a8db61d529dab0323aaba63097a582017cc7 Mon Sep 17 00:00:00 2001
From: Igor Canadi
Date: Tue, 25 Mar 2014 12:53:23 -0700
Subject: [PATCH 3/4] fallocate_with_keep_size is false for LogWrites

---
 util/env_posix.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/util/env_posix.cc b/util/env_posix.cc
index 856d49250..237038fcb 100644
--- a/util/env_posix.cc
+++ b/util/env_posix.cc
@@ -1363,7 +1363,7 @@ class PosixEnv : public Env {
   EnvOptions OptimizeForLogWrite(const EnvOptions& env_options) const {
     EnvOptions optimized = env_options;
     optimized.use_mmap_writes = false;
-    optimized.fallocate_with_keep_size = true;
+    optimized.fallocate_with_keep_size = false;
     return optimized;
   }
 

From b9ce156e381ad1f4d7baad0f285c3f429812ea4f Mon Sep 17 00:00:00 2001
From: Yueh-Hsuan Chiang
Date: Tue, 25 Mar 2014 13:38:28 -0700
Subject: [PATCH 4/4] Add assert to MergeOperator::PartialMergeMulti to check # of operands.

Summary: Add assert(operand_list.size() >= 2) in
MergeOperator::PartialMergeMulti to ensure it is only called when there are
at least two merge operands.

Test Plan: run merge_test and stringappend_test.

Reviewers: haobo, igor

Reviewed By: igor

CC: leveldb

Differential Revision: https://reviews.facebook.net/D17169
---
 db/merge_operator.cc | 1 +
 1 file changed, 1 insertion(+)

diff --git a/db/merge_operator.cc b/db/merge_operator.cc
index d96b165a6..a01d389e9 100644
--- a/db/merge_operator.cc
+++ b/db/merge_operator.cc
@@ -18,6 +18,7 @@ bool MergeOperator::PartialMergeMulti(const Slice& key,
                                       const std::deque<Slice>& operand_list,
                                       std::string* new_value,
                                       Logger* logger) const {
+  assert(operand_list.size() >= 2);
   // Simply loop through the operands
   std::string temp_value;
   Slice temp_slice(operand_list[0]);
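
With this assert in place, the at-least-two-operands precondition of
PartialMergeMulti is explicit, so a caller has to special-case shorter
operand lists itself. The sketch below shows one way a caller could respect
that contract; CombineOperands is a hypothetical helper written for this
note, not a RocksDB API, and the pass-through behavior for a single operand
is an assumption.

    #include <deque>
    #include <string>

    #include "rocksdb/merge_operator.h"
    #include "rocksdb/slice.h"

    // Hypothetical caller-side guard: only invoke PartialMergeMulti when the
    // ">= 2 operands" precondition holds; a single operand is passed through.
    bool CombineOperands(const rocksdb::MergeOperator& op,
                         const rocksdb::Slice& key,
                         const std::deque<rocksdb::Slice>& operands,
                         std::string* new_value, rocksdb::Logger* logger) {
      if (operands.empty()) {
        return false;  // nothing to combine
      }
      if (operands.size() == 1) {
        new_value->assign(operands[0].data(), operands[0].size());
        return true;
      }
      return op.PartialMergeMulti(key, operands, new_value, logger);
    }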