diff --git a/db/db_impl.cc b/db/db_impl.cc index 95d228ccf..d1003bfdc 100644 --- a/db/db_impl.cc +++ b/db/db_impl.cc @@ -2898,6 +2898,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact, compact->CleanupBatchBuffer(); compact->CleanupMergedBuffer(); compact->cur_prefix_ = kNullString; + bool prefix_initialized = false; int64_t imm_micros = 0; // Micros spent doing imm_ compactions Log(options_.info_log, @@ -2986,8 +2987,9 @@ Status DBImpl::DoCompactionWork(CompactionState* compact, const SliceTransform* transformer = options_.compaction_filter_factory_v2->GetPrefixExtractor(); std::string key_prefix = transformer->Transform(key).ToString(); - if (compact->cur_prefix_ == kNullString) { + if (!prefix_initialized) { compact->cur_prefix_ = key_prefix; + prefix_initialized = true; } if (!ParseInternalKey(key, &ikey)) { // log error diff --git a/db/db_test.cc b/db/db_test.cc index f881aed98..d3bbf8382 100644 --- a/db/db_test.cc +++ b/db/db_test.cc @@ -3816,6 +3816,56 @@ TEST(DBTest, CompactionFilterV2WithValueChange) { } } +TEST(DBTest, CompactionFilterV2NULLPrefix) { + Options options = CurrentOptions(); + options.num_levels = 3; + options.max_mem_compaction_level = 0; + auto prefix_extractor = NewFixedPrefixTransform(8); + options.compaction_filter_factory_v2 = + std::make_shared<ChangeFilterFactoryV2>(prefix_extractor); + // In a testing environment, we can only flush the application + // compaction filter buffer using universal compaction + option_config_ = kUniversalCompaction; + options.compaction_style = (rocksdb::CompactionStyle)1; + Reopen(&options); + + // Write 100K+1 keys, these are written to a few files + // in L0. We do this so that the current snapshot points + // to the 100001 key. The compaction filter is not invoked + // on keys that are visible via a snapshot because we + // anyways cannot delete it.
+ const std::string value(10, 'x'); + char first_key[100]; + snprintf(first_key, sizeof(first_key), "%s0000%010d", "NULL", 1); + Put(first_key, value); + for (int i = 1; i < 100000; i++) { + char key[100]; + snprintf(key, sizeof(key), "%08d%010d", i, i); + Put(key, value); + } + + char last_key[100]; + snprintf(last_key, sizeof(last_key), "%s0000%010d", "NULL", 2); + Put(last_key, value); + + // push all files to lower levels + dbfull()->TEST_FlushMemTable(); + dbfull()->TEST_CompactRange(0, nullptr, nullptr); + + // verify that all keys now have the new value that + // was set by the compaction process. + std::string newvalue = Get(first_key); + ASSERT_EQ(newvalue.compare(NEW_VALUE), 0); + newvalue = Get(last_key); + ASSERT_EQ(newvalue.compare(NEW_VALUE), 0); + for (int i = 1; i < 100000; i++) { + char key[100]; + snprintf(key, sizeof(key), "%08d%010d", i, i); + std::string newvalue = Get(key); + ASSERT_EQ(newvalue.compare(NEW_VALUE), 0); + } +} + TEST(DBTest, SparseMerge) { do { Options options = CurrentOptions();