diff --git a/HISTORY.md b/HISTORY.md index e873a20c2..21a4b4eec 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -7,6 +7,10 @@ ### Behavior Changes * Attempting to write a merge operand without explicitly configuring `merge_operator` now fails immediately, causing the DB to enter read-only mode. Previously, failure was deferred until the `merge_operator` was needed by a user read or a background operation. +### API Changes +* `rocksdb_approximate_sizes` and `rocksdb_approximate_sizes_cf` in the C API now require an error pointer (`char** errptr`) for receiving any error. +* All overloads of `DB::GetApproximateSizes` now return `Status`, so that any failure to obtain the sizes is indicated to the caller. + ### Bug Fixes * Truncated WALs ending in incomplete records can no longer produce gaps in the recovered data when `WALRecoveryMode::kPointInTimeRecovery` is used. Gaps are still possible when WALs are truncated exactly on record boundaries; for complete protection, users should enable `track_and_verify_wals_in_manifest`. * Fix a bug where compressed blocks read by MultiGet are not inserted into the compressed block cache when use_direct_reads = true. 
diff --git a/Makefile b/Makefile index 1964ffe20..ac98608b2 100644 --- a/Makefile +++ b/Makefile @@ -612,7 +612,12 @@ ifdef ASSERT_STATUS_CHECKED db_blob_basic_test \ db_blob_index_test \ db_block_cache_test \ + db_compaction_test \ + db_compaction_filter_test \ + db_dynamic_level_test \ db_flush_test \ + db_inplace_update_test \ + db_io_failure_test \ db_iterator_test \ db_logical_block_size_cache_test \ db_memtable_test \ @@ -629,6 +634,7 @@ ifdef ASSERT_STATUS_CHECKED deletefile_test \ external_sst_file_test \ options_file_test \ + db_sst_test \ db_statistics_test \ db_table_properties_test \ db_tailing_iter_test \ diff --git a/db/c.cc b/db/c.cc index e9f520e25..b93b42980 100644 --- a/db/c.cc +++ b/db/c.cc @@ -1388,34 +1388,39 @@ char* rocksdb_property_value_cf( } } -void rocksdb_approximate_sizes( - rocksdb_t* db, - int num_ranges, - const char* const* range_start_key, const size_t* range_start_key_len, - const char* const* range_limit_key, const size_t* range_limit_key_len, - uint64_t* sizes) { +void rocksdb_approximate_sizes(rocksdb_t* db, int num_ranges, + const char* const* range_start_key, + const size_t* range_start_key_len, + const char* const* range_limit_key, + const size_t* range_limit_key_len, + uint64_t* sizes, char** errptr) { Range* ranges = new Range[num_ranges]; for (int i = 0; i < num_ranges; i++) { ranges[i].start = Slice(range_start_key[i], range_start_key_len[i]); ranges[i].limit = Slice(range_limit_key[i], range_limit_key_len[i]); } - db->rep->GetApproximateSizes(ranges, num_ranges, sizes); + Status s = db->rep->GetApproximateSizes(ranges, num_ranges, sizes); + if (!s.ok()) { + SaveError(errptr, s); + } delete[] ranges; } void rocksdb_approximate_sizes_cf( - rocksdb_t* db, - rocksdb_column_family_handle_t* column_family, - int num_ranges, - const char* const* range_start_key, const size_t* range_start_key_len, - const char* const* range_limit_key, const size_t* range_limit_key_len, - uint64_t* sizes) { + rocksdb_t* db, 
rocksdb_column_family_handle_t* column_family, + int num_ranges, const char* const* range_start_key, + const size_t* range_start_key_len, const char* const* range_limit_key, + const size_t* range_limit_key_len, uint64_t* sizes, char** errptr) { Range* ranges = new Range[num_ranges]; for (int i = 0; i < num_ranges; i++) { ranges[i].start = Slice(range_start_key[i], range_start_key_len[i]); ranges[i].limit = Slice(range_limit_key[i], range_limit_key_len[i]); } - db->rep->GetApproximateSizes(column_family->rep, ranges, num_ranges, sizes); + Status s = db->rep->GetApproximateSizes(column_family->rep, ranges, + num_ranges, sizes); + if (!s.ok()) { + SaveError(errptr, s); + } delete[] ranges; } diff --git a/db/c_test.c b/db/c_test.c index c060eb50d..df2c0e4d5 100644 --- a/db/c_test.c +++ b/db/c_test.c @@ -988,7 +988,9 @@ int main(int argc, char** argv) { &err); CheckNoError(err); } - rocksdb_approximate_sizes(db, 2, start, start_len, limit, limit_len, sizes); + rocksdb_approximate_sizes(db, 2, start, start_len, limit, limit_len, sizes, + &err); + CheckNoError(err); CheckCondition(sizes[0] > 0); CheckCondition(sizes[1] > 0); } diff --git a/db/compaction/compaction_job_stats_test.cc b/db/compaction/compaction_job_stats_test.cc index 1edb270fe..325cc247e 100644 --- a/db/compaction/compaction_job_stats_test.cc +++ b/db/compaction/compaction_job_stats_test.cc @@ -297,15 +297,14 @@ class CompactionJobStatsTest : public testing::Test, return result; } - uint64_t Size(const Slice& start, const Slice& limit, int cf = 0) { + Status Size(uint64_t* size, const Slice& start, const Slice& limit, + int cf = 0) { Range r(start, limit); - uint64_t size; if (cf == 0) { - db_->GetApproximateSizes(&r, 1, &size); + return db_->GetApproximateSizes(&r, 1, size); } else { - db_->GetApproximateSizes(handles_[1], &r, 1, &size); + return db_->GetApproximateSizes(handles_[1], &r, 1, size); } - return size; } void Compact(int cf, const Slice& start, const Slice& limit, diff --git 
a/db/db_compaction_filter_test.cc b/db/db_compaction_filter_test.cc index edcce837b..24f190979 100644 --- a/db/db_compaction_filter_test.cc +++ b/db/db_compaction_filter_test.cc @@ -42,7 +42,7 @@ class DBTestCompactionFilterWithCompactParam option_config_ == kUniversalSubcompactions) { assert(options.max_subcompactions > 1); } - TryReopen(options); + Reopen(options); } }; @@ -276,7 +276,7 @@ TEST_F(DBTestCompactionFilter, CompactionFilter) { for (int i = 0; i < 100000; i++) { char key[100]; snprintf(key, sizeof(key), "B%010d", i); - Put(1, key, value); + ASSERT_OK(Put(1, key, value)); } ASSERT_OK(Flush(1)); @@ -284,10 +284,10 @@ TEST_F(DBTestCompactionFilter, CompactionFilter) { // the compaction is each level invokes the filter for // all the keys in that level. cfilter_count = 0; - dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]); + ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1])); ASSERT_EQ(cfilter_count, 100000); cfilter_count = 0; - dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]); + ASSERT_OK(dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1])); ASSERT_EQ(cfilter_count, 100000); ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0); @@ -321,6 +321,7 @@ TEST_F(DBTestCompactionFilter, CompactionFilter) { } iter->Next(); } + ASSERT_OK(iter->status()); } ASSERT_EQ(total, 100000); ASSERT_EQ(count, 0); @@ -337,10 +338,10 @@ TEST_F(DBTestCompactionFilter, CompactionFilter) { // means that all keys should pass at least once // via the compaction filter cfilter_count = 0; - dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]); + ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1])); ASSERT_EQ(cfilter_count, 100000); cfilter_count = 0; - dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]); + ASSERT_OK(dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1])); ASSERT_EQ(cfilter_count, 100000); ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0); ASSERT_EQ(NumTableFilesAtLevel(1, 1), 0); @@ 
-369,10 +370,10 @@ TEST_F(DBTestCompactionFilter, CompactionFilter) { // verify that at the end of the compaction process, // nothing is left. cfilter_count = 0; - dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]); + ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1])); ASSERT_EQ(cfilter_count, 100000); cfilter_count = 0; - dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]); + ASSERT_OK(dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1])); ASSERT_EQ(cfilter_count, 0); ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0); ASSERT_EQ(NumTableFilesAtLevel(1, 1), 0); @@ -387,6 +388,7 @@ TEST_F(DBTestCompactionFilter, CompactionFilter) { count++; iter->Next(); } + ASSERT_OK(iter->status()); ASSERT_EQ(count, 0); } @@ -427,9 +429,9 @@ TEST_F(DBTestCompactionFilter, CompactionFilterDeletesAll) { // put some data for (int table = 0; table < 4; ++table) { for (int i = 0; i < 10 + table; ++i) { - Put(ToString(table * 100 + i), "val"); + ASSERT_OK(Put(ToString(table * 100 + i), "val")); } - Flush(); + ASSERT_OK(Flush()); } // this will produce empty file (delete compaction filter) @@ -440,6 +442,7 @@ TEST_F(DBTestCompactionFilter, CompactionFilterDeletesAll) { Iterator* itr = db_->NewIterator(ReadOptions()); itr->SeekToFirst(); + ASSERT_OK(itr->status()); // empty db ASSERT_TRUE(!itr->Valid()); @@ -463,25 +466,25 @@ TEST_P(DBTestCompactionFilterWithCompactParam, for (int i = 0; i < 100001; i++) { char key[100]; snprintf(key, sizeof(key), "B%010d", i); - Put(1, key, value); + ASSERT_OK(Put(1, key, value)); } // push all files to lower levels ASSERT_OK(Flush(1)); if (option_config_ != kUniversalCompactionMultiLevel && option_config_ != kUniversalSubcompactions) { - dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]); - dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]); + ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1])); + ASSERT_OK(dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1])); 
} else { - dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr, - nullptr); + ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), handles_[1], + nullptr, nullptr)); } // re-write all data again for (int i = 0; i < 100001; i++) { char key[100]; snprintf(key, sizeof(key), "B%010d", i); - Put(1, key, value); + ASSERT_OK(Put(1, key, value)); } // push all files to lower levels. This should @@ -489,11 +492,11 @@ TEST_P(DBTestCompactionFilterWithCompactParam, ASSERT_OK(Flush(1)); if (option_config_ != kUniversalCompactionMultiLevel && option_config_ != kUniversalSubcompactions) { - dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]); - dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]); + ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1])); + ASSERT_OK(dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1])); } else { - dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr, - nullptr); + ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), handles_[1], + nullptr, nullptr)); } // verify that all keys now have the new value that @@ -531,7 +534,7 @@ TEST_F(DBTestCompactionFilter, CompactionFilterWithMergeOperator) { ASSERT_OK(Flush()); std::string newvalue = Get("foo"); ASSERT_EQ(newvalue, three); - dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr); + ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr)); newvalue = Get("foo"); ASSERT_EQ(newvalue, three); @@ -539,12 +542,12 @@ TEST_F(DBTestCompactionFilter, CompactionFilterWithMergeOperator) { // merge keys. 
ASSERT_OK(db_->Put(WriteOptions(), "bar", two)); ASSERT_OK(Flush()); - dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr); + ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr)); newvalue = Get("bar"); ASSERT_EQ("NOT_FOUND", newvalue); ASSERT_OK(db_->Merge(WriteOptions(), "bar", two)); ASSERT_OK(Flush()); - dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr); + ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr)); newvalue = Get("bar"); ASSERT_EQ(two, two); @@ -555,7 +558,7 @@ TEST_F(DBTestCompactionFilter, CompactionFilterWithMergeOperator) { ASSERT_OK(Flush()); newvalue = Get("foobar"); ASSERT_EQ(newvalue, three); - dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr); + ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr)); newvalue = Get("foobar"); ASSERT_EQ(newvalue, three); @@ -568,7 +571,7 @@ TEST_F(DBTestCompactionFilter, CompactionFilterWithMergeOperator) { ASSERT_OK(Flush()); newvalue = Get("barfoo"); ASSERT_EQ(newvalue, four); - dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr); + ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr)); newvalue = Get("barfoo"); ASSERT_EQ(newvalue, four); } @@ -590,21 +593,21 @@ TEST_F(DBTestCompactionFilter, CompactionFilterContextManual) { for (int i = 0; i < num_keys_per_file; i++) { char key[100]; snprintf(key, sizeof(key), "B%08d%02d", i, j); - Put(key, value); + ASSERT_OK(Put(key, value)); } - dbfull()->TEST_FlushMemTable(); + ASSERT_OK(dbfull()->TEST_FlushMemTable()); // Make sure next file is much smaller so automatic compaction will not // be triggered. 
num_keys_per_file /= 2; } - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); // Force a manual compaction cfilter_count = 0; filter->expect_manual_compaction_.store(true); filter->expect_full_compaction_.store(true); filter->expect_cf_id_.store(0); - dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr); + ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr)); ASSERT_EQ(cfilter_count, 700); ASSERT_EQ(NumSortedRuns(0), 1); ASSERT_TRUE(filter->compaction_filter_created()); @@ -654,14 +657,14 @@ TEST_F(DBTestCompactionFilter, CompactionFilterContextCfId) { for (int i = 0; i < num_keys_per_file; i++) { char key[100]; snprintf(key, sizeof(key), "B%08d%02d", i, j); - Put(1, key, value); + ASSERT_OK(Put(1, key, value)); } - Flush(1); + ASSERT_OK(Flush(1)); // Make sure next file is much smaller so automatic compaction will not // be triggered. num_keys_per_file /= 2; } - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_TRUE(filter->compaction_filter_created()); } @@ -680,9 +683,9 @@ TEST_F(DBTestCompactionFilter, CompactionFilterIgnoreSnapshot) { const Snapshot* snapshot = nullptr; for (int table = 0; table < 4; ++table) { for (int i = 0; i < 10; ++i) { - Put(ToString(table * 100 + i), "val"); + ASSERT_OK(Put(ToString(table * 100 + i), "val")); } - Flush(); + ASSERT_OK(Flush()); if (table == 0) { snapshot = db_->GetSnapshot(); @@ -702,6 +705,7 @@ TEST_F(DBTestCompactionFilter, CompactionFilterIgnoreSnapshot) { read_options.snapshot = snapshot; std::unique_ptr iter(db_->NewIterator(read_options)); iter->SeekToFirst(); + ASSERT_OK(iter->status()); int count = 0; while (iter->Valid()) { count++; @@ -710,6 +714,7 @@ TEST_F(DBTestCompactionFilter, CompactionFilterIgnoreSnapshot) { ASSERT_EQ(count, 6); read_options.snapshot = nullptr; std::unique_ptr iter1(db_->NewIterator(read_options)); + ASSERT_OK(iter1->status()); iter1->SeekToFirst(); count = 0; while (iter1->Valid()) { @@ 
-740,9 +745,9 @@ TEST_F(DBTestCompactionFilter, SkipUntil) { for (int i = table * 6; i < 39 + table * 11; ++i) { char key[100]; snprintf(key, sizeof(key), "%010d", table * 100 + i); - Put(key, std::to_string(table * 1000 + i)); + ASSERT_OK(Put(key, std::to_string(table * 1000 + i))); } - Flush(); + ASSERT_OK(Flush()); } cfilter_skips = 0; @@ -781,10 +786,10 @@ TEST_F(DBTestCompactionFilter, SkipUntilWithBloomFilter) { options.create_if_missing = true; DestroyAndReopen(options); - Put("0000000010", "v10"); - Put("0000000020", "v20"); // skipped - Put("0000000050", "v50"); - Flush(); + ASSERT_OK(Put("0000000010", "v10")); + ASSERT_OK(Put("0000000020", "v20")); // skipped + ASSERT_OK(Put("0000000050", "v50")); + ASSERT_OK(Flush()); cfilter_skips = 0; EXPECT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr)); @@ -822,13 +827,13 @@ TEST_F(DBTestCompactionFilter, IgnoreSnapshotsFalse) { options.compaction_filter = new TestNotSupportedFilter(); DestroyAndReopen(options); - Put("a", "v10"); - Put("z", "v20"); - Flush(); + ASSERT_OK(Put("a", "v10")); + ASSERT_OK(Put("z", "v20")); + ASSERT_OK(Flush()); - Put("a", "v10"); - Put("z", "v20"); - Flush(); + ASSERT_OK(Put("a", "v10")); + ASSERT_OK(Put("z", "v20")); + ASSERT_OK(Flush()); // Comapction should fail because IgnoreSnapshots() = false EXPECT_TRUE(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr) diff --git a/db/db_compaction_test.cc b/db/db_compaction_test.cc index 725811c56..c62a723af 100644 --- a/db/db_compaction_test.cc +++ b/db/db_compaction_test.cc @@ -279,7 +279,7 @@ void VerifyCompactionStats(ColumnFamilyData& cfd, const CompactionStatsCollector& collector) { #ifndef NDEBUG InternalStats* internal_stats_ptr = cfd.internal_stats(); - ASSERT_TRUE(internal_stats_ptr != nullptr); + ASSERT_NE(internal_stats_ptr, nullptr); const std::vector& comp_stats = internal_stats_ptr->TEST_GetCompactionStats(); const int num_of_reasons = static_cast(CompactionReason::kNumOfReasons); @@ -351,16 +351,16 @@ 
TEST_P(DBCompactionTestWithParam, CompactionDeletionTrigger) { values.push_back(rnd.RandomString(kCDTValueSize)); ASSERT_OK(Put(Key(k), values[k])); } - dbfull()->TEST_WaitForFlushMemTable(); - dbfull()->TEST_WaitForCompact(); - db_size[0] = Size(Key(0), Key(kTestSize - 1)); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable()); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); + ASSERT_OK(Size(Key(0), Key(kTestSize - 1), &db_size[0])); for (int k = 0; k < kTestSize; ++k) { ASSERT_OK(Delete(Key(k))); } - dbfull()->TEST_WaitForFlushMemTable(); - dbfull()->TEST_WaitForCompact(); - db_size[1] = Size(Key(0), Key(kTestSize - 1)); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable()); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); + ASSERT_OK(Size(Key(0), Key(kTestSize - 1), &db_size[1])); // must have much smaller db size. ASSERT_GT(db_size[0] / 3, db_size[1]); @@ -410,8 +410,9 @@ TEST_P(DBCompactionTestWithParam, CompactionsPreserveDeletes) { cro.bottommost_level_compaction = BottommostLevelCompaction::kForceOptimized; - dbfull()->TEST_WaitForFlushMemTable(); - dbfull()->CompactRange(cro, nullptr, nullptr); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable()); + ASSERT_TRUE( + dbfull()->CompactRange(cro, nullptr, nullptr).IsInvalidArgument()); // check that normal user iterator doesn't see anything Iterator* db_iter = dbfull()->NewIterator(ReadOptions()); @@ -419,6 +420,7 @@ TEST_P(DBCompactionTestWithParam, CompactionsPreserveDeletes) { for (db_iter->SeekToFirst(); db_iter->Valid(); db_iter->Next()) { i++; } + ASSERT_OK(db_iter->status()); ASSERT_EQ(i, 0); delete db_iter; @@ -426,6 +428,7 @@ TEST_P(DBCompactionTestWithParam, CompactionsPreserveDeletes) { ReadOptions ro; ro.iter_start_seqnum=1; db_iter = dbfull()->NewIterator(ro); + ASSERT_OK(db_iter->status()); i = 0; for (db_iter->SeekToFirst(); db_iter->Valid(); db_iter->Next()) { i++; @@ -435,9 +438,10 @@ TEST_P(DBCompactionTestWithParam, CompactionsPreserveDeletes) { // now all deletes should be gone 
SetPreserveDeletesSequenceNumber(100000000); - dbfull()->CompactRange(cro, nullptr, nullptr); + ASSERT_NOK(dbfull()->CompactRange(cro, nullptr, nullptr)); db_iter = dbfull()->NewIterator(ro); + ASSERT_TRUE(db_iter->status().IsInvalidArgument()); i = 0; for (db_iter->SeekToFirst(); db_iter->Valid(); db_iter->Next()) { i++; @@ -523,8 +527,8 @@ TEST_F(DBCompactionTest, TestTableReaderForCompaction) { ASSERT_OK(Put(Key(10 - k), "bar")); if (k < options.level0_file_num_compaction_trigger - 1) { num_table_cache_lookup = 0; - Flush(); - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(Flush()); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); // preloading iterator issues one table cache lookup and create // a new table reader, if not preloaded. int old_num_table_cache_lookup = num_table_cache_lookup; @@ -542,8 +546,8 @@ TEST_F(DBCompactionTest, TestTableReaderForCompaction) { num_table_cache_lookup = 0; num_new_table_reader = 0; - Flush(); - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(Flush()); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); // Preloading iterator issues one table cache lookup and creates // a new table reader. One file is created for flush and one for compaction. // Compaction inputs make no table cache look-up for data/range deletion @@ -570,7 +574,7 @@ TEST_F(DBCompactionTest, TestTableReaderForCompaction) { cro.change_level = true; cro.target_level = 2; cro.bottommost_level_compaction = BottommostLevelCompaction::kForceOptimized; - db_->CompactRange(cro, nullptr, nullptr); + ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr)); // Only verifying compaction outputs issues one table cache lookup // for both data block and range deletion block). // May preload table cache too. 
@@ -611,9 +615,9 @@ TEST_P(DBCompactionTestWithParam, CompactionDeletionTriggerReopen) { values.push_back(rnd.RandomString(kCDTValueSize)); ASSERT_OK(Put(Key(k), values[k])); } - dbfull()->TEST_WaitForFlushMemTable(); - dbfull()->TEST_WaitForCompact(); - db_size[0] = Size(Key(0), Key(kTestSize - 1)); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable()); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); + ASSERT_OK(Size(Key(0), Key(kTestSize - 1), &db_size[0])); Close(); // round 2 --- disable auto-compactions and issue deletions. @@ -624,7 +628,7 @@ TEST_P(DBCompactionTestWithParam, CompactionDeletionTriggerReopen) { for (int k = 0; k < kTestSize; ++k) { ASSERT_OK(Delete(Key(k))); } - db_size[1] = Size(Key(0), Key(kTestSize - 1)); + ASSERT_OK(Size(Key(0), Key(kTestSize - 1), &db_size[1])); Close(); // as auto_compaction is off, we shouldn't see too much reduce // in db size. @@ -638,9 +642,9 @@ TEST_P(DBCompactionTestWithParam, CompactionDeletionTriggerReopen) { for (int k = 0; k < kTestSize / 10; ++k) { ASSERT_OK(Put(Key(k), values[k])); } - dbfull()->TEST_WaitForFlushMemTable(); - dbfull()->TEST_WaitForCompact(); - db_size[2] = Size(Key(0), Key(kTestSize - 1)); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable()); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); + ASSERT_OK(Size(Key(0), Key(kTestSize - 1), &db_size[2])); // this time we're expecting significant drop in size. 
ASSERT_GT(db_size[0] / 3, db_size[2]); } @@ -658,7 +662,7 @@ TEST_F(DBCompactionTest, CompactRangeBottomPri) { CompactRangeOptions cro; cro.change_level = true; cro.target_level = 2; - dbfull()->CompactRange(cro, nullptr, nullptr); + ASSERT_OK(dbfull()->CompactRange(cro, nullptr, nullptr)); } ASSERT_EQ("0,0,3", FilesPerLevel(0)); @@ -691,7 +695,7 @@ TEST_F(DBCompactionTest, CompactRangeBottomPri) { }); SyncPoint::GetInstance()->EnableProcessing(); env_->SetBackgroundThreads(1, Env::Priority::BOTTOM); - dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr); + ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr)); ASSERT_EQ(1, low_pri_count); ASSERT_EQ(1, bottom_pri_count); ASSERT_EQ("0,0,2", FilesPerLevel(0)); @@ -699,12 +703,12 @@ TEST_F(DBCompactionTest, CompactRangeBottomPri) { // Recompact bottom most level uses bottom pool CompactRangeOptions cro; cro.bottommost_level_compaction = BottommostLevelCompaction::kForce; - dbfull()->CompactRange(cro, nullptr, nullptr); + ASSERT_OK(dbfull()->CompactRange(cro, nullptr, nullptr)); ASSERT_EQ(1, low_pri_count); ASSERT_EQ(2, bottom_pri_count); env_->SetBackgroundThreads(0, Env::Priority::BOTTOM); - dbfull()->CompactRange(cro, nullptr, nullptr); + ASSERT_OK(dbfull()->CompactRange(cro, nullptr, nullptr)); // Low pri pool is used if bottom pool has size 0. ASSERT_EQ(2, low_pri_count); ASSERT_EQ(2, bottom_pri_count); @@ -729,9 +733,9 @@ TEST_F(DBCompactionTest, DisableStatsUpdateReopen) { values.push_back(rnd.RandomString(kCDTValueSize)); ASSERT_OK(Put(Key(k), values[k])); } - dbfull()->TEST_WaitForFlushMemTable(); - dbfull()->TEST_WaitForCompact(); - db_size[0] = Size(Key(0), Key(kTestSize - 1)); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable()); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); + ASSERT_OK(Size(Key(0), Key(kTestSize - 1), &db_size[0])); Close(); // round 2 --- disable auto-compactions and issue deletions. 
@@ -744,7 +748,7 @@ TEST_F(DBCompactionTest, DisableStatsUpdateReopen) { for (int k = 0; k < kTestSize; ++k) { ASSERT_OK(Delete(Key(k))); } - db_size[1] = Size(Key(0), Key(kTestSize - 1)); + ASSERT_OK(Size(Key(0), Key(kTestSize - 1), &db_size[1])); Close(); // as auto_compaction is off, we shouldn't see too much reduce // in db size. @@ -754,9 +758,9 @@ TEST_F(DBCompactionTest, DisableStatsUpdateReopen) { // deletion compensation still work. options.disable_auto_compactions = false; Reopen(options); - dbfull()->TEST_WaitForFlushMemTable(); - dbfull()->TEST_WaitForCompact(); - db_size[2] = Size(Key(0), Key(kTestSize - 1)); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable()); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); + ASSERT_OK(Size(Key(0), Key(kTestSize - 1), &db_size[2])); if (options.skip_stats_update_on_db_open) { // If update stats on DB::Open is disable, we don't expect @@ -794,7 +798,7 @@ TEST_P(DBCompactionTestWithParam, CompactionTrigger) { } // put extra key to trigger flush ASSERT_OK(Put(1, "", "")); - dbfull()->TEST_WaitForFlushMemTable(handles_[1]); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable(handles_[1])); ASSERT_EQ(NumTableFilesAtLevel(0, 1), num + 1); } @@ -806,7 +810,7 @@ TEST_P(DBCompactionTestWithParam, CompactionTrigger) { } // put extra key to trigger flush ASSERT_OK(Put(1, "", "")); - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0); ASSERT_EQ(NumTableFilesAtLevel(1, 1), 1); @@ -848,7 +852,7 @@ TEST_F(DBCompactionTest, BGCompactionsAllowed) { } // put extra key to trigger flush ASSERT_OK(Put(cf, "", "")); - dbfull()->TEST_WaitForFlushMemTable(handles_[cf]); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable(handles_[cf])); ASSERT_EQ(NumTableFilesAtLevel(0, cf), num + 1); } } @@ -865,7 +869,7 @@ TEST_F(DBCompactionTest, BGCompactionsAllowed) { } // put extra key to trigger flush ASSERT_OK(Put(2, "", "")); - dbfull()->TEST_WaitForFlushMemTable(handles_[2]); + 
ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable(handles_[2])); ASSERT_EQ(options.level0_file_num_compaction_trigger + num + 1, NumTableFilesAtLevel(0, 2)); } @@ -876,7 +880,7 @@ TEST_F(DBCompactionTest, BGCompactionsAllowed) { sleeping_tasks[i].WakeUp(); sleeping_tasks[i].WaitUntilDone(); } - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); // Verify number of compactions allowed will come back to 1. @@ -893,7 +897,7 @@ TEST_F(DBCompactionTest, BGCompactionsAllowed) { } // put extra key to trigger flush ASSERT_OK(Put(cf, "", "")); - dbfull()->TEST_WaitForFlushMemTable(handles_[cf]); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable(handles_[cf])); ASSERT_EQ(NumTableFilesAtLevel(0, cf), num + 1); } } @@ -926,8 +930,8 @@ TEST_P(DBCompactionTestWithParam, CompactionsGenerateMultipleFiles) { // Reopening moves updates to level-0 ReopenWithColumnFamilies({"default", "pikachu"}, options); - dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1], - true /* disallow trivial move */); + ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1], + true /* disallow trivial move */)); ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0); ASSERT_GT(NumTableFilesAtLevel(1, 1), 1); @@ -971,27 +975,27 @@ TEST_F(DBCompactionTest, UserKeyCrossFile1) { DestroyAndReopen(options); // create first file and flush to l0 - Put("4", "A"); - Put("3", "A"); - Flush(); - dbfull()->TEST_WaitForFlushMemTable(); - - Put("2", "A"); - Delete("3"); - Flush(); - dbfull()->TEST_WaitForFlushMemTable(); + ASSERT_OK(Put("4", "A")); + ASSERT_OK(Put("3", "A")); + ASSERT_OK(Flush()); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable()); + + ASSERT_OK(Put("2", "A")); + ASSERT_OK(Delete("3")); + ASSERT_OK(Flush()); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable()); ASSERT_EQ("NOT_FOUND", Get("3")); // move both files down to l1 - dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr); + ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr)); 
ASSERT_EQ("NOT_FOUND", Get("3")); for (int i = 0; i < 3; i++) { - Put("2", "B"); - Flush(); - dbfull()->TEST_WaitForFlushMemTable(); + ASSERT_OK(Put("2", "B")); + ASSERT_OK(Flush()); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable()); } - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_EQ("NOT_FOUND", Get("3")); } @@ -1004,27 +1008,27 @@ TEST_F(DBCompactionTest, UserKeyCrossFile2) { DestroyAndReopen(options); // create first file and flush to l0 - Put("4", "A"); - Put("3", "A"); - Flush(); - dbfull()->TEST_WaitForFlushMemTable(); - - Put("2", "A"); - SingleDelete("3"); - Flush(); - dbfull()->TEST_WaitForFlushMemTable(); + ASSERT_OK(Put("4", "A")); + ASSERT_OK(Put("3", "A")); + ASSERT_OK(Flush()); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable()); + + ASSERT_OK(Put("2", "A")); + ASSERT_OK(SingleDelete("3")); + ASSERT_OK(Flush()); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable()); ASSERT_EQ("NOT_FOUND", Get("3")); // move both files down to l1 - dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr); + ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr)); ASSERT_EQ("NOT_FOUND", Get("3")); for (int i = 0; i < 3; i++) { - Put("2", "B"); - Flush(); - dbfull()->TEST_WaitForFlushMemTable(); + ASSERT_OK(Put("2", "B")); + ASSERT_OK(Flush()); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable()); } - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_EQ("NOT_FOUND", Get("3")); } @@ -1040,17 +1044,17 @@ TEST_F(DBCompactionTest, CompactionSstPartitioner) { DestroyAndReopen(options); // create first file and flush to l0 - Put("aaaa1", "A"); - Put("bbbb1", "B"); - Flush(); - dbfull()->TEST_WaitForFlushMemTable(); + ASSERT_OK(Put("aaaa1", "A")); + ASSERT_OK(Put("bbbb1", "B")); + ASSERT_OK(Flush()); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable()); - Put("aaaa1", "A2"); - Flush(); - dbfull()->TEST_WaitForFlushMemTable(); + ASSERT_OK(Put("aaaa1", "A2")); + ASSERT_OK(Flush()); + 
ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable()); // move both files down to l1 - dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr); + ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr)); std::vector files; dbfull()->GetLiveFilesMetaData(&files); @@ -1070,11 +1074,11 @@ TEST_F(DBCompactionTest, CompactionSstPartitionerNonTrivial) { DestroyAndReopen(options); // create first file and flush to l0 - Put("aaaa1", "A"); - Put("bbbb1", "B"); - Flush(); - dbfull()->TEST_WaitForFlushMemTable(); - dbfull()->TEST_WaitForCompact(true); + ASSERT_OK(Put("aaaa1", "A")); + ASSERT_OK(Put("bbbb1", "B")); + ASSERT_OK(Flush()); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable()); + ASSERT_OK(dbfull()->TEST_WaitForCompact(true)); std::vector files; dbfull()->GetLiveFilesMetaData(&files); @@ -1104,22 +1108,23 @@ TEST_F(DBCompactionTest, ZeroSeqIdCompaction) { // create first file and flush to l0 for (auto& key : {"1", "2", "3", "3", "3", "3"}) { - Put(key, std::string(key_len, 'A')); + ASSERT_OK(Put(key, std::string(key_len, 'A'))); snaps.push_back(dbfull()->GetSnapshot()); } - Flush(); - dbfull()->TEST_WaitForFlushMemTable(); + ASSERT_OK(Flush()); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable()); // create second file and flush to l0 for (auto& key : {"3", "4", "5", "6", "7", "8"}) { - Put(key, std::string(key_len, 'A')); + ASSERT_OK(Put(key, std::string(key_len, 'A'))); snaps.push_back(dbfull()->GetSnapshot()); } - Flush(); - dbfull()->TEST_WaitForFlushMemTable(); + ASSERT_OK(Flush()); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable()); // move both files down to l1 - dbfull()->CompactFiles(compact_opt, collector->GetFlushedFiles(), 1); + ASSERT_OK( + dbfull()->CompactFiles(compact_opt, collector->GetFlushedFiles(), 1)); // release snap so that first instance of key(3) can have seqId=0 for (auto snap : snaps) { @@ -1128,12 +1133,12 @@ TEST_F(DBCompactionTest, ZeroSeqIdCompaction) { // create 3 files in l0 so to trigger compaction for (int i = 0; 
i < options.level0_file_num_compaction_trigger; i++) { - Put("2", std::string(1, 'A')); - Flush(); - dbfull()->TEST_WaitForFlushMemTable(); + ASSERT_OK(Put("2", std::string(1, 'A'))); + ASSERT_OK(Flush()); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable()); } - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_OK(Put("", "")); } @@ -1148,12 +1153,12 @@ TEST_F(DBCompactionTest, ManualCompactionUnknownOutputSize) { for (int i = 0; i < 2; ++i) { for (int j = 0; j < options.level0_file_num_compaction_trigger; j++) { // make l0 files' ranges overlap to avoid trivial move - Put(std::to_string(2 * i), std::string(1, 'A')); - Put(std::to_string(2 * i + 1), std::string(1, 'A')); - Flush(); - dbfull()->TEST_WaitForFlushMemTable(); + ASSERT_OK(Put(std::to_string(2 * i), std::string(1, 'A'))); + ASSERT_OK(Put(std::to_string(2 * i + 1), std::string(1, 'A'))); + ASSERT_OK(Flush()); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable()); } - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_EQ(NumTableFilesAtLevel(0, 0), 0); ASSERT_EQ(NumTableFilesAtLevel(1, 0), i + 1); } @@ -1169,7 +1174,7 @@ TEST_F(DBCompactionTest, ManualCompactionUnknownOutputSize) { // note CompactionOptions::output_file_size_limit is unset. 
CompactionOptions compact_opt; compact_opt.compression = kNoCompression; - dbfull()->CompactFiles(compact_opt, input_filenames, 1); + ASSERT_OK(dbfull()->CompactFiles(compact_opt, input_filenames, 1)); } // Check that writes done during a memtable compaction are recovered @@ -1230,7 +1235,7 @@ TEST_P(DBCompactionTestWithParam, TrivialMoveOneFile) { cro.exclusive_manual_compaction = exclusive_manual_compaction_; // Compaction will initiate a trivial move from L0 to L1 - dbfull()->CompactRange(cro, nullptr, nullptr); + ASSERT_OK(dbfull()->CompactRange(cro, nullptr, nullptr)); // File moved From L0 to L1 ASSERT_EQ(NumTableFilesAtLevel(0, 0), 0); // 0 files in L0 @@ -1299,7 +1304,7 @@ TEST_P(DBCompactionTestWithParam, TrivialMoveNonOverlappingFiles) { // Since data is non-overlapping we expect compaction to initiate // a trivial move - db_->CompactRange(cro, nullptr, nullptr); + ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr)); // We expect that all the files were trivially moved from L0 to L1 ASSERT_EQ(NumTableFilesAtLevel(0, 0), 0); ASSERT_EQ(NumTableFilesAtLevel(1, 0) /* level1_files */, level0_files); @@ -1336,7 +1341,7 @@ TEST_P(DBCompactionTestWithParam, TrivialMoveNonOverlappingFiles) { ASSERT_OK(Flush()); } - db_->CompactRange(cro, nullptr, nullptr); + ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr)); for (size_t i = 0; i < ranges.size(); i++) { for (int32_t j = ranges[i].first; j <= ranges[i].second; j++) { @@ -1540,8 +1545,8 @@ TEST_P(DBCompactionTestWithParam, ManualCompactionPartial) { ASSERT_EQ("3,0,0,0,0,1,2", FilesPerLevel(0)); TEST_SYNC_POINT("DBCompaction::ManualPartial:5"); - dbfull()->TEST_WaitForFlushMemTable(); - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable()); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); // After two non-trivial compactions are installed, there is 1 file in L6, and // 1 file in L1 ASSERT_EQ("0,1,0,0,0,0,1", FilesPerLevel(0)); @@ -1654,7 +1659,7 @@ TEST_F(DBCompactionTest, 
DISABLED_ManualPartialFill) { for (int32_t j = 300; j < 4300; j++) { if (j == 2300) { ASSERT_OK(Flush()); - dbfull()->TEST_WaitForFlushMemTable(); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable()); } values[j] = rnd.RandomString(value_size); ASSERT_OK(Put(Key(j), values[j])); @@ -1670,8 +1675,8 @@ TEST_F(DBCompactionTest, DISABLED_ManualPartialFill) { } TEST_SYNC_POINT("DBCompaction::PartialFill:2"); - dbfull()->TEST_WaitForFlushMemTable(); - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable()); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); threads.join(); for (int32_t i = 0; i < 4300; i++) { @@ -1689,12 +1694,12 @@ TEST_F(DBCompactionTest, ManualCompactionWithUnorderedWrite) { Options options = CurrentOptions(); options.unordered_write = true; DestroyAndReopen(options); - Put("foo", "v1"); + ASSERT_OK(Put("foo", "v1")); ASSERT_OK(Flush()); - Put("bar", "v1"); + ASSERT_OK(Put("bar", "v1")); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing(); - port::Thread writer([&]() { Put("foo", "v2"); }); + port::Thread writer([&]() { ASSERT_OK(Put("foo", "v2")); }); TEST_SYNC_POINT( "DBCompactionTest::ManualCompactionWithUnorderedWrite:WaitWriteWAL"); @@ -1760,15 +1765,15 @@ TEST_F(DBCompactionTest, DeleteFileRange) { for (int32_t j = 300; j < 4300; j++) { if (j == 2300) { ASSERT_OK(Flush()); - dbfull()->TEST_WaitForFlushMemTable(); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable()); } values[j] = rnd.RandomString(value_size); ASSERT_OK(Put(Key(j), values[j])); } } ASSERT_OK(Flush()); - dbfull()->TEST_WaitForFlushMemTable(); - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable()); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); // Verify level sizes uint64_t target_size = 4 * options.max_bytes_for_level_base; @@ -1778,7 +1783,7 @@ TEST_F(DBCompactionTest, DeleteFileRange) { options.max_bytes_for_level_multiplier); } - size_t old_num_files = CountFiles(); + const size_t old_num_files = CountFiles(); 
std::string begin_string = Key(1000); std::string end_string = Key(2000); Slice begin(begin_string); @@ -1813,7 +1818,7 @@ TEST_F(DBCompactionTest, DeleteFileRange) { compact_options.change_level = true; compact_options.target_level = 1; ASSERT_OK(db_->CompactRange(compact_options, nullptr, nullptr)); - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_OK( DeleteFilesInRange(db_, db_->DefaultColumnFamily(), nullptr, nullptr)); @@ -1822,12 +1827,11 @@ TEST_F(DBCompactionTest, DeleteFileRange) { for (int32_t i = 0; i < 4300; i++) { ReadOptions roptions; std::string result; - Status s = db_->Get(roptions, Key(i), &result); - ASSERT_TRUE(s.IsNotFound()); + ASSERT_TRUE(db_->Get(roptions, Key(i), &result).IsNotFound()); deleted_count2++; } ASSERT_GT(deleted_count2, deleted_count); - size_t new_num_files = CountFiles(); + const size_t new_num_files = CountFiles(); ASSERT_GT(old_num_files, new_num_files); } @@ -1982,14 +1986,14 @@ TEST_F(DBCompactionTest, DeleteFileRangeFileEndpointsOverlapBug) { std::string vals[kNumL0Files]; for (int i = 0; i < kNumL0Files; ++i) { vals[i] = rnd.RandomString(kValSize); - Put(Key(i), vals[i]); - Put(Key(i + 1), vals[i]); - Flush(); + ASSERT_OK(Put(Key(i), vals[i])); + ASSERT_OK(Put(Key(i + 1), vals[i])); + ASSERT_OK(Flush()); if (i == 0) { snapshot = db_->GetSnapshot(); } } - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); // Verify `DeleteFilesInRange` can't drop only file 0 which would cause // "1 -> vals[0]" to reappear. @@ -2076,16 +2080,8 @@ TEST_P(DBCompactionTestWithParam, LevelCompactionThirdPath) { options.num_levels = 4; options.max_bytes_for_level_base = 400 * 1024; options.max_subcompactions = max_subcompactions_; - // options = CurrentOptions(options); - std::vector filenames; - env_->GetChildren(options.db_paths[1].path, &filenames); - // Delete archival files. 
- for (size_t i = 0; i < filenames.size(); ++i) { - env_->DeleteFile(options.db_paths[1].path + "/" + filenames[i]); - } - env_->DeleteDir(options.db_paths[1].path); - Reopen(options); + DestroyAndReopen(options); Random rnd(301); int key_idx = 0; @@ -2193,16 +2189,8 @@ TEST_P(DBCompactionTestWithParam, LevelCompactionPathUse) { options.num_levels = 4; options.max_bytes_for_level_base = 400 * 1024; options.max_subcompactions = max_subcompactions_; - // options = CurrentOptions(options); - std::vector filenames; - env_->GetChildren(options.db_paths[1].path, &filenames); - // Delete archival files. - for (size_t i = 0; i < filenames.size(); ++i) { - env_->DeleteFile(options.db_paths[1].path + "/" + filenames[i]); - } - env_->DeleteDir(options.db_paths[1].path); - Reopen(options); + DestroyAndReopen(options); Random rnd(301); int key_idx = 0; @@ -2439,7 +2427,7 @@ TEST_P(DBCompactionTestWithParam, ConvertCompactionStyle) { ASSERT_OK(Put(1, Key(i), rnd.RandomString(10000))); } ASSERT_OK(Flush(1)); - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_GT(TotalTableFiles(1, 4), 1); int non_level0_num_files = 0; @@ -2475,7 +2463,8 @@ TEST_P(DBCompactionTestWithParam, ConvertCompactionStyle) { compact_options.bottommost_level_compaction = BottommostLevelCompaction::kForce; compact_options.exclusive_manual_compaction = exclusive_manual_compaction_; - dbfull()->CompactRange(compact_options, handles_[1], nullptr, nullptr); + ASSERT_OK( + dbfull()->CompactRange(compact_options, handles_[1], nullptr, nullptr)); // Only 1 file in L0 ASSERT_EQ("1", FilesPerLevel(1)); @@ -2496,9 +2485,9 @@ TEST_P(DBCompactionTestWithParam, ConvertCompactionStyle) { for (int i = max_key_level_insert / 2; i <= max_key_universal_insert; i++) { ASSERT_OK(Put(1, Key(i), rnd.RandomString(10000))); } - dbfull()->Flush(FlushOptions()); + ASSERT_OK(dbfull()->Flush(FlushOptions())); ASSERT_OK(Flush(1)); - dbfull()->TEST_WaitForCompact(); + 
ASSERT_OK(dbfull()->TEST_WaitForCompact()); for (int i = 1; i < options.num_levels; i++) { ASSERT_EQ(NumTableFilesAtLevel(i, 1), 0); @@ -2508,6 +2497,7 @@ TEST_P(DBCompactionTestWithParam, ConvertCompactionStyle) { // compaction style std::string keys_in_db; Iterator* iter = dbfull()->NewIterator(ReadOptions(), handles_[1]); + ASSERT_OK(iter->status()); for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { keys_in_db.append(iter->key().ToString()); keys_in_db.push_back(','); @@ -2545,24 +2535,24 @@ TEST_F(DBCompactionTest, L0_CompactionBug_Issue44_a) { TEST_F(DBCompactionTest, L0_CompactionBug_Issue44_b) { do { CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); - Put(1, "", ""); + ASSERT_OK(Put(1, "", "")); ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions()); - Delete(1, "e"); - Put(1, "", ""); + ASSERT_OK(Delete(1, "e")); + ASSERT_OK(Put(1, "", "")); ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions()); - Put(1, "c", "cv"); + ASSERT_OK(Put(1, "c", "cv")); ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions()); - Put(1, "", ""); + ASSERT_OK(Put(1, "", "")); ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions()); - Put(1, "", ""); + ASSERT_OK(Put(1, "", "")); env_->SleepForMicroseconds(1000000); // Wait for compaction to finish ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions()); - Put(1, "d", "dv"); + ASSERT_OK(Put(1, "d", "dv")); ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions()); - Put(1, "", ""); + ASSERT_OK(Put(1, "", "")); ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions()); - Delete(1, "d"); - Delete(1, "b"); + ASSERT_OK(Delete(1, "d")); + ASSERT_OK(Delete(1, "b")); ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions()); ASSERT_EQ("(->)(c->cv)", Contents(1)); env_->SleepForMicroseconds(1000000); // Wait for compaction to finish @@ -2579,34 +2569,35 @@ TEST_F(DBCompactionTest, ManualAutoRace) { 
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing(); - Put(1, "foo", ""); - Put(1, "bar", ""); - Flush(1); - Put(1, "foo", ""); - Put(1, "bar", ""); + ASSERT_OK(Put(1, "foo", "")); + ASSERT_OK(Put(1, "bar", "")); + ASSERT_OK(Flush(1)); + ASSERT_OK(Put(1, "foo", "")); + ASSERT_OK(Put(1, "bar", "")); // Generate four files in CF 0, which should trigger an auto compaction - Put("foo", ""); - Put("bar", ""); - Flush(); - Put("foo", ""); - Put("bar", ""); - Flush(); - Put("foo", ""); - Put("bar", ""); - Flush(); - Put("foo", ""); - Put("bar", ""); - Flush(); + ASSERT_OK(Put("foo", "")); + ASSERT_OK(Put("bar", "")); + ASSERT_OK(Flush()); + ASSERT_OK(Put("foo", "")); + ASSERT_OK(Put("bar", "")); + ASSERT_OK(Flush()); + ASSERT_OK(Put("foo", "")); + ASSERT_OK(Put("bar", "")); + ASSERT_OK(Flush()); + ASSERT_OK(Put("foo", "")); + ASSERT_OK(Put("bar", "")); + ASSERT_OK(Flush()); // The auto compaction is scheduled but waited until here TEST_SYNC_POINT("DBCompactionTest::ManualAutoRace:1"); // The auto compaction will wait until the manual compaction is registerd // before processing so that it will be cancelled. - dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr, nullptr); + ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr, + nullptr)); ASSERT_EQ("0,1", FilesPerLevel(1)); // Eventually the cancelled compaction will be rescheduled and executed. 
- dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_EQ("0,1", FilesPerLevel(0)); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing(); } @@ -2651,7 +2642,7 @@ TEST_P(DBCompactionTestWithParam, ManualCompaction) { options.statistics->getTickerCount(BLOCK_CACHE_ADD); CompactRangeOptions cro; cro.exclusive_manual_compaction = exclusive_manual_compaction_; - db_->CompactRange(cro, handles_[1], nullptr, nullptr); + ASSERT_OK(db_->CompactRange(cro, handles_[1], nullptr, nullptr)); // Verify manual compaction doesn't fill block cache ASSERT_EQ(prev_block_cache_add, options.statistics->getTickerCount(BLOCK_CACHE_ADD)); @@ -2732,7 +2723,8 @@ TEST_P(DBCompactionTestWithParam, ManualLevelCompactionOutputPathId) { CompactRangeOptions compact_options; compact_options.target_path_id = 1; compact_options.exclusive_manual_compaction = exclusive_manual_compaction_; - db_->CompactRange(compact_options, handles_[1], nullptr, nullptr); + ASSERT_OK( + db_->CompactRange(compact_options, handles_[1], nullptr, nullptr)); ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_EQ("0,1", FilesPerLevel(1)); @@ -2791,8 +2783,8 @@ TEST_P(DBCompactionTestWithParam, DISABLED_CompactFilesOnLevelCompaction) { for (int key = 64 * kEntriesPerBuffer; key >= 0; --key) { ASSERT_OK(Put(1, ToString(key), rnd.RandomString(kTestValueSize))); } - dbfull()->TEST_WaitForFlushMemTable(handles_[1]); - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable(handles_[1])); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ColumnFamilyMetaData cf_meta; dbfull()->GetColumnFamilyMetaData(handles_[1], &cf_meta); @@ -2868,10 +2860,10 @@ TEST_P(DBCompactionTestWithParam, PartialCompactionFailure) { keys.emplace_back(rnd.RandomString(kKeySize)); values.emplace_back(rnd.RandomString(kKvSize - kKeySize)); ASSERT_OK(Put(Slice(keys[k]), Slice(values[k]))); - dbfull()->TEST_WaitForFlushMemTable(); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable()); } - 
dbfull()->TEST_FlushMemTable(true); + ASSERT_OK(dbfull()->TEST_FlushMemTable(true)); // Make sure the number of L0 files can trigger compaction. ASSERT_GE(NumTableFilesAtLevel(0), options.level0_file_num_compaction_trigger); @@ -2937,7 +2929,7 @@ TEST_P(DBCompactionTestWithParam, DeleteMovedFileAfterCompaction) { ASSERT_OK(Flush()); } // this should execute L0->L1 - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_EQ("0,1", FilesPerLevel(0)); // block compactions @@ -2954,7 +2946,7 @@ TEST_P(DBCompactionTestWithParam, DeleteMovedFileAfterCompaction) { sleeping_task.WaitUntilDone(); // this should execute L1->L2 (move) - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_EQ("0,0,1", FilesPerLevel(0)); @@ -2972,7 +2964,7 @@ TEST_P(DBCompactionTestWithParam, DeleteMovedFileAfterCompaction) { ASSERT_OK(Flush()); } // this should execute both L0->L1 and L1->L2 (merge with previous file) - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_EQ("0,0,2", FilesPerLevel(0)); @@ -2980,6 +2972,7 @@ TEST_P(DBCompactionTestWithParam, DeleteMovedFileAfterCompaction) { ASSERT_OK(env_->FileExists(dbname_ + moved_file_name)); listener->SetExpectedFileName(dbname_ + moved_file_name); + ASSERT_OK(iterator->status()); iterator.reset(); // this file should have been compacted away @@ -3142,7 +3135,7 @@ TEST_F(DBCompactionTest, SuggestCompactRangeNoTwoLevel0Compactions) { for (int num = 0; num < 10; num++) { GenerateNewRandomFile(&rnd); } - db_->CompactRange(CompactRangeOptions(), nullptr, nullptr); + ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr)); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency( {{"CompactionJob::Run():Start", @@ -3163,7 +3156,7 @@ TEST_F(DBCompactionTest, SuggestCompactRangeNoTwoLevel0Compactions) { "DBCompactionTest::SuggestCompactRangeNoTwoLevel0Compactions:1"); GenerateNewRandomFile(&rnd, /* nowait */ true); - 
dbfull()->TEST_WaitForFlushMemTable(); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable()); ASSERT_OK(experimental::SuggestCompactRange(db_, nullptr, nullptr)); for (int num = 0; num < options.level0_file_num_compaction_trigger + 1; num++) { @@ -3173,7 +3166,7 @@ TEST_F(DBCompactionTest, SuggestCompactRangeNoTwoLevel0Compactions) { TEST_SYNC_POINT( "DBCompactionTest::SuggestCompactRangeNoTwoLevel0Compactions:2"); - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); } static std::string ShortKey(int i) { @@ -3337,7 +3330,7 @@ TEST_P(DBCompactionTestWithParam, IntraL0Compaction) { } ASSERT_OK(Flush()); } - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing(); std::vector> level_to_files; @@ -3415,7 +3408,7 @@ TEST_P(DBCompactionTestWithParam, IntraL0CompactionDoesNotObsoleteDeletions) { ASSERT_OK(Put(Key(i + 1), value)); ASSERT_OK(Flush()); } - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing(); std::vector> level_to_files; @@ -3463,7 +3456,7 @@ TEST_P(DBCompactionTestWithParam, FullCompactionInBottomPriThreadPool) { int key_idx = 0; GenerateNewFile(&rnd, &key_idx); } - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_EQ(1, num_bottom_pri_compactions); @@ -3487,8 +3480,8 @@ TEST_F(DBCompactionTest, OptimizedDeletionObsoleting) { // So key 0, 2, and 4+ fall outside these levels' key-ranges. for (int level = 2; level >= 1; --level) { for (int i = 0; i < 2; ++i) { - Put(Key(2 * i + 1), "val"); - Flush(); + ASSERT_OK(Put(Key(2 * i + 1), "val")); + ASSERT_OK(Flush()); } MoveFilesToLevel(level); ASSERT_EQ(2, NumTableFilesAtLevel(level)); @@ -3498,11 +3491,11 @@ TEST_F(DBCompactionTest, OptimizedDeletionObsoleting) { // - Tombstones for keys 2 and 4 can be dropped early. 
// - Tombstones for keys 1 and 3 must be kept due to L2 files' key-ranges. for (int i = 0; i < kNumL0Files; ++i) { - Put(Key(0), "val"); // sentinel to prevent trivial move - Delete(Key(i + 1)); - Flush(); + ASSERT_OK(Put(Key(0), "val")); // sentinel to prevent trivial move + ASSERT_OK(Delete(Key(i + 1))); + ASSERT_OK(Flush()); } - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); for (int i = 0; i < kNumL0Files; ++i) { std::string value; @@ -3566,10 +3559,10 @@ TEST_F(DBCompactionTest, CompactFilesPendingL0Bug) { TEST_F(DBCompactionTest, CompactFilesOverlapInL0Bug) { // Regression test for bug of not pulling in L0 files that overlap the user- // specified input files in time- and key-ranges. - Put(Key(0), "old_val"); - Flush(); - Put(Key(0), "new_val"); - Flush(); + ASSERT_OK(Put(Key(0), "old_val")); + ASSERT_OK(Flush()); + ASSERT_OK(Put(Key(0), "new_val")); + ASSERT_OK(Flush()); ColumnFamilyMetaData cf_meta; dbfull()->GetColumnFamilyMetaData(dbfull()->DefaultColumnFamily(), &cf_meta); @@ -3615,12 +3608,12 @@ TEST_F(DBCompactionTest, CompactBottomLevelFilesWithDeletions) { ASSERT_OK(Delete(Key(j))); } } - Flush(); + ASSERT_OK(Flush()); if (i < kNumLevelFiles - 1) { ASSERT_EQ(i + 1, NumTableFilesAtLevel(0)); } } - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_EQ(kNumLevelFiles, NumTableFilesAtLevel(1)); std::vector pre_release_metadata, post_release_metadata; @@ -3641,7 +3634,7 @@ TEST_F(DBCompactionTest, CompactBottomLevelFilesWithDeletions) { CompactionReason::kBottommostFiles); }); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing(); - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); db_->GetLiveFilesMetaData(&post_release_metadata); ASSERT_EQ(pre_release_metadata.size(), post_release_metadata.size()); @@ -3690,12 +3683,12 @@ TEST_F(DBCompactionTest, NoCompactBottomLevelFilesWithDeletions) { ASSERT_OK(Delete(Key(j))); } } - Flush(); + 
ASSERT_OK(Flush()); if (i < kNumLevelFiles - 1) { ASSERT_EQ(i + 1, NumTableFilesAtLevel(0)); } } - dbfull()->TEST_CompactRange(0, nullptr, nullptr, nullptr); + ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr, nullptr)); ASSERT_EQ(kNumLevelFiles, NumTableFilesAtLevel(1)); std::vector pre_release_metadata, post_release_metadata; @@ -3711,7 +3704,7 @@ TEST_F(DBCompactionTest, NoCompactBottomLevelFilesWithDeletions) { [&](void* /*arg*/) { num_compactions.fetch_add(1); }); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing(); db_->ReleaseSnapshot(snapshot); - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_EQ(0, num_compactions); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing(); @@ -3749,9 +3742,9 @@ TEST_F(DBCompactionTest, LevelCompactExpiredTtlFiles) { ASSERT_OK( Put(Key(i * kNumKeysPerFile + j), rnd.RandomString(kValueSize))); } - Flush(); + ASSERT_OK(Flush()); } - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); MoveFilesToLevel(3); ASSERT_EQ("0,0,0,2", FilesPerLevel()); @@ -3760,9 +3753,9 @@ TEST_F(DBCompactionTest, LevelCompactExpiredTtlFiles) { for (int j = 0; j < kNumKeysPerFile; ++j) { ASSERT_OK(Delete(Key(i * kNumKeysPerFile + j))); } - Flush(); + ASSERT_OK(Flush()); } - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_EQ("2,0,0,2", FilesPerLevel()); MoveFilesToLevel(1); ASSERT_EQ("0,2,0,2", FilesPerLevel()); @@ -3773,14 +3766,14 @@ TEST_F(DBCompactionTest, LevelCompactExpiredTtlFiles) { // Just do a simple write + flush so that the Ttl expired files get // compacted. 
ASSERT_OK(Put("a", "1")); - Flush(); + ASSERT_OK(Flush()); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack( "LevelCompactionPicker::PickCompaction:Return", [&](void* arg) { Compaction* compaction = reinterpret_cast(arg); ASSERT_TRUE(compaction->compaction_reason() == CompactionReason::kTtl); }); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing(); - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); // All non-L0 files are deleted, as they contained only deleted data. ASSERT_EQ("1", FilesPerLevel()); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing(); @@ -3796,9 +3789,9 @@ TEST_F(DBCompactionTest, LevelCompactExpiredTtlFiles) { ASSERT_OK( Put(Key(i * kNumKeysPerFile + j), rnd.RandomString(kValueSize))); } - Flush(); + ASSERT_OK(Flush()); } - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); MoveFilesToLevel(3); ASSERT_EQ("0,0,0,2", FilesPerLevel()); @@ -3807,9 +3800,9 @@ TEST_F(DBCompactionTest, LevelCompactExpiredTtlFiles) { for (int j = 0; j < kNumKeysPerFile; ++j) { ASSERT_OK(Delete(Key(i * kNumKeysPerFile + j))); } - Flush(); + ASSERT_OK(Flush()); } - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_EQ("2,0,0,2", FilesPerLevel()); MoveFilesToLevel(1); ASSERT_EQ("0,2,0,2", FilesPerLevel()); @@ -3818,8 +3811,8 @@ TEST_F(DBCompactionTest, LevelCompactExpiredTtlFiles) { // trigger as ttl is set to 24 hours. env_->MockSleepForSeconds(12 * 60 * 60); ASSERT_OK(Put("a", "1")); - Flush(); - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(Flush()); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_EQ("1,2,0,2", FilesPerLevel()); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack( @@ -3832,7 +3825,7 @@ TEST_F(DBCompactionTest, LevelCompactExpiredTtlFiles) { // Dynamically change ttl to 10 hours. // This should trigger a ttl compaction, as 12 hours have already passed. 
ASSERT_OK(dbfull()->SetOptions({{"ttl", "36000"}})); - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); // All non-L0 files are deleted, as they contained only deleted data. ASSERT_EQ("1", FilesPerLevel()); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing(); @@ -3892,7 +3885,7 @@ TEST_F(DBCompactionTest, LevelTtlCascadingCompactions) { for (int i = 1; i <= 100; ++i) { ASSERT_OK(Put(Key(i), rnd.RandomString(kValueSize))); } - Flush(); + ASSERT_OK(Flush()); // Get the first file's creation time. This will be the oldest file in the // DB. Compactions inolving this file's descendents should keep getting // this time. @@ -3905,7 +3898,7 @@ TEST_F(DBCompactionTest, LevelTtlCascadingCompactions) { for (int i = 101; i <= 200; ++i) { ASSERT_OK(Put(Key(i), rnd.RandomString(kValueSize))); } - Flush(); + ASSERT_OK(Flush()); MoveFilesToLevel(6); ASSERT_EQ("0,0,0,0,0,0,2", FilesPerLevel()); @@ -3914,12 +3907,12 @@ TEST_F(DBCompactionTest, LevelTtlCascadingCompactions) { for (int i = 1; i <= 50; ++i) { ASSERT_OK(Put(Key(i), rnd.RandomString(kValueSize))); } - Flush(); + ASSERT_OK(Flush()); env_->MockSleepForSeconds(1 * 60 * 60); for (int i = 51; i <= 150; ++i) { ASSERT_OK(Put(Key(i), rnd.RandomString(kValueSize))); } - Flush(); + ASSERT_OK(Flush()); MoveFilesToLevel(4); ASSERT_EQ("0,0,0,0,2,0,2", FilesPerLevel()); @@ -3928,8 +3921,8 @@ TEST_F(DBCompactionTest, LevelTtlCascadingCompactions) { for (int i = 26; i <= 75; ++i) { ASSERT_OK(Put(Key(i), rnd.RandomString(kValueSize))); } - Flush(); - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(Flush()); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); MoveFilesToLevel(1); ASSERT_EQ("0,1,0,0,2,0,2", FilesPerLevel()); @@ -3959,9 +3952,9 @@ TEST_F(DBCompactionTest, LevelTtlCascadingCompactions) { if (if_restart) { Reopen(options); } else { - Flush(); + ASSERT_OK(Flush()); } - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_EQ("1,0,0,0,0,0,1", FilesPerLevel()); 
ASSERT_EQ(5, ttl_compactions); @@ -3974,9 +3967,9 @@ TEST_F(DBCompactionTest, LevelTtlCascadingCompactions) { if (if_restart) { Reopen(options); } else { - Flush(); + ASSERT_OK(Flush()); } - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_EQ("1,0,0,0,0,0,1", FilesPerLevel()); ASSERT_GE(ttl_compactions, 6); @@ -4041,9 +4034,9 @@ TEST_F(DBCompactionTest, LevelPeriodicCompaction) { ASSERT_OK( Put(Key(i * kNumKeysPerFile + j), rnd.RandomString(kValueSize))); } - Flush(); + ASSERT_OK(Flush()); } - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_EQ("2", FilesPerLevel()); ASSERT_EQ(0, periodic_compactions); @@ -4051,8 +4044,8 @@ TEST_F(DBCompactionTest, LevelPeriodicCompaction) { // Add 50 hours and do a write env_->MockSleepForSeconds(50 * 60 * 60); ASSERT_OK(Put("a", "1")); - Flush(); - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(Flush()); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); // Assert that the files stay in the same level ASSERT_EQ("3", FilesPerLevel()); // The two old files go through the periodic compaction process @@ -4067,9 +4060,9 @@ TEST_F(DBCompactionTest, LevelPeriodicCompaction) { if (if_restart) { Reopen(options); } else { - Flush(); + ASSERT_OK(Flush()); } - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_EQ("1,3", FilesPerLevel()); // The three old files now go through the periodic compaction process. 2 // + 3. @@ -4078,8 +4071,8 @@ TEST_F(DBCompactionTest, LevelPeriodicCompaction) { // Add another 50 hours and do another write env_->MockSleepForSeconds(50 * 60 * 60); ASSERT_OK(Put("c", "3")); - Flush(); - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(Flush()); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_EQ("2,3", FilesPerLevel()); // The four old files now go through the periodic compaction process. 5 // + 4. 
@@ -4136,7 +4129,7 @@ TEST_F(DBCompactionTest, LevelPeriodicCompactionWithOldDB) { ASSERT_OK( Put(Key(i * kNumKeysPerFile + j), rnd.RandomString(kValueSize))); } - Flush(); + ASSERT_OK(Flush()); // Move the first two files to L2. if (i == 1) { MoveFilesToLevel(2); @@ -4200,9 +4193,9 @@ TEST_F(DBCompactionTest, LevelPeriodicAndTtlCompaction) { ASSERT_OK( Put(Key(i * kNumKeysPerFile + j), rnd.RandomString(kValueSize))); } - Flush(); + ASSERT_OK(Flush()); } - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); MoveFilesToLevel(3); @@ -4213,8 +4206,8 @@ TEST_F(DBCompactionTest, LevelPeriodicAndTtlCompaction) { // Add some time greater than periodic_compaction_time. env_->MockSleepForSeconds(50 * 60 * 60); ASSERT_OK(Put("a", "1")); - Flush(); - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(Flush()); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); // Files in the bottom level go through periodic compactions. ASSERT_EQ("1,0,0,2", FilesPerLevel()); ASSERT_EQ(2, periodic_compactions); @@ -4223,8 +4216,8 @@ TEST_F(DBCompactionTest, LevelPeriodicAndTtlCompaction) { // Add a little more time than ttl env_->MockSleepForSeconds(11 * 60 * 60); ASSERT_OK(Put("b", "1")); - Flush(); - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(Flush()); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); // Notice that the previous file in level 1 falls down to the bottom level // due to ttl compactions, one level at a time. // And bottom level files don't get picked up for ttl compactions. @@ -4235,8 +4228,8 @@ TEST_F(DBCompactionTest, LevelPeriodicAndTtlCompaction) { // Add some time greater than periodic_compaction_time. env_->MockSleepForSeconds(50 * 60 * 60); ASSERT_OK(Put("c", "1")); - Flush(); - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(Flush()); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); // Previous L0 file falls one level at a time to bottom level due to ttl. // And all 4 bottom files go through periodic compactions. 
ASSERT_EQ("1,0,0,4", FilesPerLevel()); @@ -4312,9 +4305,9 @@ TEST_F(DBCompactionTest, LevelPeriodicCompactionWithCompactionFilters) { ASSERT_OK( Put(Key(i * kNumKeysPerFile + j), rnd.RandomString(kValueSize))); } - Flush(); + ASSERT_OK(Flush()); } - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_EQ("2", FilesPerLevel()); ASSERT_EQ(0, periodic_compactions); @@ -4322,8 +4315,8 @@ TEST_F(DBCompactionTest, LevelPeriodicCompactionWithCompactionFilters) { // Add 31 days and do a write env_->MockSleepForSeconds(31 * 24 * 60 * 60); ASSERT_OK(Put("a", "1")); - Flush(); - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(Flush()); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); // Assert that the files stay in the same level ASSERT_EQ("3", FilesPerLevel()); // The two old files go through the periodic compaction process @@ -4372,16 +4365,16 @@ TEST_F(DBCompactionTest, CompactRangeDelayedByL0FileCount) { for (int k = 0; k < 2; ++k) { ASSERT_OK(Put(Key(k), rnd.RandomString(1024))); } - Flush(); + ASSERT_OK(Flush()); } auto manual_compaction_thread = port::Thread([this]() { CompactRangeOptions cro; cro.allow_write_stall = false; - db_->CompactRange(cro, nullptr, nullptr); + ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr)); }); manual_compaction_thread.join(); - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_EQ(0, NumTableFilesAtLevel(0)); ASSERT_GT(NumTableFilesAtLevel(1), 0); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing(); @@ -4428,17 +4421,17 @@ TEST_F(DBCompactionTest, CompactRangeDelayedByImmMemTableCount) { FlushOptions flush_opts; flush_opts.wait = false; flush_opts.allow_write_stall = true; - dbfull()->Flush(flush_opts); + ASSERT_OK(dbfull()->Flush(flush_opts)); } auto manual_compaction_thread = port::Thread([this]() { CompactRangeOptions cro; cro.allow_write_stall = false; - db_->CompactRange(cro, nullptr, nullptr); + ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr)); }); 
manual_compaction_thread.join(); - dbfull()->TEST_WaitForFlushMemTable(); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable()); ASSERT_EQ(0, NumTableFilesAtLevel(0)); ASSERT_GT(NumTableFilesAtLevel(1), 0); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing(); @@ -4474,12 +4467,11 @@ TEST_F(DBCompactionTest, CompactRangeShutdownWhileDelayed) { for (int k = 0; k < 2; ++k) { ASSERT_OK(Put(1, Key(k), rnd.RandomString(1024))); } - Flush(1); + ASSERT_OK(Flush(1)); } auto manual_compaction_thread = port::Thread([this, i]() { CompactRangeOptions cro; cro.allow_write_stall = false; - Status s = db_->CompactRange(cro, handles_[1], nullptr, nullptr); if (i == 0) { ASSERT_TRUE(db_->CompactRange(cro, handles_[1], nullptr, nullptr) .IsColumnFamilyDropped()); @@ -4499,7 +4491,7 @@ TEST_F(DBCompactionTest, CompactRangeShutdownWhileDelayed) { manual_compaction_thread.join(); TEST_SYNC_POINT( "DBCompactionTest::CompactRangeShutdownWhileDelayed:PostManual"); - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing(); } } @@ -4534,25 +4526,26 @@ TEST_F(DBCompactionTest, CompactRangeSkipFlushAfterDelay) { for (int j = 0; j < 2; ++j) { ASSERT_OK(Put(Key(j), rnd.RandomString(1024))); } - dbfull()->Flush(flush_opts); + ASSERT_OK(dbfull()->Flush(flush_opts)); } auto manual_compaction_thread = port::Thread([this]() { CompactRangeOptions cro; cro.allow_write_stall = false; - db_->CompactRange(cro, nullptr, nullptr); + ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr)); }); TEST_SYNC_POINT("DBCompactionTest::CompactRangeSkipFlushAfterDelay:PreFlush"); - Put(ToString(0), rnd.RandomString(1024)); - dbfull()->Flush(flush_opts); - Put(ToString(0), rnd.RandomString(1024)); + ASSERT_OK(Put(ToString(0), rnd.RandomString(1024))); + ASSERT_OK(dbfull()->Flush(flush_opts)); + ASSERT_OK(Put(ToString(0), rnd.RandomString(1024))); TEST_SYNC_POINT("DBCompactionTest::CompactRangeSkipFlushAfterDelay:PostFlush"); 
manual_compaction_thread.join(); // If CompactRange's flush was skipped, the final Put above will still be // in the active memtable. std::string num_keys_in_memtable; - db_->GetProperty(DB::Properties::kNumEntriesActiveMemTable, &num_keys_in_memtable); + ASSERT_TRUE(db_->GetProperty(DB::Properties::kNumEntriesActiveMemTable, + &num_keys_in_memtable)); ASSERT_EQ(ToString(1), num_keys_in_memtable); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing(); @@ -4610,7 +4603,7 @@ TEST_F(DBCompactionTest, CompactRangeFlushOverlappingMemtable) { } else { ASSERT_EQ(2, num_memtable_entries); // flush anyways to prepare for next iteration - db_->Flush(FlushOptions()); + ASSERT_OK(db_->Flush(FlushOptions())); } } } @@ -4625,12 +4618,12 @@ TEST_F(DBCompactionTest, CompactionStatsTest) { for (int i = 0; i < 32; i++) { for (int j = 0; j < 5000; j++) { - Put(std::to_string(j), std::string(1, 'A')); + ASSERT_OK(Put(std::to_string(j), std::string(1, 'A'))); } ASSERT_OK(Flush()); ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable()); } - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ColumnFamilyHandleImpl* cfh = static_cast(dbfull()->DefaultColumnFamily()); ColumnFamilyData* cfd = cfh->cfd(); @@ -4715,7 +4708,7 @@ TEST_F(DBCompactionTest, CompactionHasEmptyOutput) { ASSERT_OK(Delete("b")); ASSERT_OK(Flush()); - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_EQ(NumTableFilesAtLevel(0), 0); ASSERT_EQ(NumTableFilesAtLevel(1), 0); @@ -4848,7 +4841,7 @@ TEST_F(DBCompactionTest, CompactionLimiter) { } for (unsigned int cf = 0; cf < cf_count; cf++) { - dbfull()->TEST_WaitForFlushMemTable(handles_[cf]); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable(handles_[cf])); } } @@ -4866,7 +4859,7 @@ TEST_F(DBCompactionTest, CompactionLimiter) { } // put extra key to trigger flush ASSERT_OK(Put(0, "", "")); - dbfull()->TEST_WaitForFlushMemTable(handles_[0]); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable(handles_[0])); 
ASSERT_EQ(options.level0_file_num_compaction_trigger + num + 1, NumTableFilesAtLevel(0, 0)); } @@ -4881,7 +4874,7 @@ TEST_F(DBCompactionTest, CompactionLimiter) { } for (unsigned int cf = 0; cf < cf_count; cf++) { - dbfull()->TEST_WaitForFlushMemTable(handles_[cf]); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable(handles_[cf])); } ASSERT_OK(dbfull()->TEST_WaitForCompact()); @@ -4903,7 +4896,7 @@ TEST_F(DBCompactionTest, CompactionLimiter) { // put extra key to trigger flush ASSERT_OK(Put(cf_test, "", "")); - dbfull()->TEST_WaitForFlushMemTable(handles_[cf_test]); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable(handles_[cf_test])); ASSERT_EQ(1, NumTableFilesAtLevel(0, cf_test)); Compact(cf_test, Key(0), Key(keyIndex)); @@ -4989,7 +4982,7 @@ TEST_P(CompactionPriTest, Test) { ASSERT_OK(Put(Key(keys[i]), rnd.RandomString(102))); } - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); for (int i = 0; i < kNKeys; i++) { ASSERT_NE("NOT_FOUND", Get(Key(i))); } @@ -5028,9 +5021,9 @@ TEST_F(DBCompactionTest, PartialManualCompaction) { Random rnd(301); for (auto i = 0; i < 8; ++i) { for (auto j = 0; j < 10; ++j) { - Merge("foo", rnd.RandomString(1024)); + ASSERT_OK(Merge("foo", rnd.RandomString(1024))); } - Flush(); + ASSERT_OK(Flush()); } MoveFilesToLevel(2); @@ -5043,7 +5036,7 @@ TEST_F(DBCompactionTest, PartialManualCompaction) { CompactRangeOptions cro; cro.bottommost_level_compaction = BottommostLevelCompaction::kForceOptimized; - dbfull()->CompactRange(cro, nullptr, nullptr); + ASSERT_OK(dbfull()->CompactRange(cro, nullptr, nullptr)); } TEST_F(DBCompactionTest, ManualCompactionFailsInReadOnlyMode) { @@ -5060,9 +5053,9 @@ TEST_F(DBCompactionTest, ManualCompactionFailsInReadOnlyMode) { Random rnd(301); for (int i = 0; i < kNumL0Files; ++i) { // Make sure files are overlapping in key-range to prevent trivial move. 
- Put("key1", rnd.RandomString(1024)); - Put("key2", rnd.RandomString(1024)); - Flush(); + ASSERT_OK(Put("key1", rnd.RandomString(1024))); + ASSERT_OK(Put("key2", rnd.RandomString(1024))); + ASSERT_OK(Flush()); } ASSERT_EQ(kNumL0Files, NumTableFilesAtLevel(0)); @@ -5111,7 +5104,7 @@ TEST_F(DBCompactionTest, ManualCompactionBottomLevelOptimized) { ASSERT_OK( Put("foo" + std::to_string(i * 10 + j), rnd.RandomString(1024))); } - Flush(); + ASSERT_OK(Flush()); } MoveFilesToLevel(2); @@ -5121,7 +5114,7 @@ TEST_F(DBCompactionTest, ManualCompactionBottomLevelOptimized) { ASSERT_OK( Put("bar" + std::to_string(i * 10 + j), rnd.RandomString(1024))); } - Flush(); + ASSERT_OK(Flush()); } const std::vector& comp_stats = internal_stats_ptr->TEST_GetCompactionStats(); @@ -5130,7 +5123,7 @@ TEST_F(DBCompactionTest, ManualCompactionBottomLevelOptimized) { CompactRangeOptions cro; cro.bottommost_level_compaction = BottommostLevelCompaction::kForceOptimized; - dbfull()->CompactRange(cro, nullptr, nullptr); + ASSERT_OK(dbfull()->CompactRange(cro, nullptr, nullptr)); const std::vector& comp_stats2 = internal_stats_ptr->TEST_GetCompactionStats(); @@ -5155,14 +5148,15 @@ TEST_F(DBCompactionTest, CompactionDuringShutdown) { ASSERT_OK( Put("foo" + std::to_string(i * 10 + j), rnd.RandomString(1024))); } - Flush(); + ASSERT_OK(Flush()); } ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction:NonTrivial:BeforeRun", [&](void* /*arg*/) { dbfull()->shutting_down_.store(true); }); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing(); - dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr); + Status s = dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr); + ASSERT_TRUE(s.ok() || s.IsShutdownInProgress()); ASSERT_OK(dbfull()->error_handler_.GetBGError()); } @@ -5176,7 +5170,7 @@ TEST_P(DBCompactionTestWithParam, FixFileIngestionCompactionDeadlock) { // Generate an external SST file containing a single key, i.e. 
99 std::string sst_files_dir = dbname_ + "/sst_files/"; - DestroyDir(env_, sst_files_dir); + ASSERT_OK(DestroyDir(env_, sst_files_dir)); ASSERT_OK(env_->CreateDir(sst_files_dir)); SstFileWriter sst_writer(EnvOptions(), options); const std::string sst_file_path = sst_files_dir + "test.sst"; @@ -5215,7 +5209,7 @@ TEST_P(DBCompactionTestWithParam, FixFileIngestionCompactionDeadlock) { // extra key to trigger flush. ASSERT_OK(Put("", "")); } - dbfull()->TEST_WaitForFlushMemTable(); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable()); ASSERT_EQ(NumTableFilesAtLevel(0 /*level*/, 0 /*cf*/), i + 1); } // When we reach this point, there will be level0_stop_writes_trigger L0 @@ -5263,7 +5257,12 @@ TEST_F(DBCompactionTest, ConsistencyFailTest) { for (int k = 0; k < 2; ++k) { ASSERT_OK(Put("foo", "bar")); - Flush(); + Status s = Flush(); + if (k < 1) { + ASSERT_OK(s); + } else { + ASSERT_TRUE(s.IsCorruption()); + } } ASSERT_NOK(Put("foo", "bar")); @@ -5299,14 +5298,15 @@ TEST_F(DBCompactionTest, ConsistencyFailTest2) { ASSERT_OK(Put("foo1", value)); ASSERT_OK(Put("z", "")); - Flush(); + ASSERT_OK(Flush()); ASSERT_OK(Put("foo2", value)); ASSERT_OK(Put("z", "")); - Flush(); + Status s = Flush(); + ASSERT_TRUE(s.ok() || s.IsCorruption()); // This probably returns non-OK, but we rely on the next Put() // to determine the DB is frozen. - dbfull()->TEST_WaitForCompact(); + ASSERT_NOK(dbfull()->TEST_WaitForCompact()); ASSERT_NOK(Put("foo", "bar")); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing(); SyncPoint::GetInstance()->ClearAllCallBacks(); @@ -5392,7 +5392,7 @@ TEST_P(DBCompactionTestWithParam, // Put one key, to make biggest log sequence number in this memtable is bigger // than sst which would be ingested in next step. 
ASSERT_OK(Put(Key(2), "b")); - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing(); std::vector> level_to_files; dbfull()->TEST_GetFilesMetaData(dbfull()->DefaultColumnFamily(), @@ -5480,7 +5480,7 @@ TEST_P(DBCompactionTestWithParam, // Wake up flush job sleeping_tasks.WakeUp(); sleeping_tasks.WaitUntilDone(); - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing(); uint64_t error_count = 0; @@ -5506,7 +5506,7 @@ TEST_P(DBCompactionTestWithBottommostParam, SequenceKeysManualCompaction) { for (int j = 1; j < UCHAR_MAX; j++) { auto key = std::string(kSstNum, '\0'); key[kSstNum - i] += static_cast(j); - Put(key, std::string(i % 1000, 'A')); + ASSERT_OK(Put(key, std::string(i % 1000, 'A'))); } ASSERT_OK(Flush()); } @@ -5516,7 +5516,7 @@ TEST_P(DBCompactionTestWithBottommostParam, SequenceKeysManualCompaction) { auto cro = CompactRangeOptions(); cro.bottommost_level_compaction = bottommost_level_compaction_; - db_->CompactRange(cro, nullptr, nullptr); + ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr)); if (bottommost_level_compaction_ == BottommostLevelCompaction::kForce || bottommost_level_compaction_ == BottommostLevelCompaction::kForceOptimized) { @@ -5555,12 +5555,12 @@ TEST_F(DBCompactionTest, UpdateLevelSubCompactionTest) { // Trigger compaction for (int i = 0; i < 32; i++) { for (int j = 0; j < 5000; j++) { - Put(std::to_string(j), std::string(1, 'A')); + ASSERT_OK(Put(std::to_string(j), std::string(1, 'A'))); } ASSERT_OK(Flush()); ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable()); } - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_TRUE(has_compaction); has_compaction = false; @@ -5578,12 +5578,12 @@ TEST_F(DBCompactionTest, UpdateLevelSubCompactionTest) { // Trigger compaction for (int i = 0; i < 32; i++) { for (int j = 0; j < 5000; j++) { - 
Put(std::to_string(j), std::string(1, 'A')); + ASSERT_OK(Put(std::to_string(j), std::string(1, 'A'))); } ASSERT_OK(Flush()); ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable()); } - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_TRUE(has_compaction); } @@ -5606,12 +5606,12 @@ TEST_F(DBCompactionTest, UpdateUniversalSubCompactionTest) { // Trigger compaction for (int i = 0; i < 32; i++) { for (int j = 0; j < 5000; j++) { - Put(std::to_string(j), std::string(1, 'A')); + ASSERT_OK(Put(std::to_string(j), std::string(1, 'A'))); } ASSERT_OK(Flush()); ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable()); } - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_TRUE(has_compaction); has_compaction = false; @@ -5629,12 +5629,12 @@ TEST_F(DBCompactionTest, UpdateUniversalSubCompactionTest) { // Trigger compaction for (int i = 0; i < 32; i++) { for (int j = 0; j < 5000; j++) { - Put(std::to_string(j), std::string(1, 'A')); + ASSERT_OK(Put(std::to_string(j), std::string(1, 'A'))); } ASSERT_OK(Flush()); ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable()); } - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_TRUE(has_compaction); } @@ -5682,7 +5682,7 @@ TEST_P(ChangeLevelConflictsWithAuto, TestConflict) { ASSERT_OK(Put("bar", "v3")); ASSERT_OK(Put("foo", "v3")); ASSERT_OK(Flush()); - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); TEST_SYNC_POINT("AutoCompactionFinished2"); }); @@ -5692,7 +5692,7 @@ TEST_P(ChangeLevelConflictsWithAuto, TestConflict) { cro.target_level = GetParam() ? 1 : 0; // This should return non-OK, but it's more important for the test to // make sure that the DB is not corrupted. - dbfull()->CompactRange(cro, nullptr, nullptr); + ASSERT_NOK(dbfull()->CompactRange(cro, nullptr, nullptr)); } auto_comp.join(); // Refitting didn't happen. 
@@ -5914,25 +5914,25 @@ TEST_F(DBCompactionTest, CompactionWithBlob) { assert(versions); ColumnFamilyData* const cfd = versions->GetColumnFamilySet()->GetDefault(); - assert(cfd); + ASSERT_NE(cfd, nullptr); Version* const current = cfd->current(); - assert(current); + ASSERT_NE(current, nullptr); const VersionStorageInfo* const storage_info = current->storage_info(); - assert(storage_info); + ASSERT_NE(storage_info, nullptr); const auto& l1_files = storage_info->LevelFiles(1); ASSERT_EQ(l1_files.size(), 1); const FileMetaData* const table_file = l1_files[0]; - assert(table_file); + ASSERT_NE(table_file, nullptr); const auto& blob_files = storage_info->GetBlobFiles(); ASSERT_EQ(blob_files.size(), 1); const auto& blob_file = blob_files.begin()->second; - assert(blob_file); + ASSERT_NE(blob_file, nullptr); ASSERT_EQ(table_file->smallest.user_key(), first_key); ASSERT_EQ(table_file->largest.user_key(), second_key); @@ -5942,7 +5942,7 @@ TEST_F(DBCompactionTest, CompactionWithBlob) { ASSERT_EQ(blob_file->GetTotalBlobCount(), 2); const InternalStats* const internal_stats = cfd->internal_stats(); - assert(internal_stats); + ASSERT_NE(internal_stats, nullptr); const uint64_t expected_bytes = table_file->fd.GetFileSize() + blob_file->GetTotalBlobBytes(); @@ -6018,13 +6018,13 @@ TEST_P(DBCompactionTestBlobError, CompactionError) { assert(versions); ColumnFamilyData* const cfd = versions->GetColumnFamilySet()->GetDefault(); - assert(cfd); + ASSERT_NE(cfd, nullptr); Version* const current = cfd->current(); - assert(current); + ASSERT_NE(current, nullptr); const VersionStorageInfo* const storage_info = current->storage_info(); - assert(storage_info); + ASSERT_NE(storage_info, nullptr); const auto& l1_files = storage_info->LevelFiles(1); ASSERT_TRUE(l1_files.empty()); @@ -6033,7 +6033,7 @@ TEST_P(DBCompactionTestBlobError, CompactionError) { ASSERT_TRUE(blob_files.empty()); const InternalStats* const internal_stats = cfd->internal_stats(); - assert(internal_stats); + 
ASSERT_NE(internal_stats, nullptr); const auto& compaction_stats = internal_stats->TEST_GetCompactionStats(); ASSERT_GE(compaction_stats.size(), 2); diff --git a/db/db_dynamic_level_test.cc b/db/db_dynamic_level_test.cc index 9f2013c02..955004304 100644 --- a/db/db_dynamic_level_test.cc +++ b/db/db_dynamic_level_test.cc @@ -102,7 +102,8 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase) { } // Test compact range works - dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr); + ASSERT_OK( + dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr)); // All data should be in the last level. ColumnFamilyMetaData cf_meta; db_->GetColumnFamilyMetaData(&cf_meta); @@ -166,8 +167,8 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase2) { ASSERT_OK(dbfull()->SetOptions({ {"disable_auto_compactions", "false"}, })); - Flush(); - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(Flush()); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop)); ASSERT_EQ(4U, int_prop); @@ -184,8 +185,8 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase2) { ASSERT_OK(dbfull()->SetOptions({ {"disable_auto_compactions", "false"}, })); - Flush(); - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(Flush()); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop)); ASSERT_EQ(3U, int_prop); ASSERT_TRUE(db_->GetProperty("rocksdb.num-files-at-level1", &str_prop)); @@ -205,8 +206,8 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase2) { ASSERT_OK(dbfull()->SetOptions({ {"disable_auto_compactions", "false"}, })); - Flush(); - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(Flush()); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop)); ASSERT_EQ(3U, int_prop); @@ -234,8 +235,8 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase2) { })); TEST_SYNC_POINT("DynamicLevelMaxBytesBase2:0"); - Flush(); - 
dbfull()->TEST_WaitForCompact(); + ASSERT_OK(Flush()); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop)); ASSERT_EQ(2U, int_prop); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing(); @@ -264,7 +265,7 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase2) { } TEST_SYNC_POINT("DynamicLevelMaxBytesBase2:2"); - Flush(); + ASSERT_OK(Flush()); thread.join(); @@ -302,7 +303,7 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesCompactRange) { DestroyAndReopen(options); // Compact against empty DB - dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr); + ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr)); uint64_t int_prop; std::string str_prop; @@ -316,13 +317,13 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesCompactRange) { ASSERT_OK( Put(Key(static_cast(rnd.Uniform(kMaxKey))), rnd.RandomString(80))); } - Flush(); - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(Flush()); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); if (NumTableFilesAtLevel(0) == 0) { // Make sure level 0 is not empty ASSERT_OK( Put(Key(static_cast(rnd.Uniform(kMaxKey))), rnd.RandomString(80))); - Flush(); + ASSERT_OK(Flush()); } ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop)); @@ -343,7 +344,7 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesCompactRange) { }); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing(); - dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr); + ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr)); ASSERT_EQ(output_levels.size(), 2); ASSERT_TRUE(output_levels.find(3) != output_levels.end()); ASSERT_TRUE(output_levels.find(4) != output_levels.end()); @@ -389,8 +390,8 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBaseInc) { PutFixed32(&value, static_cast(i)); ASSERT_OK(Put(Key(i), value)); } - Flush(); - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(Flush()); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); 
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing(); ASSERT_EQ(non_trivial, 0); @@ -449,7 +450,7 @@ TEST_F(DBTestDynamicLevel, DISABLED_MigrateToDynamicLevelMaxBytesBase) { ASSERT_OK(Delete(Key(i / 10))); } verify_func(total_keys, false); - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); options.level_compaction_dynamic_level_bytes = true; options.disable_auto_compactions = true; @@ -464,7 +465,7 @@ TEST_F(DBTestDynamicLevel, DISABLED_MigrateToDynamicLevelMaxBytesBase) { CompactRangeOptions compact_options; compact_options.change_level = true; compact_options.target_level = options.num_levels - 1; - dbfull()->CompactRange(compact_options, nullptr, nullptr); + ASSERT_OK(dbfull()->CompactRange(compact_options, nullptr, nullptr)); compaction_finished.store(true); }); do { @@ -484,7 +485,7 @@ TEST_F(DBTestDynamicLevel, DISABLED_MigrateToDynamicLevelMaxBytesBase) { } verify_func(total_keys2, false); - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); verify_func(total_keys2, false); // Base level is not level 1 diff --git a/db/db_impl/db_impl.cc b/db/db_impl/db_impl.cc index 5e1b943df..62f26bf82 100644 --- a/db/db_impl/db_impl.cc +++ b/db/db_impl/db_impl.cc @@ -306,18 +306,21 @@ Status DBImpl::ResumeImpl(DBRecoverContext context) { mutex_.AssertHeld(); WaitForBackgroundWork(); - Status bg_error = error_handler_.GetBGError(); Status s; if (shutdown_initiated_) { // Returning shutdown status to SFM during auto recovery will cause it // to abort the recovery and allow the shutdown to progress s = Status::ShutdownInProgress(); } - if (s.ok() && bg_error.severity() > Status::Severity::kHardError) { - ROCKS_LOG_INFO( - immutable_db_options_.info_log, - "DB resume requested but failed due to Fatal/Unrecoverable error"); - s = bg_error; + + if (s.ok()) { + Status bg_error = error_handler_.GetBGError(); + if (bg_error.severity() > Status::Severity::kHardError) { + ROCKS_LOG_INFO( + 
immutable_db_options_.info_log, + "DB resume requested but failed due to Fatal/Unrecoverable error"); + s = bg_error; + } } // Make sure the IO Status stored in version set is set to OK. @@ -392,6 +395,11 @@ Status DBImpl::ResumeImpl(DBRecoverContext context) { FindObsoleteFiles(&job_context, true); if (s.ok()) { s = error_handler_.ClearBGError(); + } else { + // NOTE: this is needed to pass ASSERT_STATUS_CHECKED + // in the DBSSTTest.DBWithMaxSpaceAllowedRandomized test. + // See https://github.com/facebook/rocksdb/pull/7715#issuecomment-754947952 + error_handler_.GetRecoveryError().PermitUncheckedError(); } mutex_.Unlock(); @@ -408,6 +416,12 @@ Status DBImpl::ResumeImpl(DBRecoverContext context) { if (file_deletion_disabled) { // Always return ok s = EnableFileDeletions(/*force=*/true); + if (!s.ok()) { + ROCKS_LOG_INFO( + immutable_db_options_.info_log, + "DB resume requested but could not enable file deletions [%s]", + s.ToString().c_str()); + } } ROCKS_LOG_INFO(immutable_db_options_.info_log, "Successfully resumed DB"); } @@ -3573,7 +3587,7 @@ Status DBImpl::DeleteFile(std::string name) { Status DBImpl::DeleteFilesInRanges(ColumnFamilyHandle* column_family, const RangePtr* ranges, size_t n, bool include_end) { - Status status; + Status status = Status::OK(); auto cfh = static_cast_with_check(column_family); ColumnFamilyData* cfd = cfh->cfd(); VersionEdit edit; @@ -3632,7 +3646,7 @@ Status DBImpl::DeleteFilesInRanges(ColumnFamilyHandle* column_family, } if (edit.GetDeletedFiles().empty()) { job_context.Clean(); - return Status::OK(); + return status; } input_version->Ref(); status = versions_->LogAndApply(cfd, *cfd->GetLatestMutableCFOptions(), diff --git a/db/db_impl/db_impl_compaction_flush.cc b/db/db_impl/db_impl_compaction_flush.cc index 3e20949ae..49f6b6593 100644 --- a/db/db_impl/db_impl_compaction_flush.cc +++ b/db/db_impl/db_impl_compaction_flush.cc @@ -35,8 +35,10 @@ bool DBImpl::EnoughRoomForCompaction( // Pass the current bg_error_ to SFM so it can 
decide what checks to // perform. If this DB instance hasn't seen any error yet, the SFM can be // optimistic and not do disk space checks - enough_room = - sfm->EnoughRoomForCompaction(cfd, inputs, error_handler_.GetBGError()); + Status bg_error = error_handler_.GetBGError(); + enough_room = sfm->EnoughRoomForCompaction(cfd, inputs, bg_error); + bg_error.PermitUncheckedError(); // bg_error is just a copy of the Status + // from the error_handler_ if (enough_room) { *sfm_reserved_compact_space = true; } diff --git a/db/db_io_failure_test.cc b/db/db_io_failure_test.cc index 1fcaa6904..232ae649c 100644 --- a/db/db_io_failure_test.cc +++ b/db/db_io_failure_test.cc @@ -43,11 +43,15 @@ TEST_F(DBIOFailureTest, DropWrites) { if (level > 0 && level == dbfull()->NumberLevels() - 1) { break; } - dbfull()->TEST_CompactRange(level, nullptr, nullptr, nullptr, - true /* disallow trivial move */); + Status s = + dbfull()->TEST_CompactRange(level, nullptr, nullptr, nullptr, + true /* disallow trivial move */); + ASSERT_TRUE(s.ok() || s.IsCorruption()); } } else { - dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr); + Status s = + dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr); + ASSERT_TRUE(s.ok() || s.IsCorruption()); } } @@ -56,7 +60,8 @@ TEST_F(DBIOFailureTest, DropWrites) { ASSERT_EQ("5", property_value); env_->drop_writes_.store(false, std::memory_order_release); - ASSERT_LT(CountFiles(), num_files + 3); + const size_t count = CountFiles(); + ASSERT_LT(count, num_files + 3); // Check that compaction attempts slept after errors // TODO @krad: Figure out why ASSERT_EQ 5 keeps failing in certain compiler @@ -82,7 +87,8 @@ TEST_F(DBIOFailureTest, DropWritesFlush) { ASSERT_TRUE(db_->GetProperty("rocksdb.background-errors", &property_value)); ASSERT_EQ("0", property_value); - dbfull()->TEST_FlushMemTable(true); + // ASSERT file is too short + ASSERT_TRUE(dbfull()->TEST_FlushMemTable(true).IsCorruption()); 
ASSERT_TRUE(db_->GetProperty("rocksdb.background-errors", &property_value)); ASSERT_EQ("1", property_value); @@ -166,7 +172,7 @@ TEST_F(DBIOFailureTest, ManifestWriteError) { ASSERT_EQ("bar", Get("foo")); // Memtable compaction (will succeed) - Flush(); + ASSERT_OK(Flush()); ASSERT_EQ("bar", Get("foo")); const int last = 2; MoveFilesToLevel(2); @@ -174,7 +180,8 @@ TEST_F(DBIOFailureTest, ManifestWriteError) { // Merging compaction (will fail) error_type->store(true, std::memory_order_release); - dbfull()->TEST_CompactRange(last, nullptr, nullptr); // Should fail + ASSERT_NOK( + dbfull()->TEST_CompactRange(last, nullptr, nullptr)); // Should fail ASSERT_EQ("bar", Get("foo")); error_type->store(false, std::memory_order_release); @@ -192,7 +199,13 @@ TEST_F(DBIOFailureTest, ManifestWriteError) { // Merging compaction (will fail) error_type->store(true, std::memory_order_release); - dbfull()->TEST_CompactRange(last, nullptr, nullptr); // Should fail + Status s = + dbfull()->TEST_CompactRange(last, nullptr, nullptr); // Should fail + if (iter == 0) { + ASSERT_OK(s); + } else { + ASSERT_TRUE(s.IsIOError()); + } ASSERT_EQ("bar", Get("foo")); // Recovery: should not lose data @@ -220,18 +233,15 @@ TEST_F(DBIOFailureTest, PutFailsParanoid) { options.paranoid_checks = true; DestroyAndReopen(options); CreateAndReopenWithCF({"pikachu"}, options); - Status s; ASSERT_OK(Put(1, "foo", "bar")); ASSERT_OK(Put(1, "foo1", "bar1")); // simulate error env_->log_write_error_.store(true, std::memory_order_release); - s = Put(1, "foo2", "bar2"); - ASSERT_TRUE(!s.ok()); + ASSERT_NOK(Put(1, "foo2", "bar2")); env_->log_write_error_.store(false, std::memory_order_release); - s = Put(1, "foo3", "bar3"); // the next put should fail, too - ASSERT_TRUE(!s.ok()); + ASSERT_NOK(Put(1, "foo3", "bar3")); // but we're still able to read ASSERT_EQ("bar", Get(1, "foo")); @@ -244,12 +254,10 @@ TEST_F(DBIOFailureTest, PutFailsParanoid) { ASSERT_OK(Put(1, "foo1", "bar1")); // simulate error 
env_->log_write_error_.store(true, std::memory_order_release); - s = Put(1, "foo2", "bar2"); - ASSERT_TRUE(!s.ok()); + ASSERT_NOK(Put(1, "foo2", "bar2")); env_->log_write_error_.store(false, std::memory_order_release); - s = Put(1, "foo3", "bar3"); // the next put should NOT fail - ASSERT_TRUE(s.ok()); + ASSERT_OK(Put(1, "foo3", "bar3")); } #if !(defined NDEBUG) || !defined(OS_WIN) TEST_F(DBIOFailureTest, FlushSstRangeSyncError) { @@ -269,14 +277,14 @@ TEST_F(DBIOFailureTest, FlushSstRangeSyncError) { DestroyAndReopen(options); CreateAndReopenWithCF({"pikachu"}, options); - Status s; + const char* io_error_msg = "range sync dummy error"; std::atomic range_sync_called(0); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack( "SpecialEnv::SStableFile::RangeSync", [&](void* arg) { if (range_sync_called.fetch_add(1) == 0) { Status* st = static_cast(arg); - *st = Status::IOError("range sync dummy error"); + *st = Status::IOError(io_error_msg); } }); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing(); @@ -298,7 +306,9 @@ TEST_F(DBIOFailureTest, FlushSstRangeSyncError) { ASSERT_OK(Put(1, "foo3_2", rnd_str)); ASSERT_OK(Put(1, "foo3_3", rnd_str)); ASSERT_OK(Put(1, "foo4", "bar")); - dbfull()->TEST_WaitForFlushMemTable(handles_[1]); + Status s = dbfull()->TEST_WaitForFlushMemTable(handles_[1]); + ASSERT_TRUE(s.IsIOError()); + ASSERT_STREQ(s.getState(), io_error_msg); // Following writes should fail as flush failed. 
ASSERT_NOK(Put(1, "foo2", "bar3")); @@ -328,7 +338,6 @@ TEST_F(DBIOFailureTest, CompactSstRangeSyncError) { options.table_factory.reset(NewBlockBasedTableFactory(table_options)); DestroyAndReopen(options); CreateAndReopenWithCF({"pikachu"}, options); - Status s; Random rnd(301); std::string rnd_str = @@ -342,21 +351,22 @@ TEST_F(DBIOFailureTest, CompactSstRangeSyncError) { ASSERT_OK(Put(1, "foo1_1", rnd_str)); ASSERT_OK(Put(1, "foo1_2", rnd_str)); ASSERT_OK(Put(1, "foo1_3", rnd_str)); - Flush(1); + ASSERT_OK(Flush(1)); ASSERT_OK(Put(1, "foo", "bar")); ASSERT_OK(Put(1, "foo3_1", rnd_str)); ASSERT_OK(Put(1, "foo3_2", rnd_str)); ASSERT_OK(Put(1, "foo3_3", rnd_str)); ASSERT_OK(Put(1, "foo4", "bar")); - Flush(1); - dbfull()->TEST_WaitForFlushMemTable(handles_[1]); + ASSERT_OK(Flush(1)); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable(handles_[1])); + const char* io_error_msg = "range sync dummy error"; std::atomic range_sync_called(0); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack( "SpecialEnv::SStableFile::RangeSync", [&](void* arg) { if (range_sync_called.fetch_add(1) == 0) { Status* st = static_cast(arg); - *st = Status::IOError("range sync dummy error"); + *st = Status::IOError(io_error_msg); } }); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing(); @@ -365,7 +375,9 @@ TEST_F(DBIOFailureTest, CompactSstRangeSyncError) { { {"disable_auto_compactions", "false"}, })); - dbfull()->TEST_WaitForCompact(); + Status s = dbfull()->TEST_WaitForCompact(); + ASSERT_TRUE(s.IsIOError()); + ASSERT_STREQ(s.getState(), io_error_msg); // Following writes should fail as flush failed. 
ASSERT_NOK(Put(1, "foo2", "bar3")); @@ -389,13 +401,14 @@ TEST_F(DBIOFailureTest, FlushSstCloseError) { DestroyAndReopen(options); CreateAndReopenWithCF({"pikachu"}, options); - Status s; + + const char* io_error_msg = "close dummy error"; std::atomic close_called(0); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack( "SpecialEnv::SStableFile::Close", [&](void* arg) { if (close_called.fetch_add(1) == 0) { Status* st = static_cast(arg); - *st = Status::IOError("close dummy error"); + *st = Status::IOError(io_error_msg); } }); @@ -404,7 +417,9 @@ TEST_F(DBIOFailureTest, FlushSstCloseError) { ASSERT_OK(Put(1, "foo", "bar")); ASSERT_OK(Put(1, "foo1", "bar1")); ASSERT_OK(Put(1, "foo", "bar2")); - dbfull()->TEST_WaitForFlushMemTable(handles_[1]); + Status s = dbfull()->TEST_WaitForFlushMemTable(handles_[1]); + ASSERT_TRUE(s.IsIOError()); + ASSERT_STREQ(s.getState(), io_error_msg); // Following writes should fail as flush failed. ASSERT_NOK(Put(1, "foo2", "bar3")); @@ -429,25 +444,25 @@ TEST_F(DBIOFailureTest, CompactionSstCloseError) { DestroyAndReopen(options); CreateAndReopenWithCF({"pikachu"}, options); - Status s; ASSERT_OK(Put(1, "foo", "bar")); ASSERT_OK(Put(1, "foo2", "bar")); - Flush(1); + ASSERT_OK(Flush(1)); ASSERT_OK(Put(1, "foo", "bar2")); ASSERT_OK(Put(1, "foo2", "bar")); - Flush(1); + ASSERT_OK(Flush(1)); ASSERT_OK(Put(1, "foo", "bar3")); ASSERT_OK(Put(1, "foo2", "bar")); - Flush(1); - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(Flush(1)); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); + const char* io_error_msg = "close dummy error"; std::atomic close_called(0); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack( "SpecialEnv::SStableFile::Close", [&](void* arg) { if (close_called.fetch_add(1) == 0) { Status* st = static_cast(arg); - *st = Status::IOError("close dummy error"); + *st = Status::IOError(io_error_msg); } }); @@ -456,7 +471,9 @@ TEST_F(DBIOFailureTest, CompactionSstCloseError) { { {"disable_auto_compactions", "false"}, })); - 
dbfull()->TEST_WaitForCompact(); + Status s = dbfull()->TEST_WaitForCompact(); + ASSERT_TRUE(s.IsIOError()); + ASSERT_STREQ(s.getState(), io_error_msg); // Following writes should fail as compaction failed. ASSERT_NOK(Put(1, "foo2", "bar3")); @@ -480,13 +497,14 @@ TEST_F(DBIOFailureTest, FlushSstSyncError) { DestroyAndReopen(options); CreateAndReopenWithCF({"pikachu"}, options); - Status s; + + const char* io_error_msg = "sync dummy error"; std::atomic sync_called(0); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack( "SpecialEnv::SStableFile::Sync", [&](void* arg) { if (sync_called.fetch_add(1) == 0) { Status* st = static_cast(arg); - *st = Status::IOError("sync dummy error"); + *st = Status::IOError(io_error_msg); } }); @@ -495,7 +513,9 @@ TEST_F(DBIOFailureTest, FlushSstSyncError) { ASSERT_OK(Put(1, "foo", "bar")); ASSERT_OK(Put(1, "foo1", "bar1")); ASSERT_OK(Put(1, "foo", "bar2")); - dbfull()->TEST_WaitForFlushMemTable(handles_[1]); + Status s = dbfull()->TEST_WaitForFlushMemTable(handles_[1]); + ASSERT_TRUE(s.IsIOError()); + ASSERT_STREQ(s.getState(), io_error_msg); // Following writes should fail as flush failed. 
ASSERT_NOK(Put(1, "foo2", "bar3")); @@ -521,25 +541,25 @@ TEST_F(DBIOFailureTest, CompactionSstSyncError) { DestroyAndReopen(options); CreateAndReopenWithCF({"pikachu"}, options); - Status s; ASSERT_OK(Put(1, "foo", "bar")); ASSERT_OK(Put(1, "foo2", "bar")); - Flush(1); + ASSERT_OK(Flush(1)); ASSERT_OK(Put(1, "foo", "bar2")); ASSERT_OK(Put(1, "foo2", "bar")); - Flush(1); + ASSERT_OK(Flush(1)); ASSERT_OK(Put(1, "foo", "bar3")); ASSERT_OK(Put(1, "foo2", "bar")); - Flush(1); - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(Flush(1)); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); + const char* io_error_msg = "sync dummy error"; std::atomic sync_called(0); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack( "SpecialEnv::SStableFile::Sync", [&](void* arg) { if (sync_called.fetch_add(1) == 0) { Status* st = static_cast(arg); - *st = Status::IOError("close dummy error"); + *st = Status::IOError(io_error_msg); } }); @@ -548,7 +568,9 @@ TEST_F(DBIOFailureTest, CompactionSstSyncError) { { {"disable_auto_compactions", "false"}, })); - dbfull()->TEST_WaitForCompact(); + Status s = dbfull()->TEST_WaitForCompact(); + ASSERT_TRUE(s.IsIOError()); + ASSERT_STREQ(s.getState(), io_error_msg); // Following writes should fail as compaction failed. 
ASSERT_NOK(Put(1, "foo2", "bar3")); diff --git a/db/db_sst_test.cc b/db/db_sst_test.cc index 1239b8b62..ed1d50d3d 100644 --- a/db/db_sst_test.cc +++ b/db/db_sst_test.cc @@ -98,7 +98,7 @@ TEST_F(DBSSTTest, SSTsWithLdbSuffixHandling) { for (int i = 0; i < 10; ++i) { GenerateNewFile(&rnd, &key_id, false); } - Flush(); + ASSERT_OK(Flush()); Close(); int const num_files = GetSstFileCount(dbname_); ASSERT_GT(num_files, 0); @@ -393,7 +393,7 @@ TEST_F(DBSSTTest, RateLimitedDelete) { WriteOptions wo; wo.disableWAL = true; - ASSERT_OK(TryReopen(options)); + Reopen(options); // Create 4 files in L0 for (char v = 'a'; v <= 'd'; v++) { ASSERT_OK(Put("Key2", DummyString(1024, v), wo)); @@ -540,7 +540,7 @@ TEST_P(DBWALTestWithParam, WALTrashCleanupOnOpen) { auto sfm = static_cast(options.sst_file_manager.get()); sfm->delete_scheduler()->SetMaxTrashDBRatio(3.1); - ASSERT_OK(TryReopen(options)); + Reopen(options); // Create 4 files in L0 for (char v = 'a'; v <= 'd'; v++) { @@ -567,11 +567,11 @@ TEST_P(DBWALTestWithParam, WALTrashCleanupOnOpen) { if (!wal_dir_same_as_dbname_) { // Forcibly create some trash log files std::unique_ptr result; - env->NewWritableFile(options.wal_dir + "/1000.log.trash", &result, - EnvOptions()); + ASSERT_OK(env->NewWritableFile(options.wal_dir + "/1000.log.trash", &result, + EnvOptions())); result.reset(); } - env->GetChildren(options.wal_dir, &filenames); + ASSERT_OK(env->GetChildren(options.wal_dir, &filenames)); for (const std::string& fname : filenames) { if (fname.find(".log.trash") != std::string::npos) { trash_log_count++; @@ -580,11 +580,11 @@ TEST_P(DBWALTestWithParam, WALTrashCleanupOnOpen) { ASSERT_GE(trash_log_count, 1); env->set_fake_log_delete(false); - ASSERT_OK(TryReopen(options)); + Reopen(options); filenames.clear(); trash_log_count = 0; - env->GetChildren(options.wal_dir, &filenames); + ASSERT_OK(env->GetChildren(options.wal_dir, &filenames)); for (const std::string& fname : filenames) { if (fname.find(".log.trash") != 
std::string::npos) { trash_log_count++; @@ -614,7 +614,7 @@ TEST_F(DBSSTTest, OpenDBWithExistingTrash) { ASSERT_OK(WriteStringToFile(env_, "abc", dbname_ + "/" + "003.sst.trash")); // Reopen the DB and verify that it deletes existing trash files - ASSERT_OK(TryReopen(options)); + Reopen(options); sfm->WaitForEmptyTrash(); ASSERT_NOK(env_->FileExists(dbname_ + "/" + "001.sst.trash")); ASSERT_NOK(env_->FileExists(dbname_ + "/" + "002.sst.trash")); @@ -872,10 +872,12 @@ TEST_F(DBSSTTest, CancellingManualCompactionsWorks) { ASSERT_OK(Flush()); // OK, now trigger a manual compaction - dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr); + ASSERT_TRUE(dbfull() + ->CompactRange(CompactRangeOptions(), nullptr, nullptr) + .IsCompactionTooLarge()); // Wait for manual compaction to get scheduled and finish - dbfull()->TEST_WaitForCompact(true); + ASSERT_OK(dbfull()->TEST_WaitForCompact(true)); ASSERT_EQ(sfm->GetCompactionsReservedSize(), 0); // Make sure the stat is bumped @@ -885,10 +887,13 @@ TEST_F(DBSSTTest, CancellingManualCompactionsWorks) { // Now make sure CompactFiles also gets cancelled auto l0_files = collector->GetFlushedFiles(); - dbfull()->CompactFiles(ROCKSDB_NAMESPACE::CompactionOptions(), l0_files, 0); + ASSERT_TRUE( + dbfull() + ->CompactFiles(ROCKSDB_NAMESPACE::CompactionOptions(), l0_files, 0) + .IsCompactionTooLarge()); // Wait for manual compaction to get scheduled and finish - dbfull()->TEST_WaitForCompact(true); + ASSERT_OK(dbfull()->TEST_WaitForCompact(true)); ASSERT_EQ(dbfull()->immutable_db_options().statistics.get()->getTickerCount( COMPACTION_CANCELLED), @@ -903,8 +908,9 @@ TEST_F(DBSSTTest, CancellingManualCompactionsWorks) { "CompactFilesImpl:End", [&](void* /*arg*/) { completed_compactions++; }); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing(); - dbfull()->CompactFiles(ROCKSDB_NAMESPACE::CompactionOptions(), l0_files, 0); - dbfull()->TEST_WaitForCompact(true); + 
ASSERT_OK(dbfull()->CompactFiles(ROCKSDB_NAMESPACE::CompactionOptions(), + l0_files, 0)); + ASSERT_OK(dbfull()->TEST_WaitForCompact(true)); ASSERT_EQ(sfm->GetCompactionsReservedSize(), 0); ASSERT_GT(completed_compactions, 0); @@ -1008,7 +1014,7 @@ TEST_F(DBSSTTest, OpenDBWithInfiniteMaxOpenFiles) { CompactRangeOptions compact_options; compact_options.change_level = true; compact_options.target_level = 2; - db_->CompactRange(compact_options, nullptr, nullptr); + ASSERT_OK(db_->CompactRange(compact_options, nullptr, nullptr)); // Create 12 Files in L0 for (int i = 0; i < 12; i++) { @@ -1060,7 +1066,7 @@ TEST_F(DBSSTTest, GetTotalSstFilesSize) { std::string val = "val_file_" + ToString(i); ASSERT_OK(Put(Key(j), val)); } - Flush(); + ASSERT_OK(Flush()); } ASSERT_EQ("5", FilesPerLevel(0)); @@ -1084,6 +1090,7 @@ TEST_F(DBSSTTest, GetTotalSstFilesSize) { // hold current version std::unique_ptr iter1(dbfull()->NewIterator(ReadOptions())); + ASSERT_OK(iter1->status()); // Compact 5 files into 1 file in L0 ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr)); @@ -1107,12 +1114,13 @@ TEST_F(DBSSTTest, GetTotalSstFilesSize) { // hold current version std::unique_ptr iter2(dbfull()->NewIterator(ReadOptions())); + ASSERT_OK(iter2->status()); // Delete all keys and compact, this will delete all live files for (int i = 0; i < 10; i++) { ASSERT_OK(Delete(Key(i))); } - Flush(); + ASSERT_OK(Flush()); ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr)); ASSERT_EQ("", FilesPerLevel(0)); @@ -1126,6 +1134,7 @@ TEST_F(DBSSTTest, GetTotalSstFilesSize) { // Total SST files = 6 (5 original files + compacted file) ASSERT_EQ(total_sst_files_size, 6 * single_file_size); + ASSERT_OK(iter1->status()); iter1.reset(); ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.total-sst-files-size", &total_sst_files_size)); @@ -1133,6 +1142,7 @@ TEST_F(DBSSTTest, GetTotalSstFilesSize) { // Total SST files = 1 (compacted file) ASSERT_EQ(total_sst_files_size, 1 * 
single_file_size); + ASSERT_OK(iter2->status()); iter2.reset(); ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.total-sst-files-size", &total_sst_files_size)); @@ -1151,7 +1161,7 @@ TEST_F(DBSSTTest, GetTotalSstFilesSizeVersionsFilesShared) { // Generate 5 files in L0 for (int i = 0; i < 5; i++) { ASSERT_OK(Put(Key(i), "val")); - Flush(); + ASSERT_OK(Flush()); } ASSERT_EQ("5", FilesPerLevel(0)); @@ -1176,6 +1186,7 @@ TEST_F(DBSSTTest, GetTotalSstFilesSizeVersionsFilesShared) { // hold current version std::unique_ptr iter1(dbfull()->NewIterator(ReadOptions())); + ASSERT_OK(iter1->status()); // Compaction will do trivial move from L0 to L1 ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr)); @@ -1199,12 +1210,13 @@ TEST_F(DBSSTTest, GetTotalSstFilesSizeVersionsFilesShared) { // hold current version std::unique_ptr iter2(dbfull()->NewIterator(ReadOptions())); + ASSERT_OK(iter2->status()); // Delete all keys and compact, this will delete all live files for (int i = 0; i < 5; i++) { ASSERT_OK(Delete(Key(i))); } - Flush(); + ASSERT_OK(Flush()); ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr)); ASSERT_EQ("", FilesPerLevel(0)); @@ -1218,7 +1230,9 @@ TEST_F(DBSSTTest, GetTotalSstFilesSizeVersionsFilesShared) { // Total SST files = 5 (used in 2 version) ASSERT_EQ(total_sst_files_size, 5 * single_file_size); + ASSERT_OK(iter1->status()); iter1.reset(); + ASSERT_OK(iter2->status()); iter2.reset(); ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.total-sst-files-size", diff --git a/db/db_tailing_iter_test.cc b/db/db_tailing_iter_test.cc index e282e807e..d77168d96 100644 --- a/db/db_tailing_iter_test.cc +++ b/db/db_tailing_iter_test.cc @@ -31,6 +31,7 @@ TEST_F(DBTestTailingIterator, TailingIteratorSingle) { std::unique_ptr iter(db_->NewIterator(read_options)); iter->SeekToFirst(); ASSERT_TRUE(!iter->Valid()); + ASSERT_OK(iter->status()); // add a record and check that iter can see it ASSERT_OK(db_->Put(WriteOptions(), "mirko", 
"fodor")); @@ -48,6 +49,7 @@ TEST_F(DBTestTailingIterator, TailingIteratorKeepAdding) { read_options.tailing = true; std::unique_ptr iter(db_->NewIterator(read_options, handles_[1])); + ASSERT_OK(iter->status()); std::string value(1024, 'a'); const int num_records = 10000; @@ -70,7 +72,9 @@ TEST_F(DBTestTailingIterator, TailingIteratorSeekToNext) { read_options.tailing = true; std::unique_ptr iter(db_->NewIterator(read_options, handles_[1])); + ASSERT_OK(iter->status()); std::unique_ptr itern(db_->NewIterator(read_options, handles_[1])); + ASSERT_OK(itern->status()); std::string value(1024, 'a'); const int num_records = 1000; @@ -138,8 +142,11 @@ TEST_F(DBTestTailingIterator, TailingIteratorTrimSeekToNext) { Slice keyu(bufe, 20); read_options.iterate_upper_bound = &keyu; std::unique_ptr iter(db_->NewIterator(read_options, handles_[1])); + ASSERT_OK(iter->status()); std::unique_ptr itern(db_->NewIterator(read_options, handles_[1])); + ASSERT_OK(itern->status()); std::unique_ptr iterh(db_->NewIterator(read_options, handles_[1])); + ASSERT_OK(iterh->status()); std::string value(1024, 'a'); bool file_iters_deleted = false; bool file_iters_renewed_null = false; @@ -225,6 +232,7 @@ TEST_F(DBTestTailingIterator, TailingIteratorTrimSeekToNext) { ReopenWithColumnFamilies({"default", "pikachu"}, options); read_options.read_tier = kBlockCacheTier; std::unique_ptr iteri(db_->NewIterator(read_options, handles_[1])); + ASSERT_OK(iteri->status()); char buf5[32]; snprintf(buf5, sizeof(buf5), "00a0%016d", (num_records / 2) * 5 - 2); Slice target1(buf5, 20); @@ -236,6 +244,7 @@ TEST_F(DBTestTailingIterator, TailingIteratorTrimSeekToNext) { options.table_factory.reset(NewBlockBasedTableFactory()); ReopenWithColumnFamilies({"default", "pikachu"}, options); iter.reset(db_->NewIterator(read_options, handles_[1])); + ASSERT_OK(iter->status()); for (int i = 2 * num_records; i > 0; --i) { char buf1[32]; char buf2[32]; @@ -262,6 +271,7 @@ TEST_F(DBTestTailingIterator, 
TailingIteratorDeletes) { read_options.tailing = true; std::unique_ptr iter(db_->NewIterator(read_options, handles_[1])); + ASSERT_OK(iter->status()); // write a single record, read it using the iterator, then delete it ASSERT_OK(Put(1, "0test", "test")); @@ -309,6 +319,7 @@ TEST_F(DBTestTailingIterator, TailingIteratorPrefixSeek) { CreateAndReopenWithCF({"pikachu"}, options); std::unique_ptr iter(db_->NewIterator(read_options, handles_[1])); + ASSERT_OK(iter->status()); ASSERT_OK(Put(1, "0101", "test")); ASSERT_OK(Flush(1)); @@ -339,6 +350,7 @@ TEST_F(DBTestTailingIterator, TailingIteratorIncomplete) { ASSERT_OK(db_->Put(WriteOptions(), key, value)); std::unique_ptr iter(db_->NewIterator(read_options)); + ASSERT_OK(iter->status()); iter->SeekToFirst(); // we either see the entry or it's not in cache ASSERT_TRUE(iter->Valid() || iter->status().IsIncomplete()); @@ -369,6 +381,7 @@ TEST_F(DBTestTailingIterator, TailingIteratorSeekToSame) { } std::unique_ptr iter(db_->NewIterator(read_options)); + ASSERT_OK(iter->status()); // Seek to 00001. We expect to find 00002. 
std::string start_key = "00001"; iter->Seek(start_key); @@ -404,6 +417,7 @@ TEST_F(DBTestTailingIterator, TailingIteratorUpperBound) { ASSERT_OK(Put(1, "21", "21")); std::unique_ptr it(db_->NewIterator(read_options, handles_[1])); + ASSERT_OK(it->status()); it->Seek("12"); ASSERT_TRUE(it->Valid()); ASSERT_EQ("12", it->key().ToString()); @@ -479,6 +493,8 @@ TEST_F(DBTestTailingIterator, TailingIteratorGap) { it->Next(); ASSERT_TRUE(it->Valid()); ASSERT_EQ("40", it->key().ToString()); + + ASSERT_OK(it->status()); } TEST_F(DBTestTailingIterator, SeekWithUpperBoundBug) { @@ -497,6 +513,7 @@ TEST_F(DBTestTailingIterator, SeekWithUpperBoundBug) { ASSERT_OK(Flush()); std::unique_ptr iter(db_->NewIterator(read_options)); + ASSERT_OK(iter->status()); iter->Seek("aa"); ASSERT_TRUE(iter->Valid()); @@ -519,6 +536,7 @@ TEST_F(DBTestTailingIterator, SeekToFirstWithUpperBoundBug) { ASSERT_OK(Flush()); std::unique_ptr iter(db_->NewIterator(read_options)); + ASSERT_OK(iter->status()); iter->SeekToFirst(); ASSERT_TRUE(iter->Valid()); diff --git a/db/db_test.cc b/db/db_test.cc index 2b0b1ea49..dbe4a161b 100644 --- a/db/db_test.cc +++ b/db/db_test.cc @@ -1337,17 +1337,19 @@ TEST_F(DBTest, ApproximateSizesMemTable) { SizeApproximationOptions size_approx_options; size_approx_options.include_memtabtles = true; size_approx_options.include_files = true; - db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size); + ASSERT_OK( + db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size)); ASSERT_GT(size, 6000); ASSERT_LT(size, 204800); // Zero if not including mem table - db_->GetApproximateSizes(&r, 1, &size); + ASSERT_OK(db_->GetApproximateSizes(&r, 1, &size)); ASSERT_EQ(size, 0); start = Key(500); end = Key(600); r = Range(start, end); - db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size); + ASSERT_OK( + db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size)); ASSERT_EQ(size, 0); for (int i = 0; i < N; i++) { @@ -1357,13 
+1359,15 @@ TEST_F(DBTest, ApproximateSizesMemTable) { start = Key(500); end = Key(600); r = Range(start, end); - db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size); + ASSERT_OK( + db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size)); ASSERT_EQ(size, 0); start = Key(100); end = Key(1020); r = Range(start, end); - db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size); + ASSERT_OK( + db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size)); ASSERT_GT(size, 6000); options.max_write_buffer_number = 8; @@ -1389,29 +1393,32 @@ TEST_F(DBTest, ApproximateSizesMemTable) { start = Key(100); end = Key(300); r = Range(start, end); - db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size); + ASSERT_OK( + db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size)); ASSERT_EQ(size, 0); start = Key(1050); end = Key(1080); r = Range(start, end); - db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size); + ASSERT_OK( + db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size)); ASSERT_GT(size, 6000); start = Key(2100); end = Key(2300); r = Range(start, end); - db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size); + ASSERT_OK( + db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size)); ASSERT_EQ(size, 0); start = Key(1050); end = Key(1080); r = Range(start, end); uint64_t size_with_mt, size_without_mt; - db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, - &size_with_mt); + ASSERT_OK(db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, + &size_with_mt)); ASSERT_GT(size_with_mt, 6000); - db_->GetApproximateSizes(&r, 1, &size_without_mt); + ASSERT_OK(db_->GetApproximateSizes(&r, 1, &size_without_mt)); ASSERT_EQ(size_without_mt, 0); Flush(); @@ -1423,15 +1430,16 @@ TEST_F(DBTest, ApproximateSizesMemTable) { start = Key(1050); end = Key(1080); r = Range(start, end); - 
db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, - &size_with_mt); - db_->GetApproximateSizes(&r, 1, &size_without_mt); + ASSERT_OK(db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, + &size_with_mt)); + ASSERT_OK(db_->GetApproximateSizes(&r, 1, &size_without_mt)); ASSERT_GT(size_with_mt, size_without_mt); ASSERT_GT(size_without_mt, 6000); // Check that include_memtabtles flag works as expected size_approx_options.include_memtabtles = false; - db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size); + ASSERT_OK( + db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size)); ASSERT_EQ(size, size_without_mt); // Check that files_size_error_margin works as expected, when the heuristic @@ -1440,10 +1448,12 @@ TEST_F(DBTest, ApproximateSizesMemTable) { end = Key(1000 + N - 2); r = Range(start, end); size_approx_options.files_size_error_margin = -1.0; // disabled - db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size); + ASSERT_OK( + db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size)); uint64_t size2; size_approx_options.files_size_error_margin = 0.5; // enabled, but not used - db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size2); + ASSERT_OK( + db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size2)); ASSERT_EQ(size, size2); } @@ -1494,14 +1504,16 @@ TEST_F(DBTest, ApproximateSizesFilesWithErrorMargin) { // Get the precise size without any approximation heuristic uint64_t size; - db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size); + ASSERT_OK(db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, + &size)); ASSERT_NE(size, 0); // Get the size with an approximation heuristic uint64_t size2; const double error_margin = 0.2; size_approx_options.files_size_error_margin = error_margin; - db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size2); + 
ASSERT_OK(db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, + &size2)); ASSERT_LT(size2, size * (1 + error_margin)); ASSERT_GT(size2, size * (1 - error_margin)); } @@ -1517,7 +1529,7 @@ TEST_F(DBTest, ApproximateSizesFilesWithErrorMargin) { const std::string end = Key(i + 11); // overlap by 1 key const Range r(start, end); uint64_t size; - db_->GetApproximateSizes(&r, 1, &size); + ASSERT_OK(db_->GetApproximateSizes(&r, 1, &size)); ASSERT_LE(size, 11 * 100); } } @@ -1585,9 +1597,12 @@ TEST_F(DBTest, ApproximateSizes) { DestroyAndReopen(options); CreateAndReopenWithCF({"pikachu"}, options); - ASSERT_TRUE(Between(Size("", "xyz", 1), 0, 0)); + uint64_t size; + ASSERT_OK(Size("", "xyz", 1, &size)); + ASSERT_TRUE(Between(size, 0, 0)); ReopenWithColumnFamilies({"default", "pikachu"}, options); - ASSERT_TRUE(Between(Size("", "xyz", 1), 0, 0)); + ASSERT_OK(Size("", "xyz", 1, &size)); + ASSERT_TRUE(Between(size, 0, 0)); // Write 8MB (80 values, each 100K) ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0); @@ -1600,7 +1615,8 @@ TEST_F(DBTest, ApproximateSizes) { } // 0 because GetApproximateSizes() does not account for memtable space - ASSERT_TRUE(Between(Size("", Key(50), 1), 0, 0)); + ASSERT_OK(Size("", Key(50), 1, &size)); + ASSERT_TRUE(Between(size, 0, 0)); // Check sizes across recovery by reopening a few times for (int run = 0; run < 3; run++) { @@ -1608,14 +1624,17 @@ TEST_F(DBTest, ApproximateSizes) { for (int compact_start = 0; compact_start < N; compact_start += 10) { for (int i = 0; i < N; i += 10) { - ASSERT_TRUE(Between(Size("", Key(i), 1), S1 * i, S2 * i)); - ASSERT_TRUE(Between(Size("", Key(i) + ".suffix", 1), S1 * (i + 1), - S2 * (i + 1))); - ASSERT_TRUE(Between(Size(Key(i), Key(i + 10), 1), S1 * 10, S2 * 10)); + ASSERT_OK(Size("", Key(i), 1, &size)); + ASSERT_TRUE(Between(size, S1 * i, S2 * i)); + ASSERT_OK(Size("", Key(i) + ".suffix", 1, &size)); + ASSERT_TRUE(Between(size, S1 * (i + 1), S2 * (i + 1))); + ASSERT_OK(Size(Key(i), Key(i + 10), 1, &size)); 
+ ASSERT_TRUE(Between(size, S1 * 10, S2 * 10)); } - ASSERT_TRUE(Between(Size("", Key(50), 1), S1 * 50, S2 * 50)); - ASSERT_TRUE( - Between(Size("", Key(50) + ".suffix", 1), S1 * 50, S2 * 50)); + ASSERT_OK(Size("", Key(50), 1, &size)); + ASSERT_TRUE(Between(size, S1 * 50, S2 * 50)); + ASSERT_OK(Size("", Key(50) + ".suffix", 1, &size)); + ASSERT_TRUE(Between(size, S1 * 50, S2 * 50)); std::string cstart_str = Key(compact_start); std::string cend_str = Key(compact_start + 9); @@ -1650,21 +1669,32 @@ TEST_F(DBTest, ApproximateSizes_MixOfSmallAndLarge) { ASSERT_OK(Put(1, Key(7), rnd.RandomString(10000))); // Check sizes across recovery by reopening a few times + uint64_t size; for (int run = 0; run < 3; run++) { ReopenWithColumnFamilies({"default", "pikachu"}, options); - ASSERT_TRUE(Between(Size("", Key(0), 1), 0, 0)); - ASSERT_TRUE(Between(Size("", Key(1), 1), 10000, 11000)); - ASSERT_TRUE(Between(Size("", Key(2), 1), 20000, 21000)); - ASSERT_TRUE(Between(Size("", Key(3), 1), 120000, 121000)); - ASSERT_TRUE(Between(Size("", Key(4), 1), 130000, 131000)); - ASSERT_TRUE(Between(Size("", Key(5), 1), 230000, 232000)); - ASSERT_TRUE(Between(Size("", Key(6), 1), 240000, 242000)); + ASSERT_OK(Size("", Key(0), 1, &size)); + ASSERT_TRUE(Between(size, 0, 0)); + ASSERT_OK(Size("", Key(1), 1, &size)); + ASSERT_TRUE(Between(size, 10000, 11000)); + ASSERT_OK(Size("", Key(2), 1, &size)); + ASSERT_TRUE(Between(size, 20000, 21000)); + ASSERT_OK(Size("", Key(3), 1, &size)); + ASSERT_TRUE(Between(size, 120000, 121000)); + ASSERT_OK(Size("", Key(4), 1, &size)); + ASSERT_TRUE(Between(size, 130000, 131000)); + ASSERT_OK(Size("", Key(5), 1, &size)); + ASSERT_TRUE(Between(size, 230000, 232000)); + ASSERT_OK(Size("", Key(6), 1, &size)); + ASSERT_TRUE(Between(size, 240000, 242000)); // Ensure some overhead is accounted for, even without including all - ASSERT_TRUE(Between(Size("", Key(7), 1), 540500, 545000)); - ASSERT_TRUE(Between(Size("", Key(8), 1), 550500, 555000)); + ASSERT_OK(Size("", 
Key(7), 1, &size)); + ASSERT_TRUE(Between(size, 540500, 545000)); + ASSERT_OK(Size("", Key(8), 1, &size)); + ASSERT_TRUE(Between(size, 550500, 555000)); - ASSERT_TRUE(Between(Size(Key(3), Key(5), 1), 110100, 111000)); + ASSERT_OK(Size(Key(3), Key(5), 1, &size)); + ASSERT_TRUE(Between(size, 110100, 111000)); dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]); } @@ -1748,6 +1778,7 @@ TEST_F(DBTest, Snapshot) { TEST_F(DBTest, HiddenValuesAreRemoved) { anon::OptionsOverride options_override; options_override.skip_policy = kSkipNoSnapshot; + uint64_t size; do { Options options = CurrentOptions(options_override); CreateAndReopenWithCF({"pikachu"}, options); @@ -1765,7 +1796,8 @@ TEST_F(DBTest, HiddenValuesAreRemoved) { ASSERT_GT(NumTableFilesAtLevel(0, 1), 0); ASSERT_EQ(big, Get(1, "foo", snapshot)); - ASSERT_TRUE(Between(Size("", "pastfoo", 1), 50000, 60000)); + ASSERT_OK(Size("", "pastfoo", 1, &size)); + ASSERT_TRUE(Between(size, 50000, 60000)); db_->ReleaseSnapshot(snapshot); ASSERT_EQ(AllEntriesFor("foo", 1), "[ tiny, " + big + " ]"); Slice x("x"); @@ -1776,7 +1808,8 @@ TEST_F(DBTest, HiddenValuesAreRemoved) { dbfull()->TEST_CompactRange(1, nullptr, &x, handles_[1]); ASSERT_EQ(AllEntriesFor("foo", 1), "[ tiny ]"); - ASSERT_TRUE(Between(Size("", "pastfoo", 1), 0, 1000)); + ASSERT_OK(Size("", "pastfoo", 1, &size)); + ASSERT_TRUE(Between(size, 0, 1000)); // ApproximateOffsetOf() is not yet implemented in plain table format, // which is used by Size(). 
} while (ChangeOptions(kSkipUniversalCompaction | kSkipFIFOCompaction | diff --git a/db/db_test_util.cc b/db/db_test_util.cc index 2dbaee38f..93e1a42ed 100644 --- a/db/db_test_util.cc +++ b/db/db_test_util.cc @@ -1128,27 +1128,48 @@ std::string DBTestBase::FilesPerLevel(int cf) { #endif // !ROCKSDB_LITE size_t DBTestBase::CountFiles() { + size_t count = 0; std::vector files; - EXPECT_OK(env_->GetChildren(dbname_, &files)); + if (env_->GetChildren(dbname_, &files).ok()) { + count += files.size(); + } - std::vector logfiles; if (dbname_ != last_options_.wal_dir) { - Status s = env_->GetChildren(last_options_.wal_dir, &logfiles); - EXPECT_TRUE(s.ok() || s.IsNotFound()); + if (env_->GetChildren(last_options_.wal_dir, &files).ok()) { + count += files.size(); + } + } + + return count; +}; + +Status DBTestBase::CountFiles(size_t* count) { + std::vector files; + Status s = env_->GetChildren(dbname_, &files); + if (!s.ok()) { + return s; + } + size_t files_count = files.size(); + + if (dbname_ != last_options_.wal_dir) { + s = env_->GetChildren(last_options_.wal_dir, &files); + if (!s.ok()) { + return s; + } + *count = files_count + files.size(); } - return files.size() + logfiles.size(); + return Status::OK(); } -uint64_t DBTestBase::Size(const Slice& start, const Slice& limit, int cf) { +Status DBTestBase::Size(const Slice& start, const Slice& limit, int cf, + uint64_t* size) { Range r(start, limit); - uint64_t size; if (cf == 0) { - db_->GetApproximateSizes(&r, 1, &size); + return db_->GetApproximateSizes(&r, 1, size); } else { - db_->GetApproximateSizes(handles_[1], &r, 1, &size); + return db_->GetApproximateSizes(handles_[1], &r, 1, size); } - return size; } void DBTestBase::Compact(int cf, const Slice& start, const Slice& limit, diff --git a/db/db_test_util.h b/db/db_test_util.h index 2a511ae48..41eb7b020 100644 --- a/db/db_test_util.h +++ b/db/db_test_util.h @@ -1070,7 +1070,13 @@ class DBTestBase : public testing::Test { size_t CountFiles(); - uint64_t Size(const 
Slice& start, const Slice& limit, int cf = 0); + Status CountFiles(size_t* count); + + Status Size(const Slice& start, const Slice& limit, uint64_t* size) { + return Size(start, limit, 0, size); + } + + Status Size(const Slice& start, const Slice& limit, int cf, uint64_t* size); void Compact(int cf, const Slice& start, const Slice& limit, uint32_t target_path_id); diff --git a/db/db_with_timestamp_basic_test.cc b/db/db_with_timestamp_basic_test.cc index 93afa240d..5d99814c2 100644 --- a/db/db_with_timestamp_basic_test.cc +++ b/db/db_with_timestamp_basic_test.cc @@ -270,7 +270,7 @@ TEST_F(DBBasicTestWithTimestamp, GetApproximateSizes) { ASSERT_EQ(range_sizes[1], size); // Zero if not including mem table - db_->GetApproximateSizes(&r, 1, &size); + ASSERT_OK(db_->GetApproximateSizes(&r, 1, &size)); ASSERT_EQ(size, 0); start = Key(500); diff --git a/db/event_helpers.cc b/db/event_helpers.cc index b9fa35e33..23fb43a83 100644 --- a/db/event_helpers.cc +++ b/db/event_helpers.cc @@ -213,17 +213,16 @@ void EventHelpers::NotifyOnErrorRecoveryCompleted( const std::vector>& listeners, Status old_bg_error, InstrumentedMutex* db_mutex) { #ifndef ROCKSDB_LITE - if (listeners.size() == 0U) { - return; - } - db_mutex->AssertHeld(); - // release lock while notifying events - db_mutex->Unlock(); - for (auto& listener : listeners) { - listener->OnErrorRecoveryCompleted(old_bg_error); + if (listeners.size() > 0) { + db_mutex->AssertHeld(); + // release lock while notifying events + db_mutex->Unlock(); + for (auto& listener : listeners) { + listener->OnErrorRecoveryCompleted(old_bg_error); + } + db_mutex->Lock(); } old_bg_error.PermitUncheckedError(); - db_mutex->Lock(); #else (void)listeners; (void)old_bg_error; diff --git a/file/delete_scheduler.cc b/file/delete_scheduler.cc index b0c17bfc0..ed1755fd7 100644 --- a/file/delete_scheduler.cc +++ b/file/delete_scheduler.cc @@ -98,11 +98,13 @@ Status DeleteScheduler::DeleteFile(const std::string& file_path, // Update the total trash size 
uint64_t trash_file_size = 0; - Status ignored = + IOStatus io_s = fs_->GetFileSize(trash_file, IOOptions(), &trash_file_size, nullptr); - ignored.PermitUncheckedError(); //**TODO: What should we do if we failed to - // get the file size? - total_trash_size_.fetch_add(trash_file_size); + if (io_s.ok()) { + total_trash_size_.fetch_add(trash_file_size); + } + //**TODO: What should we do if we failed to + // get the file size? // Add file to delete queue { @@ -199,9 +201,7 @@ Status DeleteScheduler::MarkAsTrash(const std::string& file_path, cnt++; } if (s.ok()) { - //**TODO: What should we do if this returns an error? - sst_file_manager_->OnMoveFile(file_path, *trash_file) - .PermitUncheckedError(); + s = sst_file_manager_->OnMoveFile(file_path, *trash_file); } return s; } diff --git a/file/sst_file_manager_impl.cc b/file/sst_file_manager_impl.cc index 46d1a8920..35aa667d2 100644 --- a/file/sst_file_manager_impl.cc +++ b/file/sst_file_manager_impl.cc @@ -158,7 +158,7 @@ bool SstFileManagerImpl::IsMaxAllowedSpaceReachedIncludingCompactions() { bool SstFileManagerImpl::EnoughRoomForCompaction( ColumnFamilyData* cfd, const std::vector& inputs, - Status bg_error) { + const Status& bg_error) { MutexLock l(&mu_); uint64_t size_added_by_compaction = 0; // First check if we even have the space to do the compaction @@ -183,7 +183,7 @@ bool SstFileManagerImpl::EnoughRoomForCompaction( // seen a NoSpace() error. 
This is tin order to contain a single potentially // misbehaving DB instance and prevent it from slowing down compactions of // other DB instances - if (bg_error == Status::NoSpace() && CheckFreeSpace()) { + if (bg_error.IsNoSpace() && CheckFreeSpace()) { auto fn = TableFileName(cfd->ioptions()->cf_paths, inputs[0][0]->fd.GetNumber(), inputs[0][0]->fd.GetPathId()); diff --git a/file/sst_file_manager_impl.h b/file/sst_file_manager_impl.h index 516627f85..cb0e7fbbb 100644 --- a/file/sst_file_manager_impl.h +++ b/file/sst_file_manager_impl.h @@ -22,7 +22,7 @@ namespace ROCKSDB_NAMESPACE { class Env; class Logger; -// SstFileManager is used to track SST files in the DB and control there +// SstFileManager is used to track SST files in the DB and control their // deletion rate. // All SstFileManager public functions are thread-safe. class SstFileManagerImpl : public SstFileManager { @@ -77,7 +77,7 @@ class SstFileManagerImpl : public SstFileManager { // the full compaction size). bool EnoughRoomForCompaction(ColumnFamilyData* cfd, const std::vector& inputs, - Status bg_error); + const Status& bg_error); // Bookkeeping so total_file_sizes_ goes back to normal after compaction // finishes diff --git a/include/rocksdb/c.h b/include/rocksdb/c.h index 66808ed10..ed3382a7a 100644 --- a/include/rocksdb/c.h +++ b/include/rocksdb/c.h @@ -496,13 +496,13 @@ extern ROCKSDB_LIBRARY_API char* rocksdb_property_value_cf( extern ROCKSDB_LIBRARY_API void rocksdb_approximate_sizes( rocksdb_t* db, int num_ranges, const char* const* range_start_key, const size_t* range_start_key_len, const char* const* range_limit_key, - const size_t* range_limit_key_len, uint64_t* sizes); + const size_t* range_limit_key_len, uint64_t* sizes, char** errptr); extern ROCKSDB_LIBRARY_API void rocksdb_approximate_sizes_cf( rocksdb_t* db, rocksdb_column_family_handle_t* column_family, int num_ranges, const char* const* range_start_key, const size_t* range_start_key_len, const char* const* range_limit_key, - 
const size_t* range_limit_key_len, uint64_t* sizes); + const size_t* range_limit_key_len, uint64_t* sizes, char** errptr); extern ROCKSDB_LIBRARY_API void rocksdb_compact_range(rocksdb_t* db, const char* start_key, diff --git a/include/rocksdb/db.h b/include/rocksdb/db.h index 995d9f0f1..ee5f4acdd 100644 --- a/include/rocksdb/db.h +++ b/include/rocksdb/db.h @@ -1027,20 +1027,22 @@ class DB { // Simpler versions of the GetApproximateSizes() method above. // The include_flags argumenbt must of type DB::SizeApproximationFlags // and can not be NONE. - virtual void GetApproximateSizes(ColumnFamilyHandle* column_family, - const Range* ranges, int n, uint64_t* sizes, - uint8_t include_flags = INCLUDE_FILES) { + virtual Status GetApproximateSizes(ColumnFamilyHandle* column_family, + const Range* ranges, int n, + uint64_t* sizes, + uint8_t include_flags = INCLUDE_FILES) { SizeApproximationOptions options; options.include_memtabtles = (include_flags & SizeApproximationFlags::INCLUDE_MEMTABLES) != 0; options.include_files = (include_flags & SizeApproximationFlags::INCLUDE_FILES) != 0; - Status s = GetApproximateSizes(options, column_family, ranges, n, sizes); - s.PermitUncheckedError(); + return GetApproximateSizes(options, column_family, ranges, n, sizes); } - virtual void GetApproximateSizes(const Range* ranges, int n, uint64_t* sizes, - uint8_t include_flags = INCLUDE_FILES) { - GetApproximateSizes(DefaultColumnFamily(), ranges, n, sizes, include_flags); + virtual Status GetApproximateSizes(const Range* ranges, int n, + uint64_t* sizes, + uint8_t include_flags = INCLUDE_FILES) { + return GetApproximateSizes(DefaultColumnFamily(), ranges, n, sizes, + include_flags); } // The method is similar to GetApproximateSizes, except it diff --git a/table/block_based/block_based_table_builder.cc b/table/block_based/block_based_table_builder.cc index 20ca091c5..a28639932 100644 --- a/table/block_based/block_based_table_builder.cc +++ b/table/block_based/block_based_table_builder.cc @@ 
-1452,20 +1452,23 @@ void BlockBasedTableBuilder::WriteIndexBlock( } } // If there are more index partitions, finish them and write them out - Status s = index_builder_status; - while (ok() && s.IsIncomplete()) { - s = rep_->index_builder->Finish(&index_blocks, *index_block_handle); - if (!s.ok() && !s.IsIncomplete()) { - rep_->SetStatus(s); - return; - } - if (rep_->table_options.enable_index_compression) { - WriteBlock(index_blocks.index_block_contents, index_block_handle, false); - } else { - WriteRawBlock(index_blocks.index_block_contents, kNoCompression, - index_block_handle); + if (index_builder_status.IsIncomplete()) { + Status s = Status::Incomplete(); + while (ok() && s.IsIncomplete()) { + s = rep_->index_builder->Finish(&index_blocks, *index_block_handle); + if (!s.ok() && !s.IsIncomplete()) { + rep_->SetStatus(s); + return; + } + if (rep_->table_options.enable_index_compression) { + WriteBlock(index_blocks.index_block_contents, index_block_handle, + false); + } else { + WriteRawBlock(index_blocks.index_block_contents, kNoCompression, + index_block_handle); + } + // The last index_block_handle will be for the partition index block } - // The last index_block_handle will be for the partition index block } } diff --git a/tools/ldb_cmd.cc b/tools/ldb_cmd.cc index ecd342a9a..cf5819042 100644 --- a/tools/ldb_cmd.cc +++ b/tools/ldb_cmd.cc @@ -2493,14 +2493,12 @@ void ApproxSizeCommand::DoCommand() { Range ranges[1]; ranges[0] = Range(start_key_, end_key_); uint64_t sizes[1]; - db_->GetApproximateSizes(GetCfHandle(), ranges, 1, sizes); - fprintf(stdout, "%lu\n", (unsigned long)sizes[0]); - /* Weird that GetApproximateSizes() returns void, although documentation - * says that it returns a Status object. 
- if (!st.ok()) { - exec_state_ = LDBCommandExecuteResult::Failed(st.ToString()); + Status s = db_->GetApproximateSizes(GetCfHandle(), ranges, 1, sizes); + if (!s.ok()) { + exec_state_ = LDBCommandExecuteResult::Failed(s.ToString()); + } else { + fprintf(stdout, "%lu\n", (unsigned long)sizes[0]); } - */ } // ----------------------------------------------------------------------------