diff --git a/Makefile b/Makefile
index d1eaed72f..936bae682 100644
--- a/Makefile
+++ b/Makefile
@@ -587,6 +587,7 @@ ifdef ASSERT_STATUS_CHECKED
   blob_file_reader_test \
   bloom_test \
   cassandra_format_test \
+  cassandra_functional_test \
   cassandra_row_merge_test \
   cassandra_serialize_test \
   cleanable_test \
@@ -595,6 +596,14 @@ ifdef ASSERT_STATUS_CHECKED
   crc32c_test \
   dbformat_test \
   db_basic_test \
+  compact_files_test \
+  compaction_picker_test \
+  comparator_db_test \
+  db_encryption_test \
+  db_iter_test \
+  db_iter_stress_test \
+  db_log_iter_test \
+  db_bloom_filter_test \
   db_blob_basic_test \
   db_blob_index_test \
   db_block_cache_test \
@@ -615,6 +624,18 @@ ifdef ASSERT_STATUS_CHECKED
   deletefile_test \
   external_sst_file_test \
   options_file_test \
+  db_statistics_test \
+  db_table_properties_test \
+  db_tailing_iter_test \
+  fault_injection_test \
+  listener_test \
+  log_test \
+  manual_compaction_test \
+  obsolete_files_test \
+  perf_context_test \
+  periodic_work_scheduler_test \
+  version_set_test \
+  wal_manager_test \
   defer_test \
   filename_test \
   dynamic_bloom_test \
@@ -658,6 +680,7 @@ ifdef ASSERT_STATUS_CHECKED
   ribbon_test \
   skiplist_test \
   slice_test \
+  slice_transform_test \
   sst_dump_test \
   statistics_test \
   stats_history_test \
@@ -694,13 +717,23 @@ ifdef ASSERT_STATUS_CHECKED
   flush_job_test \
   block_based_filter_block_test \
   block_fetcher_test \
+  block_test \
+  data_block_hash_index_test \
   full_filter_block_test \
   partitioned_filter_block_test \
   column_family_test \
   file_reader_writer_test \
+  rate_limiter_test \
   corruption_test \
+  reduce_levels_test \
+  thread_list_test \
+  compact_on_deletion_collector_test \
   db_universal_compaction_test \
   import_column_family_test \
+  option_change_migration_test \
+  cuckoo_table_builder_test \
+  cuckoo_table_db_test \
+  cuckoo_table_reader_test \
   memory_test \
   table_test \
   write_batch_test \
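These Makefile entries opt each listed test binary into the ASSERT_STATUS_CHECKED build, in which a Status that is destroyed without ever being examined fails an assertion at runtime. A minimal self-contained sketch of that mechanism (simplified; RocksDB's real Status carries codes, subcodes, and messages, and only enables the check under its ASSERT_STATUS_CHECKED macro):

  #include <cassert>

  class Status {
   public:
    Status() = default;
    ~Status() { assert(checked_); }  // destroyed unchecked => assertion fires
    bool ok() const {
      checked_ = true;  // reading the result counts as checking it
      return code_ == 0;
    }
    void PermitUncheckedError() const { checked_ = true; }  // explicit opt-out
   private:
    int code_ = 0;
    mutable bool checked_ = false;
  };

  int main() {
    Status s;
    s.PermitUncheckedError();  // without this (or s.ok()), ~Status would assert
  }

Every test added to the list must therefore consume each Status it produces; the ASSERT_OK conversions in the files below are what make that true.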
diff --git a/db/compact_files_test.cc b/db/compact_files_test.cc
index 048ed6e26..5ff69040f 100644
--- a/db/compact_files_test.cc
+++ b/db/compact_files_test.cc
@@ -91,9 +91,9 @@ TEST_F(CompactFilesTest, L0ConflictsFiles) {
   // create couple files
   // Background compaction starts and waits in BackgroundCallCompaction:0
   for (int i = 0; i < kLevel0Trigger * 4; ++i) {
-    db->Put(WriteOptions(), ToString(i), "");
-    db->Put(WriteOptions(), ToString(100 - i), "");
-    db->Flush(FlushOptions());
+    ASSERT_OK(db->Put(WriteOptions(), ToString(i), ""));
+    ASSERT_OK(db->Put(WriteOptions(), ToString(100 - i), ""));
+    ASSERT_OK(db->Flush(FlushOptions()));
   }

   ROCKSDB_NAMESPACE::ColumnFamilyMetaData meta;
@@ -138,18 +138,18 @@ TEST_F(CompactFilesTest, ObsoleteFiles) {
   DB* db = nullptr;
   DestroyDB(db_name_, options);
   Status s = DB::Open(options, db_name_, &db);
-  assert(s.ok());
-  assert(db);
+  ASSERT_OK(s);
+  ASSERT_NE(db, nullptr);

   // create couple files
   for (int i = 1000; i < 2000; ++i) {
-    db->Put(WriteOptions(), ToString(i),
-            std::string(kWriteBufferSize / 10, 'a' + (i % 26)));
+    ASSERT_OK(db->Put(WriteOptions(), ToString(i),
+                      std::string(kWriteBufferSize / 10, 'a' + (i % 26))));
   }

   auto l0_files = collector->GetFlushedFiles();
   ASSERT_OK(db->CompactFiles(CompactionOptions(), l0_files, 1));
-  static_cast_with_check<DBImpl>(db)->TEST_WaitForCompact();
+  ASSERT_OK(static_cast_with_check<DBImpl>(db)->TEST_WaitForCompact());

   // verify all compaction input files are deleted
   for (auto fname : l0_files) {
@@ -182,15 +182,17 @@ TEST_F(CompactFilesTest, NotCutOutputOnLevel0) {

   // create couple files
   for (int i = 0; i < 500; ++i) {
-    db->Put(WriteOptions(), ToString(i),
-            std::string(1000, 'a' + (i % 26)));
+    ASSERT_OK(db->Put(WriteOptions(), ToString(i),
+                      std::string(1000, 'a' + (i % 26))));
   }
-  static_cast_with_check<DBImpl>(db)->TEST_WaitForFlushMemTable();
+  ASSERT_OK(static_cast_with_check<DBImpl>(db)->TEST_WaitForFlushMemTable());
   auto l0_files_1 = collector->GetFlushedFiles();
   collector->ClearFlushedFiles();
   for (int i = 0; i < 500; ++i) {
-    db->Put(WriteOptions(), ToString(i), std::string(1000, 'a' + (i % 26)));
+    ASSERT_OK(db->Put(WriteOptions(), ToString(i),
+                      std::string(1000, 'a' + (i % 26))));
   }
-  static_cast_with_check<DBImpl>(db)->TEST_WaitForFlushMemTable();
+  ASSERT_OK(static_cast_with_check<DBImpl>(db)->TEST_WaitForFlushMemTable());
   auto l0_files_2 = collector->GetFlushedFiles();
   ASSERT_OK(db->CompactFiles(CompactionOptions(), l0_files_1, 0));
   ASSERT_OK(db->CompactFiles(CompactionOptions(), l0_files_2, 0));
@@ -213,13 +215,13 @@ TEST_F(CompactFilesTest, CapturingPendingFiles) {
   DB* db = nullptr;
   DestroyDB(db_name_, options);
   Status s = DB::Open(options, db_name_, &db);
-  assert(s.ok());
+  ASSERT_OK(s);
   assert(db);

   // Create 5 files.
   for (int i = 0; i < 5; ++i) {
-    db->Put(WriteOptions(), "key" + ToString(i), "value");
-    db->Flush(FlushOptions());
+    ASSERT_OK(db->Put(WriteOptions(), "key" + ToString(i), "value"));
+    ASSERT_OK(db->Flush(FlushOptions()));
   }

   auto l0_files = collector->GetFlushedFiles();
@@ -237,8 +239,8 @@ TEST_F(CompactFilesTest, CapturingPendingFiles) {

   // In the meantime flush another file.
   TEST_SYNC_POINT("CompactFilesTest.CapturingPendingFiles:0");
-  db->Put(WriteOptions(), "key5", "value");
-  db->Flush(FlushOptions());
+  ASSERT_OK(db->Put(WriteOptions(), "key5", "value"));
+  ASSERT_OK(db->Flush(FlushOptions()));
   TEST_SYNC_POINT("CompactFilesTest.CapturingPendingFiles:1");

   compaction_thread.join();
@@ -249,7 +251,7 @@ TEST_F(CompactFilesTest, CapturingPendingFiles) {

   // Make sure we can reopen the DB.
   s = DB::Open(options, db_name_, &db);
-  ASSERT_TRUE(s.ok());
+  ASSERT_OK(s);
   assert(db);
   delete db;
 }
@@ -293,8 +295,8 @@ TEST_F(CompactFilesTest, CompactionFilterWithGetSv) {
   cf->SetDB(db);

   // Write one L0 file
-  db->Put(WriteOptions(), "K1", "V1");
-  db->Flush(FlushOptions());
+  ASSERT_OK(db->Put(WriteOptions(), "K1", "V1"));
+  ASSERT_OK(db->Flush(FlushOptions()));

   // Compact all L0 files using CompactFiles
   ROCKSDB_NAMESPACE::ColumnFamilyMetaData meta;
@@ -337,8 +339,8 @@ TEST_F(CompactFilesTest, SentinelCompressionType) {
     DB* db = nullptr;
     ASSERT_OK(DB::Open(options, db_name_, &db));

-    db->Put(WriteOptions(), "key", "val");
-    db->Flush(FlushOptions());
+    ASSERT_OK(db->Put(WriteOptions(), "key", "val"));
+    ASSERT_OK(db->Flush(FlushOptions()));

     auto l0_files = collector->GetFlushedFiles();
     ASSERT_EQ(1, l0_files.size());
@@ -377,14 +379,15 @@ TEST_F(CompactFilesTest, GetCompactionJobInfo) {
   DB* db = nullptr;
   DestroyDB(db_name_, options);
   Status s = DB::Open(options, db_name_, &db);
-  assert(s.ok());
+  ASSERT_OK(s);
   assert(db);

   // create couple files
   for (int i = 0; i < 500; ++i) {
-    db->Put(WriteOptions(), ToString(i), std::string(1000, 'a' + (i % 26)));
+    ASSERT_OK(db->Put(WriteOptions(), ToString(i),
+                      std::string(1000, 'a' + (i % 26))));
   }
-  static_cast_with_check<DBImpl>(db)->TEST_WaitForFlushMemTable();
+  ASSERT_OK(static_cast_with_check<DBImpl>(db)->TEST_WaitForFlushMemTable());
   auto l0_files_1 = collector->GetFlushedFiles();
   CompactionOptions co;
   co.compression = CompressionType::kLZ4Compression;
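The conversions in this file set the pattern for the rest of the patch: a return value that used to be dropped (db->Put(...), db->Flush(...)) is wrapped in ASSERT_OK(...), and bare assert(s.ok()) becomes ASSERT_OK(s). Conceptually, ASSERT_OK both checks the status, which satisfies the build described above, and reports the failure message:

  // What ASSERT_OK(expr) amounts to, conceptually (a sketch, not the
  // macro's literal expansion in RocksDB's test utilities):
  //   Status _s = (expr);
  //   ASSERT_TRUE(_s.ok()) << _s.ToString();

Unlike assert, it also stays active in release builds, where NDEBUG would compile assert away entirely.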
diff --git a/db/compaction/compaction_picker_test.cc b/db/compaction/compaction_picker_test.cc
index c415fd811..bcd977667 100644
--- a/db/compaction/compaction_picker_test.cc
+++ b/db/compaction/compaction_picker_test.cc
@@ -141,7 +141,7 @@ class CompactionPickerTest : public testing::Test {
     if (temp_vstorage_) {
       VersionBuilder builder(FileOptions(), &ioptions_, nullptr,
                              vstorage_.get(), nullptr);
-      builder.SaveTo(temp_vstorage_.get());
+      ASSERT_OK(builder.SaveTo(temp_vstorage_.get()));
       vstorage_ = std::move(temp_vstorage_);
     }
     vstorage_->CalculateBaseBytes(ioptions_, mutable_cf_options_);
diff --git a/db/cuckoo_table_db_test.cc b/db/cuckoo_table_db_test.cc
index 7bb9478ac..9b76c03d5 100644
--- a/db/cuckoo_table_db_test.cc
+++ b/db/cuckoo_table_db_test.cc
@@ -129,10 +129,10 @@ TEST_F(CuckooTableDBTest, Flush) {
   ASSERT_OK(Put("key1", "v1"));
   ASSERT_OK(Put("key2", "v2"));
   ASSERT_OK(Put("key3", "v3"));
-  dbfull()->TEST_FlushMemTable();
+  ASSERT_OK(dbfull()->TEST_FlushMemTable());

   TablePropertiesCollection ptc;
-  reinterpret_cast<DBImpl*>(dbfull())->GetPropertiesOfAllTables(&ptc);
+  ASSERT_OK(reinterpret_cast<DBImpl*>(dbfull())->GetPropertiesOfAllTables(&ptc));
   ASSERT_EQ(1U, ptc.size());
   ASSERT_EQ(3U, ptc.begin()->second->num_entries);
   ASSERT_EQ("1", FilesPerLevel());
@@ -146,9 +146,9 @@ TEST_F(CuckooTableDBTest, Flush) {
   ASSERT_OK(Put("key4", "v4"));
   ASSERT_OK(Put("key5", "v5"));
   ASSERT_OK(Put("key6", "v6"));
-  dbfull()->TEST_FlushMemTable();
+  ASSERT_OK(dbfull()->TEST_FlushMemTable());

-  reinterpret_cast<DBImpl*>(dbfull())->GetPropertiesOfAllTables(&ptc);
+  ASSERT_OK(reinterpret_cast<DBImpl*>(dbfull())->GetPropertiesOfAllTables(&ptc));
   ASSERT_EQ(2U, ptc.size());
   auto row = ptc.begin();
   ASSERT_EQ(3U, row->second->num_entries);
@@ -164,8 +164,8 @@ TEST_F(CuckooTableDBTest, Flush) {
   ASSERT_OK(Delete("key6"));
   ASSERT_OK(Delete("key5"));
   ASSERT_OK(Delete("key4"));
-  dbfull()->TEST_FlushMemTable();
-  reinterpret_cast<DBImpl*>(dbfull())->GetPropertiesOfAllTables(&ptc);
+  ASSERT_OK(dbfull()->TEST_FlushMemTable());
+  ASSERT_OK(reinterpret_cast<DBImpl*>(dbfull())->GetPropertiesOfAllTables(&ptc));
   ASSERT_EQ(3U, ptc.size());
   row = ptc.begin();
   ASSERT_EQ(3U, row->second->num_entries);
@@ -186,10 +186,10 @@ TEST_F(CuckooTableDBTest, FlushWithDuplicateKeys) {
   ASSERT_OK(Put("key1", "v1"));
   ASSERT_OK(Put("key2", "v2"));
   ASSERT_OK(Put("key1", "v3"));  // Duplicate
-  dbfull()->TEST_FlushMemTable();
+  ASSERT_OK(dbfull()->TEST_FlushMemTable());

   TablePropertiesCollection ptc;
-  reinterpret_cast<DBImpl*>(dbfull())->GetPropertiesOfAllTables(&ptc);
+  ASSERT_OK(reinterpret_cast<DBImpl*>(dbfull())->GetPropertiesOfAllTables(&ptc));
   ASSERT_EQ(1U, ptc.size());
   ASSERT_EQ(2U, ptc.begin()->second->num_entries);
   ASSERT_EQ("1", FilesPerLevel());
@@ -219,7 +219,7 @@ TEST_F(CuckooTableDBTest, Uint64Comparator) {
   ASSERT_OK(Put(Uint64Key(1), "v1"));
   ASSERT_OK(Put(Uint64Key(2), "v2"));
   ASSERT_OK(Put(Uint64Key(3), "v3"));
-  dbfull()->TEST_FlushMemTable();
+  ASSERT_OK(dbfull()->TEST_FlushMemTable());

   ASSERT_EQ("v1", Get(Uint64Key(1)));
   ASSERT_EQ("v2", Get(Uint64Key(2)));
@@ -228,10 +228,10 @@ TEST_F(CuckooTableDBTest, Uint64Comparator) {
   // Add more keys.
   ASSERT_OK(Delete(Uint64Key(2)));  // Delete.
-  dbfull()->TEST_FlushMemTable();
+  ASSERT_OK(dbfull()->TEST_FlushMemTable());
   ASSERT_OK(Put(Uint64Key(3), "v0"));  // Update.
   ASSERT_OK(Put(Uint64Key(4), "v4"));
-  dbfull()->TEST_FlushMemTable();
+  ASSERT_OK(dbfull()->TEST_FlushMemTable());

   ASSERT_EQ("v1", Get(Uint64Key(1)));
   ASSERT_EQ("NOT_FOUND", Get(Uint64Key(2)));
   ASSERT_EQ("v0", Get(Uint64Key(3)));
@@ -251,11 +251,11 @@ TEST_F(CuckooTableDBTest, CompactionIntoMultipleFiles) {
   for (int idx = 0; idx < 28; ++idx) {
     ASSERT_OK(Put(Key(idx), std::string(10000, 'a' + char(idx))));
   }
-  dbfull()->TEST_WaitForFlushMemTable();
+  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
   ASSERT_EQ("1", FilesPerLevel());

-  dbfull()->TEST_CompactRange(0, nullptr, nullptr, nullptr,
-                              true /* disallow trivial move */);
+  ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr, nullptr,
+                                        true /* disallow trivial move */));
   ASSERT_EQ("0,2", FilesPerLevel());
   for (int idx = 0; idx < 28; ++idx) {
     ASSERT_EQ(std::string(10000, 'a' + char(idx)), Get(Key(idx)));
@@ -274,15 +274,15 @@ TEST_F(CuckooTableDBTest, SameKeyInsertedInTwoDifferentFilesAndCompacted) {
   for (int idx = 0; idx < 11; ++idx) {
     ASSERT_OK(Put(Key(idx), std::string(10000, 'a')));
   }
-  dbfull()->TEST_WaitForFlushMemTable();
+  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
   ASSERT_EQ("1", FilesPerLevel());

   // Generate one more file in level-0, and should trigger level-0 compaction
   for (int idx = 0; idx < 11; ++idx) {
     ASSERT_OK(Put(Key(idx), std::string(10000, 'a' + char(idx))));
   }
-  dbfull()->TEST_WaitForFlushMemTable();
-  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
+  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
+  ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr));

   ASSERT_EQ("0,1", FilesPerLevel());
   for (int idx = 0; idx < 11; ++idx) {
@@ -303,7 +303,7 @@ TEST_F(CuckooTableDBTest, AdaptiveTable) {
   ASSERT_OK(Put("key1", "v1"));
   ASSERT_OK(Put("key2", "v2"));
   ASSERT_OK(Put("key3", "v3"));
-  dbfull()->TEST_FlushMemTable();
+  ASSERT_OK(dbfull()->TEST_FlushMemTable());

   // Write some keys using plain table.
   std::shared_ptr<TableFactory> block_based_factory(
@@ -319,7 +319,7 @@ TEST_F(CuckooTableDBTest, AdaptiveTable) {
   Reopen(&options);
   ASSERT_OK(Put("key4", "v4"));
   ASSERT_OK(Put("key1", "v5"));
-  dbfull()->TEST_FlushMemTable();
+  ASSERT_OK(dbfull()->TEST_FlushMemTable());

   // Write some keys using block based table.
   options.table_factory.reset(NewAdaptiveTableFactory(
@@ -328,7 +328,7 @@ TEST_F(CuckooTableDBTest, AdaptiveTable) {
   Reopen(&options);
   ASSERT_OK(Put("key5", "v6"));
   ASSERT_OK(Put("key2", "v7"));
-  dbfull()->TEST_FlushMemTable();
+  ASSERT_OK(dbfull()->TEST_FlushMemTable());

   ASSERT_EQ("v5", Get("key1"));
   ASSERT_EQ("v7", Get("key2"));
diff --git a/db/db_bloom_filter_test.cc b/db/db_bloom_filter_test.cc
index 191d72060..7c9277c14 100644
--- a/db/db_bloom_filter_test.cc
+++ b/db/db_bloom_filter_test.cc
@@ -128,8 +128,8 @@ TEST_P(DBBloomFilterTestDefFormatVersion, KeyMayExist) {
     ASSERT_EQ(cache_added, TestGetTickerCount(options, BLOCK_CACHE_ADD));

     ASSERT_OK(Flush(1));
-    dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1],
-                                true /* disallow trivial move */);
+    ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1],
+                                          true /* disallow trivial move */));

     numopen = TestGetTickerCount(options, NO_FILE_OPENS);
     cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD);
@@ -178,7 +178,7 @@ TEST_F(DBBloomFilterTest, GetFilterByPrefixBloomCustomPrefixExtractor) {
     ASSERT_OK(dbfull()->Put(wo, "barbarbar2", "foo2"));
     ASSERT_OK(dbfull()->Put(wo, "foofoofoo", "bar"));

-    dbfull()->Flush(fo);
+    ASSERT_OK(dbfull()->Flush(fo));

     ASSERT_EQ("foo", Get("barbarbar"));
     ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 0);
@@ -244,7 +244,7 @@ TEST_F(DBBloomFilterTest, GetFilterByPrefixBloom) {
     ASSERT_OK(dbfull()->Put(wo, "barbarbar2", "foo2"));
     ASSERT_OK(dbfull()->Put(wo, "foofoofoo", "bar"));

-    dbfull()->Flush(fo);
+    ASSERT_OK(dbfull()->Flush(fo));

     ASSERT_EQ("foo", Get("barbarbar"));
     ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 0);
@@ -297,7 +297,7 @@ TEST_F(DBBloomFilterTest, WholeKeyFilterProp) {
     // ranges.
     ASSERT_OK(dbfull()->Put(wo, "aaa", ""));
     ASSERT_OK(dbfull()->Put(wo, "zzz", ""));
-    dbfull()->Flush(fo);
+    ASSERT_OK(dbfull()->Flush(fo));

     Reopen(options);
     ASSERT_EQ("NOT_FOUND", Get("foo"));
@@ -328,7 +328,7 @@ TEST_F(DBBloomFilterTest, WholeKeyFilterProp) {
     // ranges.
     ASSERT_OK(dbfull()->Put(wo, "aaa", ""));
     ASSERT_OK(dbfull()->Put(wo, "zzz", ""));
-    db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+    ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));

     // Reopen with both of whole key off and prefix extractor enabled.
     // Still no bloom filter should be used.
@@ -351,7 +351,7 @@ TEST_F(DBBloomFilterTest, WholeKeyFilterProp) {
     // ranges.
     ASSERT_OK(dbfull()->Put(wo, "aaa", ""));
     ASSERT_OK(dbfull()->Put(wo, "zzz", ""));
-    db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+    ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));

     options.prefix_extractor.reset();
     bbto.whole_key_filtering = true;
@@ -364,7 +364,7 @@ TEST_F(DBBloomFilterTest, WholeKeyFilterProp) {
     // not filtered out by key ranges.
     ASSERT_OK(dbfull()->Put(wo, "aaa", ""));
     ASSERT_OK(dbfull()->Put(wo, "zzz", ""));
-    Flush();
+    ASSERT_OK(Flush());

     // Now we have two files:
     // File 1: An older file with prefix bloom.
@@ -467,7 +467,7 @@ TEST_P(DBBloomFilterTestWithParam, BloomFilter) {
       for (int i = 0; i < N; i += 100) {
         ASSERT_OK(Put(1, Key(i), Key(i)));
       }
-      Flush(1);
+      ASSERT_OK(Flush(1));

       // Prevent auto compactions triggered by seeks
       env_->delay_sstable_sync_.store(true, std::memory_order_release);
@@ -880,7 +880,7 @@ TEST_F(DBBloomFilterTest, ContextCustomFilterPolicy) {

       // Destroy
       ASSERT_OK(dbfull()->DropColumnFamily(handles_[1]));
-      dbfull()->DestroyColumnFamilyHandle(handles_[1]);
+      ASSERT_OK(dbfull()->DestroyColumnFamilyHandle(handles_[1]));
       handles_[1] = nullptr;
     }
   }
@@ -1444,9 +1444,9 @@ void PrefixScanInit(DBBloomFilterTest* dbtest) {
   snprintf(buf, sizeof(buf), "%02d______:end", 10);
   keystr = std::string(buf);
   ASSERT_OK(dbtest->Put(keystr, keystr));
-  dbtest->Flush();
-  dbtest->dbfull()->CompactRange(CompactRangeOptions(), nullptr,
-                                 nullptr);  // move to level 1
+  ASSERT_OK(dbtest->Flush());
+  ASSERT_OK(dbtest->dbfull()->CompactRange(CompactRangeOptions(), nullptr,
+                                           nullptr));  // move to level 1

   // GROUP 1
   for (int i = 1; i <= small_range_sstfiles; i++) {
@@ -1563,21 +1563,21 @@ TEST_F(DBBloomFilterTest, OptimizeFiltersForHits) {
   for (int key : keys) {
     ASSERT_OK(Put(1, Key(key), "val"));
     if (++num_inserted % 1000 == 0) {
-      dbfull()->TEST_WaitForFlushMemTable();
-      dbfull()->TEST_WaitForCompact();
+      ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
+      ASSERT_OK(dbfull()->TEST_WaitForCompact());
     }
   }
   ASSERT_OK(Put(1, Key(0), "val"));
   ASSERT_OK(Put(1, Key(numkeys), "val"));
   ASSERT_OK(Flush(1));
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());

   if (NumTableFilesAtLevel(0, 1) == 0) {
     // No Level 0 file. Create one.
     ASSERT_OK(Put(1, Key(0), "val"));
     ASSERT_OK(Put(1, Key(numkeys), "val"));
     ASSERT_OK(Flush(1));
-    dbfull()->TEST_WaitForCompact();
+    ASSERT_OK(dbfull()->TEST_WaitForCompact());
   }

   for (int i = 1; i < numkeys; i += 2) {
@@ -1682,7 +1682,8 @@ TEST_F(DBBloomFilterTest, OptimizeFiltersForHits) {
       BottommostLevelCompaction::kSkip;
   compact_options.change_level = true;
   compact_options.target_level = 7;
-  db_->CompactRange(compact_options, handles_[1], nullptr, nullptr);
+  ASSERT_TRUE(db_->CompactRange(compact_options, handles_[1], nullptr, nullptr)
+                  .IsNotSupported());

   ASSERT_EQ(trivial_move, 1);
   ASSERT_EQ(non_trivial_move, 0);
@@ -1714,10 +1715,10 @@ TEST_F(DBBloomFilterTest, OptimizeFiltersForHits) {

 int CountIter(std::unique_ptr<Iterator>& iter, const Slice& key) {
   int count = 0;
-  for (iter->Seek(key); iter->Valid() && iter->status() == Status::OK();
-       iter->Next()) {
+  for (iter->Seek(key); iter->Valid(); iter->Next()) {
     count++;
   }
+  EXPECT_OK(iter->status());
   return count;
 }
@@ -1747,7 +1748,7 @@ TEST_F(DBBloomFilterTest, DynamicBloomFilterUpperBound) {
     ASSERT_OK(Put("abcdxxx1", "val2"));
     ASSERT_OK(Put("abcdxxx2", "val3"));
     ASSERT_OK(Put("abcdxxx3", "val4"));
-    dbfull()->Flush(FlushOptions());
+    ASSERT_OK(dbfull()->Flush(FlushOptions()));
     {
       // prefix_extractor has not changed, BF will always be read
       Slice upper_bound("abce");
@@ -1905,7 +1906,7 @@ TEST_F(DBBloomFilterTest, DynamicBloomFilterMultipleSST) {
     ASSERT_OK(Put("foo4", "bar4"));
     ASSERT_OK(Put("foq5", "bar5"));
     ASSERT_OK(Put("fpb", "1"));
-    dbfull()->Flush(FlushOptions());
+    ASSERT_OK(dbfull()->Flush(FlushOptions()));
     {
       // BF is cappped:3 now
      std::unique_ptr<Iterator> iter_tmp(db_->NewIterator(read_options));
@@ -1929,7 +1930,7 @@ TEST_F(DBBloomFilterTest, DynamicBloomFilterMultipleSST) {
     ASSERT_OK(Put("foo7", "bar7"));
     ASSERT_OK(Put("foq8", "bar8"));
     ASSERT_OK(Put("fpc", "2"));
-    dbfull()->Flush(FlushOptions());
+    ASSERT_OK(dbfull()->Flush(FlushOptions()));
     {
       // BF is fixed:2 now
      std::unique_ptr<Iterator> iter_tmp(db_->NewIterator(read_options));
@@ -2040,10 +2041,10 @@ TEST_F(DBBloomFilterTest, DynamicBloomFilterNewColumnFamily) {
       ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_PREFIX_USEFUL), 0);
     }
     ASSERT_OK(dbfull()->DropColumnFamily(handles_[2]));
-    dbfull()->DestroyColumnFamilyHandle(handles_[2]);
+    ASSERT_OK(dbfull()->DestroyColumnFamilyHandle(handles_[2]));
     handles_[2] = nullptr;
     ASSERT_OK(dbfull()->DropColumnFamily(handles_[1]));
-    dbfull()->DestroyColumnFamilyHandle(handles_[1]);
+    ASSERT_OK(dbfull()->DestroyColumnFamilyHandle(handles_[1]));
     handles_[1] = nullptr;
     iteration++;
   }
diff --git a/db/db_impl/db_impl_compaction_flush.cc b/db/db_impl/db_impl_compaction_flush.cc
index 7694c789b..479220481 100644
--- a/db/db_impl/db_impl_compaction_flush.cc
+++ b/db/db_impl/db_impl_compaction_flush.cc
@@ -1391,8 +1391,6 @@ Status DBImpl::ReFitLevel(ColumnFamilyData* cfd, int level, int target_level) {

   SuperVersionContext sv_context(/* create_superversion */ true);

-  Status status;
-
   InstrumentedMutexLock guard_lock(&mutex_);

   // only allow one thread refitting
@@ -1456,8 +1454,9 @@ Status DBImpl::ReFitLevel(ColumnFamilyData* cfd, int level, int target_level) {
                     "[%s] Apply version edit:\n%s", cfd->GetName().c_str(),
                     edit.DebugString().data());

-    status = versions_->LogAndApply(cfd, mutable_cf_options, &edit, &mutex_,
-                                    directories_.GetDbDir());
+    Status status = versions_->LogAndApply(cfd, mutable_cf_options, &edit,
+                                           &mutex_, directories_.GetDbDir());
+
     InstallSuperVersionAndScheduleWork(cfd, &sv_context, mutable_cf_options);

     ROCKS_LOG_DEBUG(immutable_db_options_.info_log, "[%s] LogAndApply: %s\n",
@@ -1468,12 +1467,14 @@ Status DBImpl::ReFitLevel(ColumnFamilyData* cfd, int level, int target_level) {
                       "[%s] After refitting:\n%s", cfd->GetName().c_str(),
                       cfd->current()->DebugString().data());
     }
+    sv_context.Clean();
+    refitting_level_ = false;
+
+    return status;
   }

-  sv_context.Clean();
   refitting_level_ = false;
-
-  return status;
+  return Status::OK();
 }

 int DBImpl::NumberLevels(ColumnFamilyHandle* column_family) {
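The ReFitLevel() rewrite above is about Status lifetime as much as control flow: previously a single function-scope Status was declared before the mutex was taken and returned at the bottom, so it existed, unset and unchecked, across paths that never produced an error. Now the Status is declared in the one branch that calls LogAndApply() and returned right after cleanup, while the fall-through path returns Status::OK(). A self-contained sketch of the shape (names hypothetical, not the real function):

  #include <cassert>

  struct Status {
    static Status OK() { return Status{}; }
    bool ok() const { return true; }
  };

  Status ReFit(bool needs_edit) {
    if (needs_edit) {
      Status status = Status::OK();  // stands in for LogAndApply(...)
      // ...install superversion, log, clean up...
      return status;  // lives exactly as long as the operation that set it
    }
    // No operation produced a Status on this path, so none can go unchecked.
    return Status::OK();
  }

  int main() { assert(ReFit(true).ok() && ReFit(false).ok()); }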
diff --git a/db/db_impl/db_impl_files.cc b/db/db_impl/db_impl_files.cc
index f5e19dc18..74bffffbe 100644
--- a/db/db_impl/db_impl_files.cc
+++ b/db/db_impl/db_impl_files.cc
@@ -191,7 +191,8 @@ void DBImpl::FindObsoleteFiles(JobContext* job_context, bool force,
       // set of all files in the directory. We'll exclude files that are still
       // alive in the subsequent processings.
       std::vector<std::string> files;
-      env_->GetChildren(path, &files).PermitUncheckedError();  // Ignore errors
+      Status s = env_->GetChildren(path, &files);
+      s.PermitUncheckedError();  // TODO: What should we do on error?
       for (const std::string& file : files) {
         uint64_t number;
         FileType type;
@@ -207,7 +208,8 @@ void DBImpl::FindObsoleteFiles(JobContext* job_context, bool force,
           continue;
         }

-        // TODO(icanadi) clean up this mess to avoid having one-off "/" prefixes
+        // TODO(icanadi) clean up this mess to avoid having one-off "/"
+        // prefixes
         job_context->full_scan_candidate_files.emplace_back("/" + file, path);
       }
     }
@@ -215,9 +217,8 @@ void DBImpl::FindObsoleteFiles(JobContext* job_context, bool force,
     // Add log files in wal_dir
     if (immutable_db_options_.wal_dir != dbname_) {
       std::vector<std::string> log_files;
-      env_->GetChildren(immutable_db_options_.wal_dir,
-                        &log_files)
-          .PermitUncheckedError();  // Ignore errors
+      Status s = env_->GetChildren(immutable_db_options_.wal_dir, &log_files);
+      s.PermitUncheckedError();  // TODO: What should we do on error?
       for (const std::string& log_file : log_files) {
         job_context->full_scan_candidate_files.emplace_back(
             log_file, immutable_db_options_.wal_dir);
@@ -227,9 +228,9 @@ void DBImpl::FindObsoleteFiles(JobContext* job_context, bool force,
     if (!immutable_db_options_.db_log_dir.empty() &&
         immutable_db_options_.db_log_dir != dbname_) {
       std::vector<std::string> info_log_files;
-      // Ignore errors
-      env_->GetChildren(immutable_db_options_.db_log_dir, &info_log_files)
-          .PermitUncheckedError();
+      Status s =
+          env_->GetChildren(immutable_db_options_.db_log_dir, &info_log_files);
+      s.PermitUncheckedError();  // TODO: What should we do on error?
       for (std::string& log_file : info_log_files) {
         job_context->full_scan_candidate_files.emplace_back(
             log_file, immutable_db_options_.db_log_dir);
diff --git a/db/db_impl/db_impl_open.cc b/db/db_impl/db_impl_open.cc
index 0ab52a6c2..3ed8e9de3 100644
--- a/db/db_impl/db_impl_open.cc
+++ b/db/db_impl/db_impl_open.cc
@@ -147,7 +147,8 @@ DBOptions SanitizeOptions(const std::string& dbname, const DBOptions& src) {
     // DeleteScheduler::CleanupDirectory on the same dir later, it will be
     // safe
     std::vector<std::string> filenames;
-    result.env->GetChildren(result.wal_dir, &filenames).PermitUncheckedError();
+    Status s = result.env->GetChildren(result.wal_dir, &filenames);
+    s.PermitUncheckedError();  //**TODO: What to do on error?
     for (std::string& filename : filenames) {
       if (filename.find(".log.trash", filename.length() -
                                           std::string(".log.trash").length()) !=
@@ -1739,9 +1740,8 @@ Status DBImpl::Open(const DBOptions& db_options, const std::string& dbname,
     paths.erase(std::unique(paths.begin(), paths.end()), paths.end());
     for (auto& path : paths) {
       std::vector<std::string> existing_files;
-      // TODO: Check for errors here?
       impl->immutable_db_options_.env->GetChildren(path, &existing_files)
-          .PermitUncheckedError();
+          .PermitUncheckedError();  //**TODO: What to do on error?
       for (auto& file_name : existing_files) {
         uint64_t file_number;
         FileType file_type;
diff --git a/db/db_impl/db_impl_write.cc b/db/db_impl/db_impl_write.cc
index d8c7527f2..d3dd7a0a9 100644
--- a/db/db_impl/db_impl_write.cc
+++ b/db/db_impl/db_impl_write.cc
@@ -163,7 +163,6 @@ Status DBImpl::WriteImpl(const WriteOptions& write_options,
   StopWatch write_sw(env_, immutable_db_options_.statistics.get(), DB_WRITE);

   write_thread_.JoinBatchGroup(&w);
-  Status status;
   if (w.state == WriteThread::STATE_PARALLEL_MEMTABLE_WRITER) {
     // we are a non-leader in a parallel group
@@ -193,8 +192,6 @@ Status DBImpl::WriteImpl(const WriteOptions& write_options,
     }
     assert(w.state == WriteThread::STATE_COMPLETED);
     // STATE_COMPLETED conditional below handles exit
-
-    status = w.FinalStatus();
   }
   if (w.state == WriteThread::STATE_COMPLETED) {
     if (log_used != nullptr) {
@@ -204,13 +201,11 @@ Status DBImpl::WriteImpl(const WriteOptions& write_options,
       *seq_used = w.sequence;
     }
     // write is complete and leader has updated sequence
-    // Should we handle it?
-    status.PermitUncheckedError();
     return w.FinalStatus();
   }
   // else we are the leader of the write batch group
   assert(w.state == WriteThread::STATE_GROUP_LEADER);
-
+  Status status;
   // Once reaches this point, the current writer "w" will try to do its write
   // job.  It may also pick up some of the remaining writers in the "writers_"
   // when it finds suitable, and finish them in the same write batch.
@@ -531,6 +526,8 @@ Status DBImpl::PipelinedWriteImpl(const WriteOptions& write_options,
   PERF_TIMER_STOP(write_pre_and_post_process_time);

   IOStatus io_s;
+  io_s.PermitUncheckedError();  // Allow io_s to be uninitialized
+
   if (w.status.ok() && !write_options.disableWAL) {
     PERF_TIMER_GUARD(write_wal_time);
     stats->AddDBStats(InternalStats::kIntStatsWriteDoneBySelf, 1);
@@ -776,6 +773,7 @@ Status DBImpl::WriteImplWALOnly(
   }
   Status status;
   IOStatus io_s;
+  io_s.PermitUncheckedError();  // Allow io_s to be uninitialized
   if (!write_options.disableWAL) {
     io_s = ConcurrentWriteToWAL(write_group, log_used, &last_sequence, seq_inc);
     status = io_s;
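Across these three db_impl files the recurring idiom is PermitUncheckedError(): it marks a Status as checked without reading it, either because the caller deliberately ignores the error (the GetChildren calls during obsolete-file scans) or because the object is default-initialized and may never be assigned on some paths (the IOStatus io_s declarations above). A hedged sketch of the second case, in terms of the simplified Status from the earlier sketch:

  // IOStatus io_s;                // default-OK, may never be assigned
  // io_s.PermitUncheckedError();  // silence the destructor check up front
  // if (!write_options.disableWAL) {
  //   io_s = WriteToWAL(...);     // once a real value is assigned, it must
  //   status = io_s;              // be checked again by whoever consumes it
  // }

The opt-out covers only the current (default) value; a later assignment produces a status that has to be consumed like any other.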
diff --git a/db/db_iter_test.cc b/db/db_iter_test.cc
index 3b7cba140..2ff0bd9a4 100644
--- a/db/db_iter_test.cc
+++ b/db/db_iter_test.cc
@@ -392,6 +392,7 @@ TEST_F(DBIteratorTest, DBIteratorPrevNext) {

     db_iter->SeekToLast();
     ASSERT_TRUE(!db_iter->Valid());
+    ASSERT_OK(db_iter->status());
   }
   // Test case to check SeekToLast with iterate_upper_bound set
   // (same key put may times - SeekToLast should start with the
@@ -489,6 +490,7 @@ TEST_F(DBIteratorTest, DBIteratorPrevNext) {

     db_iter->SeekToLast();
     ASSERT_TRUE(!db_iter->Valid());
+    ASSERT_OK(db_iter->status());
   }
   // Test to check the SeekToLast() with the iterate_upper_bound set
   // (Deletion cases)
@@ -596,6 +598,7 @@ TEST_F(DBIteratorTest, DBIteratorPrevNext) {

     db_iter->Prev();
     ASSERT_TRUE(!db_iter->Valid());
+    ASSERT_OK(db_iter->status());
   }

   {
@@ -689,6 +692,7 @@ TEST_F(DBIteratorTest, DBIteratorEmpty) {
         nullptr /* read_callback */));
     db_iter->SeekToLast();
     ASSERT_TRUE(!db_iter->Valid());
+    ASSERT_OK(db_iter->status());
   }

   {
@@ -702,6 +706,7 @@ TEST_F(DBIteratorTest, DBIteratorEmpty) {
         nullptr /* read_callback */));
     db_iter->SeekToFirst();
     ASSERT_TRUE(!db_iter->Valid());
+    ASSERT_OK(db_iter->status());
   }
 }

@@ -744,6 +749,7 @@ TEST_F(DBIteratorTest, DBIteratorUseSkipCountSkips) {

   db_iter->Prev();
   ASSERT_TRUE(!db_iter->Valid());
+  ASSERT_OK(db_iter->status());
   ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION), 3u);
 }

@@ -788,6 +794,7 @@ TEST_F(DBIteratorTest, DBIteratorUseSkip) {

       db_iter->Prev();
       ASSERT_TRUE(!db_iter->Valid());
+      ASSERT_OK(db_iter->status());
     }
   }

@@ -820,6 +827,7 @@ TEST_F(DBIteratorTest, DBIteratorUseSkip) {

       db_iter->Prev();
       ASSERT_TRUE(!db_iter->Valid());
+      ASSERT_OK(db_iter->status());
     }

     {
@@ -855,6 +863,7 @@ TEST_F(DBIteratorTest, DBIteratorUseSkip) {

       db_iter->Prev();
       ASSERT_TRUE(!db_iter->Valid());
+      ASSERT_OK(db_iter->status());
     }
   }

@@ -873,9 +882,11 @@ TEST_F(DBIteratorTest, DBIteratorUseSkip) {
           nullptr /* read_callback */));

       db_iter->SeekToLast();
       ASSERT_TRUE(!db_iter->Valid());
+      ASSERT_OK(db_iter->status());

       db_iter->SeekToFirst();
       ASSERT_TRUE(!db_iter->Valid());
+      ASSERT_OK(db_iter->status());
     }

     TestIterator* internal_iter = new TestIterator(BytewiseComparator());
@@ -896,6 +907,7 @@ TEST_F(DBIteratorTest, DBIteratorUseSkip) {

       db_iter->Prev();
       ASSERT_TRUE(!db_iter->Valid());
+      ASSERT_OK(db_iter->status());

       db_iter->SeekToFirst();
       ASSERT_TRUE(db_iter->Valid());
@@ -904,6 +916,7 @@ TEST_F(DBIteratorTest, DBIteratorUseSkip) {

       db_iter->Next();
       ASSERT_TRUE(!db_iter->Valid());
+      ASSERT_OK(db_iter->status());
     }

     {
@@ -943,6 +956,7 @@ TEST_F(DBIteratorTest, DBIteratorUseSkip) {

       db_iter->Prev();
       ASSERT_TRUE(!db_iter->Valid());
+      ASSERT_OK(db_iter->status());
     }
   }

@@ -983,6 +997,7 @@ TEST_F(DBIteratorTest, DBIteratorUseSkip) {

       db_iter->Prev();
       ASSERT_TRUE(!db_iter->Valid());
+      ASSERT_OK(db_iter->status());
     }
   }
 }

@@ -1039,7 +1054,7 @@ TEST_F(DBIteratorTest, DBIteratorSkipInternalKeys) {

     db_iter->Prev();
     ASSERT_TRUE(!db_iter->Valid());
-    ASSERT_TRUE(db_iter->status().ok());
+    ASSERT_OK(db_iter->status());
   }

   // Test to make sure that the request will *not* fail as incomplete if
@@ -3136,6 +3151,7 @@ TEST_F(DBIteratorTest, SeekToFirstLowerBound) {
     if (i == kNumKeys + 1) {
       // lower bound was beyond the last key
       ASSERT_FALSE(db_iter->Valid());
+      ASSERT_OK(db_iter->status());
     } else {
       ASSERT_TRUE(db_iter->Valid());
       int expected;
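Every addition in db_iter_test.cc follows one rule: when a test observes !Valid(), it must also read status() to distinguish a clean end of iteration (OK) from an error, and reading it is what marks the status checked. The CountIter() change in db_bloom_filter_test.cc above encodes the same rule for loops: iterate on Valid() alone, then check status() once after the loop. A usage sketch against the public API (DB setup elided; assumes a valid rocksdb::DB*):

  #include <cassert>
  #include <memory>
  #include "rocksdb/db.h"

  // Count keys from `start` onward, checking status() exactly once at the end.
  int CountFrom(rocksdb::DB* db, const rocksdb::Slice& start) {
    std::unique_ptr<rocksdb::Iterator> it(
        db->NewIterator(rocksdb::ReadOptions()));
    int count = 0;
    for (it->Seek(start); it->Valid(); it->Next()) {
      ++count;
    }
    // !Valid() means end-of-data OR error; only status() can tell them apart.
    assert(it->status().ok());
    return count;
  }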
"key4" if not fixed auto iter = OpenTransactionLogIter(0); @@ -149,14 +153,14 @@ TEST_F(DBTestXactLogIterator, TransactionLogIteratorStallAtLastRecord) { do { Options options = OptionsForLogIterTest(); DestroyAndReopen(options); - Put("key1", DummyString(1024)); + ASSERT_OK(Put("key1", DummyString(1024))); auto iter = OpenTransactionLogIter(0); ASSERT_OK(iter->status()); ASSERT_TRUE(iter->Valid()); iter->Next(); ASSERT_TRUE(!iter->Valid()); ASSERT_OK(iter->status()); - Put("key2", DummyString(1024)); + ASSERT_OK(Put("key2", DummyString(1024))); iter->Next(); ASSERT_OK(iter->status()); ASSERT_TRUE(iter->Valid()); @@ -167,9 +171,9 @@ TEST_F(DBTestXactLogIterator, TransactionLogIteratorCheckAfterRestart) { do { Options options = OptionsForLogIterTest(); DestroyAndReopen(options); - Put("key1", DummyString(1024)); - Put("key2", DummyString(1023)); - dbfull()->Flush(FlushOptions()); + ASSERT_OK(Put("key1", DummyString(1024))); + ASSERT_OK(Put("key2", DummyString(1023))); + ASSERT_OK(dbfull()->Flush(FlushOptions())); Reopen(options); auto iter = OpenTransactionLogIter(0); ExpectRecords(2, iter); @@ -181,10 +185,10 @@ TEST_F(DBTestXactLogIterator, TransactionLogIteratorCorruptedLog) { Options options = OptionsForLogIterTest(); DestroyAndReopen(options); for (int i = 0; i < 1024; i++) { - Put("key"+ToString(i), DummyString(10)); + ASSERT_OK(Put("key" + ToString(i), DummyString(10))); } - dbfull()->Flush(FlushOptions()); - dbfull()->FlushWAL(false); + ASSERT_OK(dbfull()->Flush(FlushOptions())); + ASSERT_OK(dbfull()->FlushWAL(false)); // Corrupt this log to create a gap ROCKSDB_NAMESPACE::VectorLogPtr wal_files; ASSERT_OK(dbfull()->GetSortedWalFiles(wal_files)); @@ -197,13 +201,13 @@ TEST_F(DBTestXactLogIterator, TransactionLogIteratorCorruptedLog) { } // Insert a new entry to a new log file - Put("key1025", DummyString(10)); - dbfull()->FlushWAL(false); + ASSERT_OK(Put("key1025", DummyString(10))); + ASSERT_OK(dbfull()->FlushWAL(false)); // Try to read from the beginning. 
diff --git a/db/db_sst_test.cc b/db/db_sst_test.cc
index fbf6feb0e..1239b8b62 100644
--- a/db/db_sst_test.cc
+++ b/db/db_sst_test.cc
@@ -170,7 +170,7 @@ TEST_F(DBSSTTest, DontDeleteMovedFile) {
     ASSERT_OK(Flush());
   }
   // this should execute both L0->L1 and L1->(move)->L2 compactions
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   ASSERT_EQ("0,0,1", FilesPerLevel(0));

   // If the moved file is actually deleted (the move-safeguard in
@@ -218,7 +218,7 @@ TEST_F(DBSSTTest, DeleteObsoleteFilesPendingOutputs) {
     ASSERT_OK(Flush());
   }
   // this should execute both L0->L1 and L1->(move)->L2 compactions
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   ASSERT_EQ("0,0,1", FilesPerLevel(0));

   test::SleepingBackgroundTask blocking_thread;
@@ -264,9 +264,9 @@ TEST_F(DBSSTTest, DeleteObsoleteFilesPendingOutputs) {
   // finish the flush!
   blocking_thread.WakeUp();
   blocking_thread.WaitUntilDone();
-  dbfull()->TEST_WaitForFlushMemTable();
+  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
   // File just flushed is too big for L0 and L1 so gets moved to L2.
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   ASSERT_EQ("0,0,1,0,1", FilesPerLevel(0));

   metadata.clear();
@@ -302,8 +302,8 @@ TEST_F(DBSSTTest, DBWithSstFileManager) {
   for (int i = 0; i < 25; i++) {
     GenerateNewRandomFile(&rnd);
     ASSERT_OK(Flush());
-    dbfull()->TEST_WaitForFlushMemTable();
-    dbfull()->TEST_WaitForCompact();
+    ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
+    ASSERT_OK(dbfull()->TEST_WaitForCompact());
     // Verify that we are tracking all sst files in dbname_
     std::unordered_map<std::string, uint64_t> files_in_db;
     ASSERT_OK(GetAllSSTFiles(&files_in_db));
@@ -608,7 +608,7 @@ TEST_F(DBSSTTest, OpenDBWithExistingTrash) {
   Destroy(last_options_);

   // Add some trash files to the db directory so the DB can clean them up
-  env_->CreateDirIfMissing(dbname_);
+  ASSERT_OK(env_->CreateDirIfMissing(dbname_));
   ASSERT_OK(WriteStringToFile(env_, "abc", dbname_ + "/" + "001.sst.trash"));
   ASSERT_OK(WriteStringToFile(env_, "abc", dbname_ + "/" + "002.sst.trash"));
   ASSERT_OK(WriteStringToFile(env_, "abc", dbname_ + "/" + "003.sst.trash"));
@@ -733,7 +733,7 @@ TEST_F(DBSSTTest, DestroyDBWithRateLimitedDelete) {
   int num_sst_files = 0;
   int num_wal_files = 0;
   std::vector<std::string> db_files;
-  env_->GetChildren(dbname_, &db_files);
+  ASSERT_OK(env_->GetChildren(dbname_, &db_files));
   for (std::string f : db_files) {
     if (f.substr(f.find_last_of(".") + 1) == "sst") {
       num_sst_files++;
@@ -822,7 +822,7 @@ TEST_F(DBSSTTest, CancellingCompactionsWorks) {
     ASSERT_OK(Put(Key(i), rnd.RandomString(50)));
   }
   ASSERT_OK(Flush());
-  dbfull()->TEST_WaitForCompact(true);
+  ASSERT_OK(dbfull()->TEST_WaitForCompact(true));

   // Because we set a callback in CancelledCompaction, we actually
   // let the compaction run
diff --git a/db/db_statistics_test.cc b/db/db_statistics_test.cc
index d4e4c628b..0874eb40b 100644
--- a/db/db_statistics_test.cc
+++ b/db/db_statistics_test.cc
@@ -137,7 +137,7 @@ TEST_F(DBStatisticsTest, ResetStats) {
     ASSERT_EQ(1, TestGetTickerCount(options, NUMBER_KEYS_WRITTEN));
     options.statistics->histogramData(DB_WRITE, &histogram_data);
     ASSERT_GT(histogram_data.max, 0.0);
-    options.statistics->Reset();
+    ASSERT_OK(options.statistics->Reset());
   }
 }
 }
diff --git a/db/db_table_properties_test.cc b/db/db_table_properties_test.cc
index 0cdd5b734..96cbe9f1a 100644
--- a/db/db_table_properties_test.cc
+++ b/db/db_table_properties_test.cc
@@ -65,9 +65,9 @@ TEST_F(DBTablePropertiesTest, GetPropertiesOfAllTablesTest) {
   // Create 4 tables
   for (int table = 0; table < 4; ++table) {
     for (int i = 0; i < 10 + table; ++i) {
-      db_->Put(WriteOptions(), ToString(table * 100 + i), "val");
+      ASSERT_OK(db_->Put(WriteOptions(), ToString(table * 100 + i), "val"));
     }
-    db_->Flush(FlushOptions());
+    ASSERT_OK(db_->Flush(FlushOptions()));
   }

   // 1. Read table properties directly from file
@@ -161,14 +161,14 @@ TEST_F(DBTablePropertiesTest, GetPropertiesOfTablesInRange) {
   for (int i = 0; i < 10000; i++) {
     ASSERT_OK(Put(test::RandomKey(&rnd, 5), rnd.RandomString(102)));
   }
-  Flush();
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(Flush());
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   if (NumTableFilesAtLevel(0) == 0) {
     ASSERT_OK(Put(test::RandomKey(&rnd, 5), rnd.RandomString(102)));
-    Flush();
+    ASSERT_OK(Flush());
   }

-  db_->PauseBackgroundWork();
+  ASSERT_OK(db_->PauseBackgroundWork());

   // Ensure that we have at least L0, L1 and L2
   ASSERT_GT(NumTableFilesAtLevel(0), 0);
@@ -236,8 +236,8 @@ TEST_F(DBTablePropertiesTest, GetColumnFamilyNameProperty) {
   // Create one table per CF, then verify it was created with the column family
   // name property.
   for (uint32_t cf = 0; cf < 2; ++cf) {
-    Put(cf, "key", "val");
-    Flush(cf);
+    ASSERT_OK(Put(cf, "key", "val"));
+    ASSERT_OK(Flush(cf));

     TablePropertiesCollection fname_to_props;
     ASSERT_OK(db_->GetPropertiesOfAllTables(handles_[cf], &fname_to_props));
@@ -260,17 +260,17 @@ TEST_F(DBTablePropertiesTest, GetDbIdentifiersProperty) {
   CreateAndReopenWithCF({"goku"}, CurrentOptions());

   for (uint32_t cf = 0; cf < 2; ++cf) {
-    Put(cf, "key", "val");
-    Put(cf, "foo", "bar");
-    Flush(cf);
+    ASSERT_OK(Put(cf, "key", "val"));
+    ASSERT_OK(Put(cf, "foo", "bar"));
+    ASSERT_OK(Flush(cf));

     TablePropertiesCollection fname_to_props;
     ASSERT_OK(db_->GetPropertiesOfAllTables(handles_[cf], &fname_to_props));
     ASSERT_EQ(1U, fname_to_props.size());

     std::string id, sid;
-    db_->GetDbIdentity(id);
-    db_->GetDbSessionId(sid);
+    ASSERT_OK(db_->GetDbIdentity(id));
+    ASSERT_OK(db_->GetDbSessionId(sid));
     ASSERT_EQ(id, fname_to_props.begin()->second->db_id);
     ASSERT_EQ(sid, fname_to_props.begin()->second->db_session_id);
   }
@@ -298,9 +298,9 @@ TEST_P(DBTableHostnamePropertyTest, DbHostLocationProperty) {
   CreateAndReopenWithCF({"goku"}, opts);

   for (uint32_t cf = 0; cf < 2; ++cf) {
-    Put(cf, "key", "val");
-    Put(cf, "foo", "bar");
-    Flush(cf);
+    ASSERT_OK(Put(cf, "key", "val"));
+    ASSERT_OK(Put(cf, "foo", "bar"));
+    ASSERT_OK(Flush(cf));

     TablePropertiesCollection fname_to_props;
     ASSERT_OK(db_->GetPropertiesOfAllTables(handles_[cf], &fname_to_props));
@@ -356,8 +356,8 @@ TEST_P(DBTablePropertiesTest, DeletionTriggeredCompactionMarking) {

   // add an L1 file to prevent tombstones from dropping due to obsolescence
   // during flush
-  Put(Key(0), "val");
-  Flush();
+  ASSERT_OK(Put(Key(0), "val"));
+  ASSERT_OK(Flush());
   MoveFilesToLevel(1);

   DeletionTriggeredCompactionTestListener *listener =
@@ -368,14 +368,14 @@ TEST_P(DBTablePropertiesTest, DeletionTriggeredCompactionMarking) {
   for (int i = 0; i < kNumKeys; ++i) {
     if (i >= kNumKeys - kWindowSize &&
         i < kNumKeys - kWindowSize + kNumDelsTrigger) {
-      Delete(Key(i));
+      ASSERT_OK(Delete(Key(i)));
     } else {
-      Put(Key(i), "val");
+      ASSERT_OK(Put(Key(i), "val"));
     }
   }
-  Flush();
+  ASSERT_OK(Flush());

-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   ASSERT_EQ(0, NumTableFilesAtLevel(0));

   // Change the window size and deletion trigger and ensure new values take
@@ -389,14 +389,14 @@ TEST_P(DBTablePropertiesTest, DeletionTriggeredCompactionMarking) {
   for (int i = 0; i < kNumKeys; ++i) {
     if (i >= kNumKeys - kWindowSize &&
         i < kNumKeys - kWindowSize + kNumDelsTrigger) {
-      Delete(Key(i));
+      ASSERT_OK(Delete(Key(i)));
     } else {
-      Put(Key(i), "val");
+      ASSERT_OK(Put(Key(i), "val"));
     }
   }
-  Flush();
+  ASSERT_OK(Flush());

-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   ASSERT_EQ(0, NumTableFilesAtLevel(0));

   // Change the window size to disable delete triggered compaction
@@ -408,14 +408,14 @@ TEST_P(DBTablePropertiesTest, DeletionTriggeredCompactionMarking) {
   for (int i = 0; i < kNumKeys; ++i) {
     if (i >= kNumKeys - kWindowSize &&
         i < kNumKeys - kWindowSize + kNumDelsTrigger) {
-      Delete(Key(i));
+      ASSERT_OK(Delete(Key(i)));
     } else {
-      Put(Key(i), "val");
+      ASSERT_OK(Put(Key(i), "val"));
     }
   }
-  Flush();
+  ASSERT_OK(Flush());

-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   ASSERT_EQ(1, NumTableFilesAtLevel(0));
   ASSERT_LT(0, opts.statistics->getTickerCount(COMPACT_WRITE_BYTES_MARKED));
   ASSERT_LT(0, opts.statistics->getTickerCount(COMPACT_READ_BYTES_MARKED));
@@ -438,8 +438,8 @@ TEST_P(DBTablePropertiesTest, RatioBasedDeletionTriggeredCompactionMarking) {

   // Add an L2 file to prevent tombstones from dropping due to obsolescence
   // during flush
-  Put(Key(0), "val");
-  Flush();
+  ASSERT_OK(Put(Key(0), "val"));
+  ASSERT_OK(Flush());
   MoveFilesToLevel(2);

   auto* listener = new DeletionTriggeredCompactionTestListener();
diff --git a/db/db_tailing_iter_test.cc b/db/db_tailing_iter_test.cc
index f33b7cb13..e282e807e 100644
--- a/db/db_tailing_iter_test.cc
+++ b/db/db_tailing_iter_test.cc
@@ -179,7 +179,7 @@ TEST_F(DBTestTailingIterator, TailingIteratorTrimSeekToNext) {
     if (i % 100 == 99) {
       ASSERT_OK(Flush(1));
-      dbfull()->TEST_WaitForCompact();
+      ASSERT_OK(dbfull()->TEST_WaitForCompact());
       if (i == 299) {
         file_iters_deleted = true;
       }
@@ -411,7 +411,7 @@ TEST_F(DBTestTailingIterator, TailingIteratorUpperBound) {
   it->Next();
   // Not valid since "21" is over the upper bound.
   ASSERT_FALSE(it->Valid());
-
+  ASSERT_OK(it->status());
   // This keeps track of the number of times NeedToSeekImmutable() was true.
   int immutable_seeks = 0;
   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
@@ -424,6 +424,7 @@ TEST_F(DBTestTailingIterator, TailingIteratorUpperBound) {
   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();

   ASSERT_FALSE(it->Valid());
+  ASSERT_OK(it->status());
   ASSERT_EQ(0, immutable_seeks);
 }
diff --git a/db/db_test_util.cc b/db/db_test_util.cc
index 0b9806bb1..b4b071731 100644
--- a/db/db_test_util.cc
+++ b/db/db_test_util.cc
@@ -1129,11 +1129,12 @@ std::string DBTestBase::FilesPerLevel(int cf) {

 size_t DBTestBase::CountFiles() {
   std::vector<std::string> files;
-  env_->GetChildren(dbname_, &files);
+  EXPECT_OK(env_->GetChildren(dbname_, &files));

   std::vector<std::string> logfiles;
   if (dbname_ != last_options_.wal_dir) {
-    env_->GetChildren(last_options_.wal_dir, &logfiles);
+    Status s = env_->GetChildren(last_options_.wal_dir, &logfiles);
+    EXPECT_TRUE(s.ok() || s.IsNotFound());
   }

   return files.size() + logfiles.size();
@@ -1266,8 +1267,8 @@ void DBTestBase::GenerateNewRandomFile(Random* rnd, bool nowait) {
   }
   ASSERT_OK(Put("key" + rnd->RandomString(7), rnd->RandomString(200)));
   if (!nowait) {
-    dbfull()->TEST_WaitForFlushMemTable();
-    dbfull()->TEST_WaitForCompact();
+    ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
+    ASSERT_OK(dbfull()->TEST_WaitForCompact());
   }
 }
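CountFiles() above now distinguishes the two directories it scans: the DB directory must exist (EXPECT_OK), but a separately configured wal_dir may legitimately be absent, so NotFound is tolerated there. A self-contained sketch of that tolerance pattern (helper name hypothetical; assumes linking against RocksDB):

  #include <cassert>
  #include <string>
  #include <vector>
  #include "rocksdb/env.h"

  // Count entries in a directory that may legitimately not exist yet:
  // treat NotFound as zero files, but surface any other error.
  size_t CountFilesIn(rocksdb::Env* env, const std::string& dir) {
    std::vector<std::string> files;
    rocksdb::Status s = env->GetChildren(dir, &files);
    assert(s.ok() || s.IsNotFound());
    return s.ok() ? files.size() : 0;
  }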
batch.Put("cats", "dogs"); + ASSERT_OK(batch.Put("cats", "dogs")); batch.MarkWalTerminationPoint(); - batch.Put("boys", "girls"); + ASSERT_OK(batch.Put("boys", "girls")); ASSERT_OK(db_->Write(wo, &batch)); env_->SetFilesystemActive(false); diff --git a/db/forward_iterator.cc b/db/forward_iterator.cc index 43a8a4ec6..0eabc53f3 100644 --- a/db/forward_iterator.cc +++ b/db/forward_iterator.cc @@ -47,11 +47,7 @@ class ForwardLevelIterator : public InternalIterator { pinned_iters_mgr_(nullptr), prefix_extractor_(prefix_extractor), allow_unprepared_value_(allow_unprepared_value) { - /* - NOTE needed for ASSERT_STATUS_CHECKED - in MergeOperatorPinningTest/MergeOperatorPinningTest.TailingIterator - */ - status_.PermitUncheckedError(); + status_.PermitUncheckedError(); // Allow uninitialized status through } ~ForwardLevelIterator() override { diff --git a/db/listener_test.cc b/db/listener_test.cc index 1712a5be8..7c6eb9fe0 100644 --- a/db/listener_test.cc +++ b/db/listener_test.cc @@ -192,10 +192,10 @@ TEST_F(EventListenerTest, OnSingleDBCompactionTest) { ASSERT_OK(Put(7, "popovich", std::string(90000, 'p'))); for (int i = 1; i < 8; ++i) { ASSERT_OK(Flush(i)); - dbfull()->TEST_WaitForFlushMemTable(); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable()); ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), handles_[i], nullptr, nullptr)); - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); } ASSERT_EQ(listener->compacted_dbs_.size(), cf_names.size()); @@ -211,6 +211,10 @@ class TestFlushListener : public EventListener { : slowdown_count(0), stop_count(0), db_closed(), env_(env), test_(test) { db_closed = false; } + + virtual ~TestFlushListener() { + prev_fc_info_.status.PermitUncheckedError(); // Ignore the status + } void OnTableFileCreated( const TableFileCreationInfo& info) override { // remember the info for later checking the FlushJobInfo. @@ -333,7 +337,7 @@ TEST_F(EventListenerTest, OnSingleDBFlushTest) { ASSERT_OK(Put(7, "popovich", std::string(90000, 'p'))); for (int i = 1; i < 8; ++i) { ASSERT_OK(Flush(i)); - dbfull()->TEST_WaitForFlushMemTable(); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable()); ASSERT_EQ(listener->flushed_dbs_.size(), i); ASSERT_EQ(listener->flushed_column_family_names_.size(), i); } @@ -417,7 +421,7 @@ TEST_F(EventListenerTest, MultiDBMultiListeners) { ASSERT_OK(DB::Open(options, dbname_ + ToString(d), &db)); for (size_t c = 0; c < cf_names.size(); ++c) { ColumnFamilyHandle* handle; - db->CreateColumnFamily(cf_opts, cf_names[c], &handle); + ASSERT_OK(db->CreateColumnFamily(cf_opts, cf_names[c], &handle)); handles.push_back(handle); } @@ -435,7 +439,8 @@ TEST_F(EventListenerTest, MultiDBMultiListeners) { for (size_t c = 0; c < cf_names.size(); ++c) { for (int d = 0; d < kNumDBs; ++d) { ASSERT_OK(dbs[d]->Flush(FlushOptions(), vec_handles[d][c])); - static_cast_with_check(dbs[d])->TEST_WaitForFlushMemTable(); + ASSERT_OK( + static_cast_with_check(dbs[d])->TEST_WaitForFlushMemTable()); } } @@ -494,10 +499,10 @@ TEST_F(EventListenerTest, DisableBGCompaction) { // keep writing until writes are forced to stop. 
diff --git a/db/forward_iterator.cc b/db/forward_iterator.cc
index 43a8a4ec6..0eabc53f3 100644
--- a/db/forward_iterator.cc
+++ b/db/forward_iterator.cc
@@ -47,11 +47,7 @@ class ForwardLevelIterator : public InternalIterator {
         pinned_iters_mgr_(nullptr),
         prefix_extractor_(prefix_extractor),
         allow_unprepared_value_(allow_unprepared_value) {
-    /*
-    NOTE needed for ASSERT_STATUS_CHECKED
-    in MergeOperatorPinningTest/MergeOperatorPinningTest.TailingIterator
-    */
-    status_.PermitUncheckedError();
+    status_.PermitUncheckedError();  // Allow uninitialized status through
   }

   ~ForwardLevelIterator() override {
+  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
   listener->CheckAndResetCounters(1, 1, 0, 0, 0, 0);
   const Slice kRangeStart = "a";
   const Slice kRangeEnd = "z";
-  dbfull()->CompactRange(CompactRangeOptions(), &kRangeStart, &kRangeEnd);
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(
+      dbfull()->CompactRange(CompactRangeOptions(), &kRangeStart, &kRangeEnd));
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   listener->CheckAndResetCounters(0, 0, 0, 1, 1, 0);
   ASSERT_OK(Put("foo", "aaa3"));
   ASSERT_OK(Put("bar", "bbb3"));
   ASSERT_OK(Flush());
   test_env->SetStatus(Status::NotSupported("not supported"));
-  dbfull()->CompactRange(CompactRangeOptions(), &kRangeStart, &kRangeEnd);
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_NOK(
+      dbfull()->CompactRange(CompactRangeOptions(), &kRangeStart, &kRangeEnd));
+  ASSERT_NOK(dbfull()->TEST_WaitForCompact());
   listener->CheckAndResetCounters(1, 1, 0, 1, 1, 1);
   Close();
 }
@@ -1076,8 +1083,8 @@ TEST_F(EventListenerTest, OnFileOperationTest) {
   }
   DestroyAndReopen(options);
   ASSERT_OK(Put("foo", "aaa"));
-  dbfull()->Flush(FlushOptions());
-  dbfull()->TEST_WaitForFlushMemTable();
+  ASSERT_OK(dbfull()->Flush(FlushOptions()));
+  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
   ASSERT_GE(listener->file_writes_.load(),
             listener->file_writes_success_.load());
   ASSERT_GT(listener->file_writes_.load(), 0);
diff --git a/db/log_test.cc b/db/log_test.cc
index 269761968..8289fdb7e 100644
--- a/db/log_test.cc
+++ b/db/log_test.cc
@@ -191,7 +191,7 @@ class LogTest : public ::testing::TestWithParam<std::tuple<int, bool>> {
   Slice* get_reader_contents() { return &reader_contents_; }

   void Write(const std::string& msg) {
-    writer_.AddRecord(Slice(msg));
+    ASSERT_OK(writer_.AddRecord(Slice(msg)));
   }

   size_t WrittenBytes() const {
@@ -689,8 +689,8 @@ TEST_P(LogTest, Recycle) {
       new test::OverwritingStringSink(get_reader_contents()),
       "" /* don't care */));
   Writer recycle_writer(std::move(dest_holder), 123, true);
-  recycle_writer.AddRecord(Slice("foooo"));
-  recycle_writer.AddRecord(Slice("bar"));
+  ASSERT_OK(recycle_writer.AddRecord(Slice("foooo")));
+  ASSERT_OK(recycle_writer.AddRecord(Slice("bar")));
   ASSERT_GE(get_reader_contents()->size(), log::kBlockSize * 2);
   ASSERT_EQ("foooo", Read());
   ASSERT_EQ("bar", Read());
@@ -782,11 +782,13 @@ class RetriableLogTest : public ::testing::TestWithParam<int> {
     return file->contents_;
   }

-  void Encode(const std::string& msg) { log_writer_->AddRecord(Slice(msg)); }
+  void Encode(const std::string& msg) {
+    ASSERT_OK(log_writer_->AddRecord(Slice(msg)));
+  }

   void Write(const Slice& data) {
-    writer_->Append(data);
-    writer_->Sync(true);
+    ASSERT_OK(writer_->Append(data));
+    ASSERT_OK(writer_->Sync(true));
   }

   bool TryRead(std::string* result) {
diff --git a/db/manual_compaction_test.cc b/db/manual_compaction_test.cc
index c8039b539..9005e932a 100644
--- a/db/manual_compaction_test.cc
+++ b/db/manual_compaction_test.cc
@@ -100,13 +100,13 @@ TEST_F(ManualCompactionTest, CompactTouchesAllKeys) {
   options.compaction_filter = new DestroyAllCompactionFilter();
   ASSERT_OK(DB::Open(options, dbname_, &db));

-  db->Put(WriteOptions(), Slice("key1"), Slice("destroy"));
-  db->Put(WriteOptions(), Slice("key2"), Slice("destroy"));
-  db->Put(WriteOptions(), Slice("key3"), Slice("value3"));
-  db->Put(WriteOptions(), Slice("key4"), Slice("destroy"));
+  ASSERT_OK(db->Put(WriteOptions(), Slice("key1"), Slice("destroy")));
+  ASSERT_OK(db->Put(WriteOptions(), Slice("key2"), Slice("destroy")));
+  ASSERT_OK(db->Put(WriteOptions(), Slice("key3"), Slice("value3")));
ASSERT_OK(db->Put(WriteOptions(), Slice("key4"), Slice("destroy"))); Slice key4("key4"); - db->CompactRange(CompactRangeOptions(), nullptr, &key4); + ASSERT_OK(db->CompactRange(CompactRangeOptions(), nullptr, &key4)); Iterator* itr = db->NewIterator(ReadOptions()); itr->SeekToFirst(); ASSERT_TRUE(itr->Valid()); @@ -135,21 +135,21 @@ TEST_F(ManualCompactionTest, Test) { // create first key range WriteBatch batch; for (int i = 0; i < kNumKeys; i++) { - batch.Put(Key1(i), "value for range 1 key"); + ASSERT_OK(batch.Put(Key1(i), "value for range 1 key")); } ASSERT_OK(db->Write(WriteOptions(), &batch)); // create second key range batch.Clear(); for (int i = 0; i < kNumKeys; i++) { - batch.Put(Key2(i), "value for range 2 key"); + ASSERT_OK(batch.Put(Key2(i), "value for range 2 key")); } ASSERT_OK(db->Write(WriteOptions(), &batch)); // delete second key range batch.Clear(); for (int i = 0; i < kNumKeys; i++) { - batch.Delete(Key2(i)); + ASSERT_OK(batch.Delete(Key2(i))); } ASSERT_OK(db->Write(WriteOptions(), &batch)); @@ -160,7 +160,7 @@ TEST_F(ManualCompactionTest, Test) { Slice greatest(end_key.data(), end_key.size()); // commenting out the line below causes the example to work correctly - db->CompactRange(CompactRangeOptions(), &least, &greatest); + ASSERT_OK(db->CompactRange(CompactRangeOptions(), &least, &greatest)); // count the keys Iterator* iter = db->NewIterator(ReadOptions()); @@ -205,7 +205,7 @@ TEST_F(ManualCompactionTest, SkipLevel) { Slice start("5"); Slice end("7"); filter->Reset(); - db->CompactRange(CompactRangeOptions(), &start, &end); + ASSERT_OK(db->CompactRange(CompactRangeOptions(), &start, &end)); ASSERT_EQ(0, filter->NumKeys()); } @@ -215,7 +215,7 @@ TEST_F(ManualCompactionTest, SkipLevel) { Slice start("3"); Slice end("7"); filter->Reset(); - db->CompactRange(CompactRangeOptions(), &start, &end); + ASSERT_OK(db->CompactRange(CompactRangeOptions(), &start, &end)); ASSERT_EQ(2, filter->NumKeys()); ASSERT_EQ(0, filter->KeyLevel("4")); ASSERT_EQ(0, filter->KeyLevel("8")); @@ -227,7 +227,7 @@ TEST_F(ManualCompactionTest, SkipLevel) { // no file has keys in range (-inf, 0] Slice end("0"); filter->Reset(); - db->CompactRange(CompactRangeOptions(), nullptr, &end); + ASSERT_OK(db->CompactRange(CompactRangeOptions(), nullptr, &end)); ASSERT_EQ(0, filter->NumKeys()); } @@ -237,7 +237,7 @@ TEST_F(ManualCompactionTest, SkipLevel) { // no file has keys in range [9, inf) Slice start("9"); filter->Reset(); - db->CompactRange(CompactRangeOptions(), &start, nullptr); + ASSERT_OK(db->CompactRange(CompactRangeOptions(), &start, nullptr)); ASSERT_EQ(0, filter->NumKeys()); } @@ -248,7 +248,7 @@ TEST_F(ManualCompactionTest, SkipLevel) { Slice start("2"); Slice end("2"); filter->Reset(); - db->CompactRange(CompactRangeOptions(), &start, &end); + ASSERT_OK(db->CompactRange(CompactRangeOptions(), &start, &end)); ASSERT_EQ(1, filter->NumKeys()); ASSERT_EQ(0, filter->KeyLevel("2")); } @@ -260,7 +260,7 @@ TEST_F(ManualCompactionTest, SkipLevel) { Slice start("2"); Slice end("5"); filter->Reset(); - db->CompactRange(CompactRangeOptions(), &start, &end); + ASSERT_OK(db->CompactRange(CompactRangeOptions(), &start, &end)); ASSERT_EQ(3, filter->NumKeys()); ASSERT_EQ(1, filter->KeyLevel("2")); ASSERT_EQ(1, filter->KeyLevel("4")); @@ -273,7 +273,7 @@ TEST_F(ManualCompactionTest, SkipLevel) { // [0, inf) overlaps all files Slice start("0"); filter->Reset(); - db->CompactRange(CompactRangeOptions(), &start, nullptr); + ASSERT_OK(db->CompactRange(CompactRangeOptions(), &start, nullptr)); ASSERT_EQ(4, 
   // 1 is first compacted to L1 and then further compacted into [2, 4, 8],
   // so finally the logged level for 1 is L1.
diff --git a/db/obsolete_files_test.cc b/db/obsolete_files_test.cc
index d198e3676..8159581a3 100644
--- a/db/obsolete_files_test.cc
+++ b/db/obsolete_files_test.cc
@@ -61,7 +61,7 @@ class ObsoleteFilesTest : public DBTestBase {
   void CheckFileTypeCounts(const std::string& dir, int required_log,
                            int required_sst, int required_manifest) {
     std::vector<std::string> filenames;
-    env_->GetChildren(dir, &filenames);
+    ASSERT_OK(env_->GetChildren(dir, &filenames));

     int log_cnt = 0;
     int sst_cnt = 0;
diff --git a/db/perf_context_test.cc b/db/perf_context_test.cc
index 7713f735e..0df8b462a 100644
--- a/db/perf_context_test.cc
+++ b/db/perf_context_test.cc
@@ -76,12 +76,12 @@ TEST_F(PerfContextTest, SeekIntoDeletion) {
     std::string key = "k" + ToString(i);
     std::string value = "v" + ToString(i);

-    db->Put(write_options, key, value);
+    ASSERT_OK(db->Put(write_options, key, value));
   }

   for (int i = 0; i < FLAGS_total_keys -1 ; ++i) {
     std::string key = "k" + ToString(i);
-    db->Delete(write_options, key);
+    ASSERT_OK(db->Delete(write_options, key));
   }

   HistogramImpl hist_get;
@@ -116,10 +116,9 @@
   auto elapsed_nanos = timer.ElapsedNanos();

   if (FLAGS_verbose) {
-    std::cout << "SeekToFirst uesr key comparison: \n"
-              << hist_seek_to_first.ToString()
-              << "ikey skipped: " << get_perf_context()->internal_key_skipped_count
-              << "\n"
+    std::cout << "SeekToFirst user key comparison: \n"
+              << hist_seek_to_first.ToString() << "ikey skipped: "
+              << get_perf_context()->internal_key_skipped_count << "\n"
               << "idelete skipped: "
               << get_perf_context()->internal_delete_skipped_count << "\n"
               << "elapsed: " << elapsed_nanos << "\n";
@@ -156,7 +155,7 @@
   }

   if (FLAGS_verbose) {
-    std::cout << "Seek uesr key comparison: \n" << hist_seek.ToString();
+    std::cout << "Seek user key comparison: \n" << hist_seek.ToString();
   }
 }
@@ -270,7 +269,7 @@ void ProfileQueries(bool enabled_time = false) {
     std::vector<std::string> values;

     get_perf_context()->Reset();
-    db->Put(write_options, key, value);
+    ASSERT_OK(db->Put(write_options, key, value));
     if (++num_mutex_waited > 3) {
 #ifndef NDEBUG
       ThreadStatusUtil::TEST_SetStateDelay(ThreadStatus::STATE_MUTEX_WAIT, 0U);
 #endif
@@ -314,7 +313,10 @@
     hist_get.Add(get_perf_context()->user_key_comparison_count);

     get_perf_context()->Reset();
-    db->MultiGet(read_options, multiget_keys, &values);
+    auto statuses = db->MultiGet(read_options, multiget_keys, &values);
+    for (const auto& s : statuses) {
+      ASSERT_OK(s);
+    }
     hist_mget_snapshot.Add(get_perf_context()->get_snapshot_time);
     hist_mget_memtable.Add(get_perf_context()->get_from_memtable_time);
     hist_mget_files.Add(get_perf_context()->get_from_output_files_time);
@@ -324,9 +326,10 @@
   }

   if (FLAGS_verbose) {
-    std::cout << "Put uesr key comparison: \n" << hist_put.ToString()
-              << "Get uesr key comparison: \n" << hist_get.ToString()
-              << "MultiGet uesr key comparison: \n" << hist_get.ToString();
+    std::cout << "Put user key comparison: \n"
+              << hist_put.ToString() << "Get user key comparison: \n"
+              << hist_get.ToString() << "MultiGet user key comparison: \n"
+              << hist_get.ToString();
     std::cout << "Put(): Pre and Post Process Time: \n"
               << hist_write_pre_post.ToString() << " Writing WAL time: \n"
               << hist_write_wal_time.ToString() << "\n"
@@ -428,7 +431,10 @@ void ProfileQueries(bool enabled_time = false) {
     hist_get.Add(get_perf_context()->user_key_comparison_count);

     get_perf_context()->Reset();
-    db->MultiGet(read_options, multiget_keys, &values);
+    auto statuses = db->MultiGet(read_options, multiget_keys, &values);
+    for (const auto& s : statuses) {
+      ASSERT_OK(s);
+    }
     hist_mget_snapshot.Add(get_perf_context()->get_snapshot_time);
     hist_mget_memtable.Add(get_perf_context()->get_from_memtable_time);
     hist_mget_files.Add(get_perf_context()->get_from_output_files_time);
@@ -438,8 +444,9 @@ void ProfileQueries(bool enabled_time = false) {
   }

   if (FLAGS_verbose) {
-    std::cout << "ReadOnly Get uesr key comparison: \n" << hist_get.ToString()
-              << "ReadOnly MultiGet uesr key comparison: \n"
+    std::cout << "ReadOnly Get user key comparison: \n"
+              << hist_get.ToString()
+              << "ReadOnly MultiGet user key comparison: \n"
               << hist_mget.ToString();

     std::cout << "ReadOnly Get(): Time to get snapshot: \n"
@@ -539,7 +546,7 @@ TEST_F(PerfContextTest, SeekKeyComparison) {
     get_perf_context()->Reset();
     timer.Start();
-    db->Put(write_options, key, value);
+    ASSERT_OK(db->Put(write_options, key, value));
     auto put_time = timer.ElapsedNanos();
     hist_put_time.Add(put_time);
     hist_wal_time.Add(get_perf_context()->write_wal_time);
@@ -573,7 +580,7 @@ TEST_F(PerfContextTest, SeekKeyComparison) {
     iter->Next();
     hist_next.Add(get_perf_context()->user_key_comparison_count);
   }
-
+  ASSERT_OK(iter->status());
   if (FLAGS_verbose) {
     std::cout << "Seek:\n" << hist_seek.ToString() << "Next:\n"
               << hist_next.ToString();
@@ -835,7 +842,7 @@ TEST_F(PerfContextTest, CPUTimer) {
     std::string value = "v" + i_str;
     max_str = max_str > i_str ? max_str : i_str;

-    db->Put(write_options, key, value);
+    ASSERT_OK(db->Put(write_options, key, value));
   }
   std::string last_key = "k" + max_str;
   std::string last_value = "v" + max_str;
diff --git a/db/periodic_work_scheduler_test.cc b/db/periodic_work_scheduler_test.cc
index 9a1844287..28e7de783 100644
--- a/db/periodic_work_scheduler_test.cc
+++ b/db/periodic_work_scheduler_test.cc
@@ -185,7 +185,7 @@ TEST_F(PeriodicWorkSchedulerTest, MultiInstances) {
   ASSERT_EQ(expected_run, pst_st_counter);

   for (int i = half; i < kInstanceNum; i++) {
-    dbs[i]->Close();
+    ASSERT_OK(dbs[i]->Close());
     delete dbs[i];
   }
 }
@@ -217,7 +217,7 @@ TEST_F(PeriodicWorkSchedulerTest, MultiEnv) {
   ASSERT_EQ(dbi->TEST_GetPeriodicWorkScheduler(),
             dbfull()->TEST_GetPeriodicWorkScheduler());

-  db->Close();
+  ASSERT_OK(db->Close());
   delete db;

   Close();
 }
diff --git a/db/transaction_log_impl.cc b/db/transaction_log_impl.cc
index e6180903f..ba4c65ff9 100644
--- a/db/transaction_log_impl.cc
+++ b/db/transaction_log_impl.cc
@@ -34,7 +34,7 @@ TransactionLogIteratorImpl::TransactionLogIteratorImpl(
       io_tracer_(io_tracer) {
   assert(files_ != nullptr);
   assert(versions_ != nullptr);
-
+  current_status_.PermitUncheckedError();  // Clear on start
   reporter_.env = options_->env;
   reporter_.info_log = options_->info_log.get();
   SeekToStartSequence();  // Seek till starting sequence
@@ -225,7 +225,8 @@ bool TransactionLogIteratorImpl::IsBatchExpected(
 void TransactionLogIteratorImpl::UpdateCurrentWriteBatch(const Slice& record) {
   std::unique_ptr<WriteBatch> batch(new WriteBatch());
-  WriteBatchInternal::SetContents(batch.get(), record);
+  Status s = WriteBatchInternal::SetContents(batch.get(), record);
+  s.PermitUncheckedError();  // TODO: What should we do with this error?
   SequenceNumber expected_seq = current_last_seq_ + 1;
   // If the iterator has started, then confirm that we get continuous batches
diff --git a/db/version_set_test.cc b/db/version_set_test.cc
index cb02736de..bd94f223a 100644
--- a/db/version_set_test.cc
+++ b/db/version_set_test.cc
@@ -1231,7 +1231,7 @@ TEST_F(VersionSetTest, WalEditsNotAppliedToVersion) {
       [&](void* arg) { versions.push_back(reinterpret_cast<Version*>(arg)); });
   SyncPoint::GetInstance()->EnableProcessing();

-  LogAndApplyToDefaultCF(edits);
+  ASSERT_OK(LogAndApplyToDefaultCF(edits));

   SyncPoint::GetInstance()->DisableProcessing();
   SyncPoint::GetInstance()->ClearAllCallBacks();
@@ -1267,7 +1267,7 @@ TEST_F(VersionSetTest, NonWalEditsAppliedToVersion) {
       [&](void* arg) { versions.push_back(reinterpret_cast<Version*>(arg)); });
   SyncPoint::GetInstance()->EnableProcessing();

-  LogAndApplyToDefaultCF(edits);
+  ASSERT_OK(LogAndApplyToDefaultCF(edits));

   SyncPoint::GetInstance()->DisableProcessing();
   SyncPoint::GetInstance()->ClearAllCallBacks();
@@ -1674,7 +1674,7 @@ TEST_F(VersionSetTest, AtomicGroupWithWalEdits) {
   edits.back()->MarkAtomicGroup(--remaining);
   ASSERT_EQ(remaining, 0);

-  Status s = LogAndApplyToDefaultCF(edits);
+  ASSERT_OK(LogAndApplyToDefaultCF(edits));

   // Recover a new VersionSet, the min log number and the last WAL should be
   // kept.
diff --git a/db/wal_manager.cc b/db/wal_manager.cc
index a937fc719..7e77e0361 100644
--- a/db/wal_manager.cc
+++ b/db/wal_manager.cc
@@ -243,9 +243,13 @@ void WalManager::PurgeObsoleteWALFiles() {
     size_t files_del_num = log_files_num - files_keep_num;
     VectorLogPtr archived_logs;
-    GetSortedWalsOfType(archival_dir, archived_logs, kArchivedLogFile);
-
-    if (files_del_num > archived_logs.size()) {
+    s = GetSortedWalsOfType(archival_dir, archived_logs, kArchivedLogFile);
+    if (!s.ok()) {
+      ROCKS_LOG_WARN(db_options_.info_log,
+                     "Unable to get archived WALs from: %s: %s",
+                     archival_dir.c_str(), s.ToString().c_str());
+      files_del_num = 0;
+    } else if (files_del_num > archived_logs.size()) {
       ROCKS_LOG_WARN(db_options_.info_log,
                      "Trying to delete more archived log files than "
                      "exist. Deleting all");
diff --git a/db/wal_manager_test.cc b/db/wal_manager_test.cc
index f1800d7f2..f124bd5ad 100644
--- a/db/wal_manager_test.cc
+++ b/db/wal_manager_test.cc
@@ -69,9 +69,10 @@ class WalManagerTest : public testing::Test {
     assert(current_log_writer_.get() != nullptr);
     uint64_t seq = versions_->LastSequence() + 1;
     WriteBatch batch;
-    batch.Put(key, value);
+    ASSERT_OK(batch.Put(key, value));
     WriteBatchInternal::SetSequence(&batch, seq);
-    current_log_writer_->AddRecord(WriteBatchInternal::Contents(&batch));
+    ASSERT_OK(
+        current_log_writer_->AddRecord(WriteBatchInternal::Contents(&batch)));
     versions_->SetLastAllocatedSequence(seq);
     versions_->SetLastPublishedSequence(seq);
     versions_->SetLastSequence(seq);
@@ -140,9 +141,9 @@ TEST_F(WalManagerTest, ReadFirstRecordCache) {
   log::Writer writer(std::move(file_writer), 1,
                      db_options_.recycle_log_file_num > 0);
   WriteBatch batch;
-  batch.Put("foo", "bar");
+  ASSERT_OK(batch.Put("foo", "bar"));
   WriteBatchInternal::SetSequence(&batch, 10);
-  writer.AddRecord(WriteBatchInternal::Contents(&batch));
+  ASSERT_OK(writer.AddRecord(WriteBatchInternal::Contents(&batch)));

   // TODO(icanadi) move SpecialEnv outside of db_test, so we can reuse it here.
   // Waiting for lei to finish with db_test
@@ -167,14 +168,14 @@ namespace {
 uint64_t GetLogDirSize(std::string dir_path, Env* env) {
   uint64_t dir_size = 0;
   std::vector<std::string> files;
-  env->GetChildren(dir_path, &files);
+  EXPECT_OK(env->GetChildren(dir_path, &files));
   for (auto& f : files) {
     uint64_t number;
     FileType type;
     if (ParseFileName(f, &number, &type) && type == kWalFile) {
       std::string const file_path = dir_path + "/" + f;
       uint64_t file_size;
-      env->GetFileSize(file_path, &file_size);
+      EXPECT_OK(env->GetFileSize(file_path, &file_size));
       dir_size += file_size;
     }
   }
@@ -184,9 +185,9 @@ std::vector<uint64_t> ListSpecificFiles(
     Env* env, const std::string& path, const FileType expected_file_type) {
   std::vector<std::string> files;
   std::vector<uint64_t> file_numbers;
-  env->GetChildren(path, &files);
   uint64_t number;
   FileType type;
+  EXPECT_OK(env->GetChildren(path, &files));
   for (size_t i = 0; i < files.size(); ++i) {
     if (ParseFileName(files[i], &number, &type)) {
       if (type == expected_file_type) {
@@ -209,6 +210,7 @@ int CountRecords(TransactionLogIterator* iter) {
     EXPECT_OK(iter->status());
     iter->Next();
   }
+  EXPECT_OK(iter->status());
   return count;
 }
 }  // namespace
diff --git a/db/write_callback_test.cc b/db/write_callback_test.cc
index 4bfc4e911..a2d3f94c4 100644
--- a/db/write_callback_test.cc
+++ b/db/write_callback_test.cc
@@ -111,7 +111,7 @@ TEST_P(WriteCallbackPTest, WriteWithCallbackTest) {
     void Put(const string& key, const string& val) {
       kvs_.push_back(std::make_pair(key, val));
-      write_batch_.Put(key, val);
+      ASSERT_OK(write_batch_.Put(key, val));
     }

     void Clear() {
@@ -319,7 +319,7 @@ TEST_P(WriteCallbackPTest, WriteWithCallbackTest) {
             DBImpl* db_impl_;
           } publish_seq_callback(db_impl);
           // seq_per_batch_ requires a natural batch separator or Noop
-          WriteBatchInternal::InsertNoop(&write_op.write_batch_);
+          ASSERT_OK(WriteBatchInternal::InsertNoop(&write_op.write_batch_));
           const size_t ONE_BATCH = 1;
           s = db_impl->WriteImpl(woptions, &write_op.write_batch_,
                                  &write_op.callback_, nullptr, 0, false,
                                  nullptr,
@@ -396,8 +396,8 @@ TEST_F(WriteCallbackTest, WriteCallBackTest) {

   WriteBatch wb;

-  wb.Put("a", "value.a");
-  wb.Delete("x");
+  ASSERT_OK(wb.Put("a", "value.a"));
+  ASSERT_OK(wb.Delete("x"));

   // Test a simple Write
   s = db->Write(write_options, &wb);
@@ -411,7 +411,7 @@ TEST_F(WriteCallbackTest, WriteCallBackTest) {
   WriteCallbackTestWriteCallback1 callback1;
   WriteBatch wb2;

-  wb2.Put("a", "value.a2");
+  ASSERT_OK(wb2.Put("a", "value.a2"));

   s = db_impl->WriteWithCallback(write_options, &wb2, &callback1);
   ASSERT_OK(s);
@@ -425,7 +425,7 @@
   WriteCallbackTestWriteCallback2 callback2;
   WriteBatch wb3;

-  wb3.Put("a", "value.a3");
+  ASSERT_OK(wb3.Put("a", "value.a3"));

   s = db_impl->WriteWithCallback(write_options, &wb3, &callback2);
   ASSERT_NOK(s);
diff --git a/file/delete_scheduler.cc b/file/delete_scheduler.cc
index 5a032837e..b0c17bfc0 100644
--- a/file/delete_scheduler.cc
+++ b/file/delete_scheduler.cc
@@ -56,14 +56,13 @@ DeleteScheduler::~DeleteScheduler() {
 Status DeleteScheduler::DeleteFile(const std::string& file_path,
                                    const std::string& dir_to_sync,
                                    const bool force_bg) {
-  Status s;
   if (rate_bytes_per_sec_.load() <= 0 ||
       (!force_bg &&
        total_trash_size_.load() >
            sst_file_manager_->GetTotalSize() * max_trash_db_ratio_.load())) {
     // Rate limiting is disabled or trash size makes up more than
     // max_trash_db_ratio_ (default 25%) of the total DB size
     TEST_SYNC_POINT("DeleteScheduler::DeleteFile");
-    s = fs_->DeleteFile(file_path, IOOptions(), nullptr);
+    Status s = fs_->DeleteFile(file_path, IOOptions(), nullptr);
     if (s.ok()) {
       s = sst_file_manager_->OnDeleteFile(file_path);
       ROCKS_LOG_INFO(info_log_,
@@ -79,7 +78,7 @@ Status DeleteScheduler::DeleteFile(const std::string& file_path,

   // Move file to trash
   std::string trash_file;
-  s = MarkAsTrash(file_path, &trash_file);
+  Status s = MarkAsTrash(file_path, &trash_file);

   ROCKS_LOG_INFO(info_log_, "Mark file: %s as trash -- %s", trash_file.c_str(),
                  s.ToString().c_str());
@@ -99,7 +98,10 @@ Status DeleteScheduler::DeleteFile(const std::string& file_path,

   // Update the total trash size
   uint64_t trash_file_size = 0;
-  fs_->GetFileSize(trash_file, IOOptions(), &trash_file_size, nullptr);
+  Status ignored =
+      fs_->GetFileSize(trash_file, IOOptions(), &trash_file_size, nullptr);
+  ignored.PermitUncheckedError();  //**TODO: What should we do if we failed to
+                                   // get the file size?
   total_trash_size_.fetch_add(trash_file_size);

   // Add file to delete queue
@@ -169,17 +171,17 @@ Status DeleteScheduler::MarkAsTrash(const std::string& file_path,
     return Status::InvalidArgument("file_path is corrupted");
   }

-  Status s;
   if (DeleteScheduler::IsTrashFile(file_path)) {
     // This is already a trash file
     *trash_file = file_path;
-    return s;
+    return Status::OK();
   }

   *trash_file = file_path + kTrashExtension;
   // TODO(tec) : Implement Env::RenameFileIfNotExist and remove
   // file_move_mu mutex.
   int cnt = 0;
+  Status s;
   InstrumentedMutexLock l(&file_move_mu_);
   while (true) {
     s = fs_->FileExists(*trash_file, IOOptions(), nullptr);
@@ -197,7 +199,9 @@ Status DeleteScheduler::MarkAsTrash(const std::string& file_path,
     cnt++;
   }
   if (s.ok()) {
-    sst_file_manager_->OnMoveFile(file_path, *trash_file);
+    //**TODO: What should we do if this returns an error?
+    sst_file_manager_->OnMoveFile(file_path, *trash_file)
+        .PermitUncheckedError();
   }
   return s;
 }
diff --git a/table/block_based/data_block_hash_index_test.cc b/table/block_based/data_block_hash_index_test.cc
index 94fa7e94f..7ce296318 100644
--- a/table/block_based/data_block_hash_index_test.cc
+++ b/table/block_based/data_block_hash_index_test.cc
@@ -579,13 +579,13 @@ void TestBoundary(InternalKey& ik1, std::string& v1, InternalKey& ik2,
                              0 /*uniq_id*/, ioptions.allow_mmap_reads)));
   const bool kSkipFilters = true;
   const bool kImmortal = true;
-  ioptions.table_factory->NewTableReader(
+  ASSERT_OK(ioptions.table_factory->NewTableReader(
       TableReaderOptions(ioptions, moptions.prefix_extractor.get(), soptions,
                          internal_comparator, !kSkipFilters, !kImmortal,
                          level_),
      std::move(file_reader),
      test::GetStringSinkFromLegacyWriter(file_writer.get())->contents().size(),
-      &table_reader);
+      &table_reader));

   // Search using Get()
   ReadOptions ro;
diff --git a/table/cuckoo/cuckoo_table_builder.cc b/table/cuckoo/cuckoo_table_builder.cc
index 77df906c9..15f214035 100644
--- a/table/cuckoo/cuckoo_table_builder.cc
+++ b/table/cuckoo/cuckoo_table_builder.cc
@@ -82,6 +82,8 @@ CuckooTableBuilder::CuckooTableBuilder(
   properties_.column_family_name = column_family_name;
   properties_.db_id = db_id;
   properties_.db_session_id = db_session_id;
+  status_.PermitUncheckedError();
+  io_status_.PermitUncheckedError();
 }

 void CuckooTableBuilder::Add(const Slice& key, const Slice& value) {
@@ -250,7 +252,6 @@ Status CuckooTableBuilder::Finish() {
   assert(!closed_);
   closed_ = true;
   std::vector<CuckooBucket> buckets;
-  Status s;
   std::string unused_bucket;
   if (num_entries_ > 0) {
     // Calculate the real hash size if module hash is enabled.
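The DeleteScheduler and CuckooTableBuilder hunks above all revolve around one idiom: in an ASSERT_STATUS_CHECKED build, a rocksdb::Status asserts at destruction if it was never inspected, so a Status that is dropped on purpose has to be marked with PermitUncheckedError(). A minimal sketch of the idiom follows; BestEffortFlush is a hypothetical helper, not code from this patch:

#include <cassert>
#include "rocksdb/db.h"

// Sketch only: the two ways a rocksdb::Status can satisfy an
// ASSERT_STATUS_CHECKED build.
void BestEffortFlush(rocksdb::DB* db) {
  // 1) Inspect the Status: calling ok() marks it as checked.
  rocksdb::Status s = db->Put(rocksdb::WriteOptions(), "key", "value");
  assert(s.ok());

  // 2) Deliberately ignore the Status: PermitUncheckedError() marks the
  //    temporary as checked, so its destructor stays quiet.
  db->Flush(rocksdb::FlushOptions()).PermitUncheckedError();
}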
diff --git a/tools/ldb_cmd.cc b/tools/ldb_cmd.cc
index 75aca8076..162a18cc2 100644
--- a/tools/ldb_cmd.cc
+++ b/tools/ldb_cmd.cc
@@ -2016,12 +2016,16 @@ void ReduceDBLevelsCommand::DoCommand() {
   assert(db_ != nullptr);
   // Compact the whole DB to put all files to the highest level.
   fprintf(stdout, "Compacting the db...\n");
-  db_->CompactRange(CompactRangeOptions(), GetCfHandle(), nullptr, nullptr);
+  st =
+      db_->CompactRange(CompactRangeOptions(), GetCfHandle(), nullptr, nullptr);
+
   CloseDB();

-  EnvOptions soptions;
-  st = VersionSet::ReduceNumberOfLevels(db_path_, &options_, soptions,
-                                        new_levels_);
+  if (st.ok()) {
+    EnvOptions soptions;
+    st = VersionSet::ReduceNumberOfLevels(db_path_, &options_, soptions,
+                                          new_levels_);
+  }
   if (!st.ok()) {
     exec_state_ = LDBCommandExecuteResult::Failed(st.ToString());
     return;
diff --git a/tools/reduce_levels_test.cc b/tools/reduce_levels_test.cc
index e301016ab..3b2fdc505 100644
--- a/tools/reduce_levels_test.cc
+++ b/tools/reduce_levels_test.cc
@@ -107,7 +107,7 @@ bool ReduceLevelTest::ReduceLevels(int target_level) {
 TEST_F(ReduceLevelTest, Last_Level) {
   ASSERT_OK(OpenDB(true, 4));
   ASSERT_OK(Put("aaaa", "11111"));
-  Flush();
+  ASSERT_OK(Flush());
   MoveL0FileToLevel(3);
   ASSERT_EQ(FilesOnLevel(3), 1);
   CloseDB();
@@ -126,7 +126,7 @@ TEST_F(ReduceLevelTest, Last_Level) {
 TEST_F(ReduceLevelTest, Top_Level) {
   ASSERT_OK(OpenDB(true, 5));
   ASSERT_OK(Put("aaaa", "11111"));
-  Flush();
+  ASSERT_OK(Flush());
   ASSERT_EQ(FilesOnLevel(0), 1);
   CloseDB();
diff --git a/util/rate_limiter.cc b/util/rate_limiter.cc
index b1eefe620..d60434794 100644
--- a/util/rate_limiter.cc
+++ b/util/rate_limiter.cc
@@ -111,7 +111,8 @@ void GenericRateLimiter::Request(int64_t bytes, const Env::IOPriority pri,
     std::chrono::microseconds now(NowMicrosMonotonic(env_));
     if (now - tuned_time_ >=
         kRefillsPerTune * std::chrono::microseconds(refill_period_us_)) {
-      Tune();
+      Status s = Tune();
+      s.PermitUncheckedError();  //**TODO: What to do on error?
     }
   }
diff --git a/util/thread_list_test.cc b/util/thread_list_test.cc
index 44c3ebc99..2a398bb18 100644
--- a/util/thread_list_test.cc
+++ b/util/thread_list_test.cc
@@ -145,7 +145,7 @@ TEST_F(ThreadListTest, SimpleColumnFamilyInfoTest) {
   std::vector<ThreadStatus> thread_list;

   // Verify the number of running threads in each pool.
-  env->GetThreadList(&thread_list);
+  ASSERT_OK(env->GetThreadList(&thread_list));
   int running_count[ThreadStatus::NUM_THREAD_TYPES] = {0};
   for (auto thread_status : thread_list) {
     if (thread_status.cf_name == "pikachu" &&
@@ -166,7 +166,7 @@ TEST_F(ThreadListTest, SimpleColumnFamilyInfoTest) {
   running_task.WaitUntilDone();

   // Verify none of the threads are running
-  env->GetThreadList(&thread_list);
+  ASSERT_OK(env->GetThreadList(&thread_list));

   for (int i = 0; i < ThreadStatus::NUM_THREAD_TYPES; ++i) {
     running_count[i] = 0;
@@ -281,7 +281,7 @@ TEST_F(ThreadListTest, SimpleEventTest) {
   int state_counts[ThreadStatus::NUM_STATE_TYPES] = {0};

   std::vector<ThreadStatus> thread_list;
-  env->GetThreadList(&thread_list);
+  ASSERT_OK(env->GetThreadList(&thread_list));
   UpdateStatusCounts(thread_list, operation_counts, state_counts);
   VerifyAndResetCounts(correct_operation_counts, operation_counts,
                        ThreadStatus::NUM_OP_TYPES);
@@ -293,7 +293,7 @@
   UpdateCount(correct_operation_counts, ThreadStatus::OP_COMPACTION,
               ThreadStatus::OP_UNKNOWN, kCompactionWaitTasks);

-  env->GetThreadList(&thread_list);
+  ASSERT_OK(env->GetThreadList(&thread_list));
   UpdateStatusCounts(thread_list, operation_counts, state_counts);
   VerifyAndResetCounts(correct_operation_counts, operation_counts,
                        ThreadStatus::NUM_OP_TYPES);
@@ -305,7 +305,7 @@
   UpdateCount(correct_operation_counts, ThreadStatus::OP_FLUSH,
               ThreadStatus::OP_UNKNOWN, kFlushWriteTasks);

-  env->GetThreadList(&thread_list);
+  ASSERT_OK(env->GetThreadList(&thread_list));
   UpdateStatusCounts(thread_list, operation_counts, state_counts);
   VerifyAndResetCounts(correct_operation_counts, operation_counts,
                        ThreadStatus::NUM_OP_TYPES);
@@ -317,7 +317,7 @@
   UpdateCount(correct_operation_counts, ThreadStatus::OP_COMPACTION,
               ThreadStatus::OP_UNKNOWN, kCompactionWriteTasks);

-  env->GetThreadList(&thread_list);
+  ASSERT_OK(env->GetThreadList(&thread_list));
   UpdateStatusCounts(thread_list, operation_counts, state_counts);
   VerifyAndResetCounts(correct_operation_counts, operation_counts,
                        ThreadStatus::NUM_OP_TYPES);
@@ -329,7 +329,7 @@
   UpdateCount(correct_operation_counts, ThreadStatus::OP_COMPACTION,
               ThreadStatus::OP_UNKNOWN, kCompactionReadTasks);

-  env->GetThreadList(&thread_list);
+  ASSERT_OK(env->GetThreadList(&thread_list));
   UpdateStatusCounts(thread_list, operation_counts, state_counts);
   VerifyAndResetCounts(correct_operation_counts, operation_counts,
                        ThreadStatus::NUM_OP_TYPES);
diff --git a/utilities/cassandra/cassandra_functional_test.cc b/utilities/cassandra/cassandra_functional_test.cc
index d53a07907..cd06acc92 100644
--- a/utilities/cassandra/cassandra_functional_test.cc
+++ b/utilities/cassandra/cassandra_functional_test.cc
@@ -58,14 +58,17 @@ class CassandraStore {
     }
   }

-  void Flush() {
-    dbfull()->TEST_FlushMemTable();
-    dbfull()->TEST_WaitForCompact();
+  Status Flush() {
+    Status s = dbfull()->TEST_FlushMemTable();
+    if (s.ok()) {
+      s = dbfull()->TEST_WaitForCompact();
+    }
+    return s;
   }

-  void Compact() {
-    dbfull()->TEST_CompactRange(
-        0, nullptr, nullptr, db_->DefaultColumnFamily());
+  Status Compact() {
+    return dbfull()->TEST_CompactRange(0, nullptr, nullptr,
+                                       db_->DefaultColumnFamily());
   }

   std::tuple<bool, RowValue> Get(const std::string& key){
@@ -189,15 +192,15 @@ TEST_F(CassandraFunctionalTest,
       CreateTestColumnSpec(kTombstone, 3, ToMicroSeconds(now))
   }));

-  store.Flush();
+  ASSERT_OK(store.Flush());
store.Append("k1",CreateTestRowValue({ CreateTestColumnSpec(kExpiringColumn, 0, ToMicroSeconds(now - kTtl - 10)), //expired CreateTestColumnSpec(kColumn, 2, ToMicroSeconds(now)) })); - store.Flush(); - store.Compact(); + ASSERT_OK(store.Flush()); + ASSERT_OK(store.Compact()); auto ret = store.Get("k1"); ASSERT_TRUE(std::get<0>(ret)); @@ -226,15 +229,15 @@ TEST_F(CassandraFunctionalTest, CreateTestColumnSpec(kTombstone, 3, ToMicroSeconds(now)) })); - store.Flush(); + ASSERT_OK(store.Flush()); store.Append("k1",CreateTestRowValue({ CreateTestColumnSpec(kExpiringColumn, 0, ToMicroSeconds(now - kTtl - 10)), //expired CreateTestColumnSpec(kColumn, 2, ToMicroSeconds(now)) })); - store.Flush(); - store.Compact(); + ASSERT_OK(store.Flush()); + ASSERT_OK(store.Compact()); auto ret = store.Get("k1"); ASSERT_TRUE(std::get<0>(ret)); @@ -259,14 +262,14 @@ TEST_F(CassandraFunctionalTest, CreateTestColumnSpec(kExpiringColumn, 1, ToMicroSeconds(now - kTtl - 20)), })); - store.Flush(); + ASSERT_OK(store.Flush()); store.Append("k1",CreateTestRowValue({ CreateTestColumnSpec(kExpiringColumn, 0, ToMicroSeconds(now - kTtl - 10)), })); - store.Flush(); - store.Compact(); + ASSERT_OK(store.Flush()); + ASSERT_OK(store.Compact()); ASSERT_FALSE(std::get<0>(store.Get("k1"))); } @@ -285,14 +288,14 @@ TEST_F(CassandraFunctionalTest, CreateTestColumnSpec(kColumn, 0, ToMicroSeconds(now)) })); - store.Flush(); + ASSERT_OK(store.Flush()); store.Append("k1",CreateTestRowValue({ CreateTestColumnSpec(kColumn, 1, ToMicroSeconds(now)), })); - store.Flush(); - store.Compact(); + ASSERT_OK(store.Flush()); + ASSERT_OK(store.Compact()); auto ret = store.Get("k1"); ASSERT_TRUE(std::get<0>(ret)); @@ -310,8 +313,8 @@ TEST_F(CassandraFunctionalTest, CompactionShouldRemoveTombstoneFromPut) { CreateTestColumnSpec(kTombstone, 0, ToMicroSeconds(now - gc_grace_period_in_seconds_ - 1)), })); - store.Flush(); - store.Compact(); + ASSERT_OK(store.Flush()); + ASSERT_OK(store.Compact()); ASSERT_FALSE(std::get<0>(store.Get("k1"))); } diff --git a/utilities/option_change_migration/option_change_migration.cc b/utilities/option_change_migration/option_change_migration.cc index f2382297b..5058c968a 100644 --- a/utilities/option_change_migration/option_change_migration.cc +++ b/utilities/option_change_migration/option_change_migration.cc @@ -60,9 +60,9 @@ Status CompactToLevel(const Options& options, const std::string& dbname, // generate one output file cro.bottommost_level_compaction = BottommostLevelCompaction::kForce; } - db->CompactRange(cro, nullptr, nullptr); + s = db->CompactRange(cro, nullptr, nullptr); - if (need_reopen) { + if (s.ok() && need_reopen) { // Need to restart DB to rewrite the manifest file. // In order to open a DB with specific num_levels, the manifest file should // contain no record that mentiones any level beyond num_levels. 
diff --git a/utilities/option_change_migration/option_change_migration_test.cc b/utilities/option_change_migration/option_change_migration_test.cc
index c94564085..c457d45cd 100644
--- a/utilities/option_change_migration/option_change_migration_test.cc
+++ b/utilities/option_change_migration/option_change_migration_test.cc
@@ -72,8 +72,8 @@ TEST_P(DBOptionChangeMigrationTests, Migrate1) {
   for (int num = 0; num < 20; num++) {
     GenerateNewFile(&rnd, &key_idx);
   }
-  dbfull()->TEST_WaitForFlushMemTable();
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());

   // Will make sure exactly those keys are in the DB after migration.
   std::set<std::string> keys;
@@ -100,8 +100,8 @@ TEST_P(DBOptionChangeMigrationTests, Migrate1) {
   Reopen(new_options);

   // Wait for compaction to finish and make sure it can reopen
-  dbfull()->TEST_WaitForFlushMemTable();
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   Reopen(new_options);

   {
@@ -140,8 +140,8 @@ TEST_P(DBOptionChangeMigrationTests, Migrate2) {
   for (int num = 0; num < 20; num++) {
     GenerateNewFile(&rnd, &key_idx);
   }
-  dbfull()->TEST_WaitForFlushMemTable();
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());

   // Will make sure exactly those keys are in the DB after migration.
   std::set<std::string> keys;
@@ -168,8 +168,8 @@ TEST_P(DBOptionChangeMigrationTests, Migrate2) {
   ASSERT_OK(OptionChangeMigration(dbname_, old_options, new_options));
   Reopen(new_options);
   // Wait for compaction to finish and make sure it can reopen
-  dbfull()->TEST_WaitForFlushMemTable();
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   Reopen(new_options);

   {
@@ -207,16 +207,16 @@ TEST_P(DBOptionChangeMigrationTests, Migrate3) {
       ASSERT_OK(Put(Key(num * 100 + i), rnd.RandomString(900)));
     }
     Flush();
-    dbfull()->TEST_WaitForCompact();
+    ASSERT_OK(dbfull()->TEST_WaitForCompact());
     if (num == 9) {
       // Issue a full compaction to generate some zero-out files
       CompactRangeOptions cro;
       cro.bottommost_level_compaction = BottommostLevelCompaction::kForce;
-      dbfull()->CompactRange(cro, nullptr, nullptr);
+      ASSERT_OK(dbfull()->CompactRange(cro, nullptr, nullptr));
     }
   }
-  dbfull()->TEST_WaitForFlushMemTable();
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());

   // Will make sure exactly those keys are in the DB after migration.
   std::set<std::string> keys;
@@ -243,8 +243,8 @@ TEST_P(DBOptionChangeMigrationTests, Migrate3) {
   Reopen(new_options);

   // Wait for compaction to finish and make sure it can reopen
-  dbfull()->TEST_WaitForFlushMemTable();
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   Reopen(new_options);

   {
@@ -281,16 +281,16 @@ TEST_P(DBOptionChangeMigrationTests, Migrate4) {
       ASSERT_OK(Put(Key(num * 100 + i), rnd.RandomString(900)));
     }
     Flush();
-    dbfull()->TEST_WaitForCompact();
+    ASSERT_OK(dbfull()->TEST_WaitForCompact());
     if (num == 9) {
       // Issue a full compaction to generate some zero-out files
       CompactRangeOptions cro;
       cro.bottommost_level_compaction = BottommostLevelCompaction::kForce;
-      dbfull()->CompactRange(cro, nullptr, nullptr);
+      ASSERT_OK(dbfull()->CompactRange(cro, nullptr, nullptr));
     }
   }
-  dbfull()->TEST_WaitForFlushMemTable();
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());

   // Will make sure exactly those keys are in the DB after migration.
   std::set<std::string> keys;
@@ -317,8 +317,8 @@ TEST_P(DBOptionChangeMigrationTests, Migrate4) {
   ASSERT_OK(OptionChangeMigration(dbname_, old_options, new_options));
   Reopen(new_options);
   // Wait for compaction to finish and make sure it can reopen
-  dbfull()->TEST_WaitForFlushMemTable();
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   Reopen(new_options);

   {
@@ -381,7 +381,7 @@ TEST_F(DBOptionChangeMigrationTest, CompactedSrcToUniversal) {
   Flush();
   CompactRangeOptions cro;
   cro.bottommost_level_compaction = BottommostLevelCompaction::kForce;
-  dbfull()->CompactRange(cro, nullptr, nullptr);
+  ASSERT_OK(dbfull()->CompactRange(cro, nullptr, nullptr));

   // Will make sure exactly those keys are in the DB after migration.
   std::set<std::string> keys;
@@ -404,8 +404,8 @@ TEST_F(DBOptionChangeMigrationTest, CompactedSrcToUniversal) {
   ASSERT_OK(OptionChangeMigration(dbname_, old_options, new_options));
   Reopen(new_options);
   // Wait for compaction to finish and make sure it can reopen
-  dbfull()->TEST_WaitForFlushMemTable();
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   Reopen(new_options);

   {
@@ -417,6 +417,7 @@ TEST_F(DBOptionChangeMigrationTest, CompactedSrcToUniversal) {
       it->Next();
     }
     ASSERT_TRUE(!it->Valid());
+    ASSERT_OK(it->status());
   }
 }
diff --git a/utilities/table_properties_collectors/compact_on_deletion_collector_test.cc b/utilities/table_properties_collectors/compact_on_deletion_collector_test.cc
index 40cb04abb..97784efe4 100644
--- a/utilities/table_properties_collectors/compact_on_deletion_collector_test.cc
+++ b/utilities/table_properties_collectors/compact_on_deletion_collector_test.cc
@@ -38,10 +38,11 @@ TEST(CompactOnDeletionCollector, DeletionRatio) {
         factory->CreateTablePropertiesCollector(context));
     for (size_t i = 0; i < kTotalEntries; i++) {
       // All entries are deletion entries.
- collector->AddUserKey("hello", "rocksdb", kEntryDelete, 0, 0); + ASSERT_OK( + collector->AddUserKey("hello", "rocksdb", kEntryDelete, 0, 0)); ASSERT_FALSE(collector->NeedCompact()); } - collector->Finish(nullptr); + ASSERT_OK(collector->Finish(nullptr)); ASSERT_FALSE(collector->NeedCompact()); } } @@ -58,13 +59,15 @@ TEST(CompactOnDeletionCollector, DeletionRatio) { factory->CreateTablePropertiesCollector(context)); for (size_t i = 0; i < kTotalEntries; i++) { if (i < actual_deletion_entries) { - collector->AddUserKey("hello", "rocksdb", kEntryDelete, 0, 0); + ASSERT_OK( + collector->AddUserKey("hello", "rocksdb", kEntryDelete, 0, 0)); } else { - collector->AddUserKey("hello", "rocksdb", kEntryPut, 0, 0); + ASSERT_OK( + collector->AddUserKey("hello", "rocksdb", kEntryPut, 0, 0)); } ASSERT_FALSE(collector->NeedCompact()); } - collector->Finish(nullptr); + ASSERT_OK(collector->Finish(nullptr)); if (delta >= 0) { // >= deletion_ratio ASSERT_TRUE(collector->NeedCompact()); @@ -123,10 +126,12 @@ TEST(CompactOnDeletionCollector, SlidingWindow) { int deletions = 0; for (int i = 0; i < kPaddedWindowSize; ++i) { if (i % kSample < delete_rate) { - collector->AddUserKey("hello", "rocksdb", kEntryDelete, 0, 0); + ASSERT_OK( + collector->AddUserKey("hello", "rocksdb", kEntryDelete, 0, 0)); deletions++; } else { - collector->AddUserKey("hello", "rocksdb", kEntryPut, 0, 0); + ASSERT_OK( + collector->AddUserKey("hello", "rocksdb", kEntryPut, 0, 0)); } } if (collector->NeedCompact() != @@ -138,7 +143,7 @@ TEST(CompactOnDeletionCollector, SlidingWindow) { kWindowSize, kNumDeletionTrigger); ASSERT_TRUE(false); } - collector->Finish(nullptr); + ASSERT_OK(collector->Finish(nullptr)); } } @@ -154,21 +159,25 @@ TEST(CompactOnDeletionCollector, SlidingWindow) { for (int section = 0; section < 5; ++section) { int initial_entries = rnd.Uniform(kWindowSize) + kWindowSize; for (int i = 0; i < initial_entries; ++i) { - collector->AddUserKey("hello", "rocksdb", kEntryPut, 0, 0); + ASSERT_OK( + collector->AddUserKey("hello", "rocksdb", kEntryPut, 0, 0)); } } for (int i = 0; i < kPaddedWindowSize; ++i) { if (i % kSample < delete_rate) { - collector->AddUserKey("hello", "rocksdb", kEntryDelete, 0, 0); + ASSERT_OK( + collector->AddUserKey("hello", "rocksdb", kEntryDelete, 0, 0)); deletions++; } else { - collector->AddUserKey("hello", "rocksdb", kEntryPut, 0, 0); + ASSERT_OK( + collector->AddUserKey("hello", "rocksdb", kEntryPut, 0, 0)); } } for (int section = 0; section < 5; ++section) { int ending_entries = rnd.Uniform(kWindowSize) + kWindowSize; for (int i = 0; i < ending_entries; ++i) { - collector->AddUserKey("hello", "rocksdb", kEntryPut, 0, 0); + ASSERT_OK( + collector->AddUserKey("hello", "rocksdb", kEntryPut, 0, 0)); } } if (collector->NeedCompact() != (deletions >= kNumDeletionTrigger) && @@ -180,7 +189,7 @@ TEST(CompactOnDeletionCollector, SlidingWindow) { kNumDeletionTrigger); ASSERT_TRUE(false); } - collector->Finish(nullptr); + ASSERT_OK(collector->Finish(nullptr)); } } @@ -199,9 +208,11 @@ TEST(CompactOnDeletionCollector, SlidingWindow) { for (int section = 0; section < 200; ++section) { for (int i = 0; i < kPaddedWindowSize; ++i) { if (i < kDeletionsPerSection) { - collector->AddUserKey("hello", "rocksdb", kEntryDelete, 0, 0); + ASSERT_OK(collector->AddUserKey("hello", "rocksdb", kEntryDelete, + 0, 0)); } else { - collector->AddUserKey("hello", "rocksdb", kEntryPut, 0, 0); + ASSERT_OK( + collector->AddUserKey("hello", "rocksdb", kEntryPut, 0, 0)); } } } @@ -212,7 +223,7 @@ TEST(CompactOnDeletionCollector, 
               kWindowSize, kNumDeletionTrigger);
       ASSERT_TRUE(false);
     }
-    collector->Finish(nullptr);
+    ASSERT_OK(collector->Finish(nullptr));
   }
 }
 }
diff --git a/utilities/ttl/db_ttl_impl.cc b/utilities/ttl/db_ttl_impl.cc
index dc612e851..0e0222e40 100644
--- a/utilities/ttl/db_ttl_impl.cc
+++ b/utilities/ttl/db_ttl_impl.cc
@@ -208,10 +208,10 @@ Status DBWithTTLImpl::Put(const WriteOptions& options,
                           const Slice& val) {
   WriteBatch batch;
   Status st = batch.Put(column_family, key, val);
-  if (!st.ok()) {
-    return st;
+  if (st.ok()) {
+    st = Write(options, &batch);
   }
-  return Write(options, &batch);
+  return st;
 }

 Status DBWithTTLImpl::Get(const ReadOptions& options,
@@ -264,10 +264,10 @@ Status DBWithTTLImpl::Merge(const WriteOptions& options,
                             const Slice& value) {
   WriteBatch batch;
   Status st = batch.Merge(column_family, key, value);
-  if (!st.ok()) {
-    return st;
+  if (st.ok()) {
+    st = Write(options, &batch);
   }
-  return Write(options, &batch);
+  return st;
 }

 Status DBWithTTLImpl::Write(const WriteOptions& opts, WriteBatch* updates) {
diff --git a/utilities/ttl/ttl_test.cc b/utilities/ttl/ttl_test.cc
index 015f67cc4..71e9677b2 100644
--- a/utilities/ttl/ttl_test.cc
+++ b/utilities/ttl/ttl_test.cc
@@ -27,7 +27,7 @@ enum BatchOperation { OP_PUT = 0, OP_DELETE = 1 };
 class SpecialTimeEnv : public EnvWrapper {
  public:
   explicit SpecialTimeEnv(Env* base) : EnvWrapper(base) {
-    base->GetCurrentTime(&current_time_);
+    EXPECT_OK(base->GetCurrentTime(&current_time_));
   }

   void Sleep(int64_t sleep_time) { current_time_ += sleep_time; }
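Taken together, the test-file hunks apply a single rule: every Status-returning call inside a test is wrapped so that the result is both checked and able to fail the test. A hedged sketch of that pattern, using an invented test name and database path (ASSERT_OK and EXPECT_OK come from RocksDB's test harness in test_util/testharness.h):

#include "rocksdb/db.h"
#include "test_util/testharness.h"

// Illustrative test, not part of this patch.
TEST(StatusCheckedExampleTest, WrapEveryStatus) {
  rocksdb::DB* db = nullptr;
  rocksdb::Options options;
  options.create_if_missing = true;
  ASSERT_OK(rocksdb::DB::Open(options, "/tmp/status_checked_example", &db));
  ASSERT_OK(db->Put(rocksdb::WriteOptions(), "k", "v"));
  // EXPECT_OK records a failure but keeps the test running, which suits
  // helpers that cannot return early (e.g. GetLogDirSize above).
  EXPECT_OK(db->Flush(rocksdb::FlushOptions()));
  ASSERT_OK(db->Close());
  delete db;
}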