From cc2a180d00d0aeace8e8e7abc61a7b0a532592de Mon Sep 17 00:00:00 2001
From: mrambacher
Date: Thu, 7 Jan 2021 15:21:51 -0800
Subject: [PATCH] Add more tests to the ASC pass list (#7834)

Summary:
Fixed the following to now pass ASC checks:
* `ttl_test`
* `blob_db_test`
* `backupable_db_test`
* `delete_scheduler_test`

Pull Request resolved: https://github.com/facebook/rocksdb/pull/7834

Reviewed By: jay-zhuang

Differential Revision: D25795398

Pulled By: ajkr

fbshipit-source-id: a10037817deda4fc7cbb353a2e00b62ed89b6476
---
 Makefile                                   |   4 +
 db/version_edit_handler.cc                 |  10 +-
 file/delete_scheduler.cc                   |   3 +
 file/delete_scheduler.h                    |   6 +-
 file/delete_scheduler_test.cc              |   6 +-
 utilities/backupable/backupable_db.cc      | 113 +++++++++++++--------
 utilities/backupable/backupable_db_test.cc | 105 ++++++++++---------
 utilities/blob_db/blob_db_impl.cc          |  24 ++---
 utilities/blob_db/blob_db_test.cc          |  40 ++++----
 utilities/blob_db/blob_file.cc             |   5 +-
 utilities/ttl/ttl_test.cc                  |  61 ++++++-----
 11 files changed, 220 insertions(+), 157 deletions(-)

diff --git a/Makefile b/Makefile
index ac98608b2..2c792f8a5 100644
--- a/Makefile
+++ b/Makefile
@@ -659,6 +659,7 @@ ifdef ASSERT_STATUS_CHECKED
 	external_sst_file_basic_test \
 	auto_roll_logger_test \
 	file_indexer_test \
+	delete_scheduler_test \
 	flush_job_test \
 	hash_table_test \
 	hash_test \
@@ -747,6 +748,9 @@ ifdef ASSERT_STATUS_CHECKED
 	cuckoo_table_reader_test \
 	memory_test \
 	table_test \
+	backupable_db_test \
+	blob_db_test \
+	ttl_test \
 	write_batch_test \
 	write_batch_with_index_test \

diff --git a/db/version_edit_handler.cc b/db/version_edit_handler.cc
index 98cb0a831..38e2dcde7 100644
--- a/db/version_edit_handler.cc
+++ b/db/version_edit_handler.cc
@@ -98,12 +98,18 @@ Status ListColumnFamiliesHandler::ApplyVersionEdit(
 Status FileChecksumRetriever::ApplyVersionEdit(VersionEdit& edit,
                                                ColumnFamilyData** /*unused*/) {
   for (const auto& deleted_file : edit.GetDeletedFiles()) {
-    file_checksum_list_.RemoveOneFileChecksum(deleted_file.second);
+    Status s = file_checksum_list_.RemoveOneFileChecksum(deleted_file.second);
+    if (!s.ok()) {
+      return s;
+    }
   }
   for (const auto& new_file : edit.GetNewFiles()) {
-    file_checksum_list_.InsertOneFileChecksum(
+    Status s = file_checksum_list_.InsertOneFileChecksum(
         new_file.second.fd.GetNumber(), new_file.second.file_checksum,
         new_file.second.file_checksum_func_name);
+    if (!s.ok()) {
+      return s;
+    }
   }
   return Status::OK();
 }
diff --git a/file/delete_scheduler.cc b/file/delete_scheduler.cc
index ed1755fd7..2693baf9a 100644
--- a/file/delete_scheduler.cc
+++ b/file/delete_scheduler.cc
@@ -51,6 +51,9 @@ DeleteScheduler::~DeleteScheduler() {
   if (bg_thread_) {
     bg_thread_->join();
   }
+  for (const auto& it : bg_errors_) {
+    it.second.PermitUncheckedError();
+  }
 }

 Status DeleteScheduler::DeleteFile(const std::string& file_path,
diff --git a/file/delete_scheduler.h b/file/delete_scheduler.h
index b2d17a73e..7c5ae8e00 100644
--- a/file/delete_scheduler.h
+++ b/file/delete_scheduler.h
@@ -26,7 +26,7 @@ class SstFileManagerImpl;
 // DeleteScheduler allows the DB to enforce a rate limit on file deletion.
 // Instead of deleting files immediately, files are marked as trash
-// and deleted in a background thread that apply sleep penlty between deletes
+// and deleted in a background thread that applies a sleep penalty between deletes
 // if they are happening at a rate faster than rate_bytes_per_sec.
 //
 // Rate limiting can be turned off by setting rate_bytes_per_sec = 0. In this
@@ -48,7 +48,7 @@ class DeleteScheduler {
     MaybeCreateBackgroundThread();
   }
-  // Mark file as trash directory and schedule it's deletion. If force_bg is
+  // Mark file as trash directory and schedule its deletion. If force_bg is
   // set, it forces the file to always be deleted in the background thread,
   // except when rate limiting is disabled
   Status DeleteFile(const std::string& fname, const std::string& dir_to_sync,
@@ -78,7 +78,7 @@ class DeleteScheduler {
   static const std::string kTrashExtension;
   static bool IsTrashFile(const std::string& file_path);

-  // Check if there are any .trash filse in path, and schedule their deletion
+  // Check if there are any .trash files in path, and schedule their deletion
   // Or delete immediately if sst_file_manager is nullptr
   static Status CleanupDirectory(Env* env, SstFileManagerImpl* sfm,
                                  const std::string& path);
diff --git a/file/delete_scheduler_test.cc b/file/delete_scheduler_test.cc
index 7241dfba5..a53a50be3 100644
--- a/file/delete_scheduler_test.cc
+++ b/file/delete_scheduler_test.cc
@@ -423,7 +423,9 @@ TEST_F(DeleteSchedulerTest, BackgroundError) {
   delete_scheduler_->WaitForEmptyTrash();
   auto bg_errors = delete_scheduler_->GetBackgroundErrors();
   ASSERT_EQ(bg_errors.size(), 10);
-
+  for (const auto& it : bg_errors) {
+    ASSERT_TRUE(it.second.IsPathNotFound());
+  }
   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
 }

@@ -667,7 +669,7 @@ TEST_F(DeleteSchedulerTest, ImmediateDeleteOn25PercDBSize) {
   }

   for (std::string& file_name : generated_files) {
-    delete_scheduler_->DeleteFile(file_name, "");
+    ASSERT_OK(delete_scheduler_->DeleteFile(file_name, ""));
   }

   // When we end up with 26 files in trash we will start
diff --git a/utilities/backupable/backupable_db.cc b/utilities/backupable/backupable_db.cc
index 885038d1e..075ac90c1 100644
--- a/utilities/backupable/backupable_db.cc
+++ b/utilities/backupable/backupable_db.cc
@@ -215,9 +215,7 @@ class BackupEngineImpl : public BackupEngine {

     ~BackupMeta() {}

-    void RecordTimestamp() {
-      env_->GetCurrentTime(&timestamp_);
-    }
+    Status RecordTimestamp() { return env_->GetCurrentTime(&timestamp_); }

     int64_t GetTimestamp() const { return timestamp_; }

@@ -402,6 +400,16 @@ class BackupEngineImpl : public BackupEngine {
       std::string* db_session_id);

   struct CopyOrCreateResult {
+    ~CopyOrCreateResult() {
+      // The Status needs to be ignored here for two reasons.
+      // First, if the BackupEngineImpl shuts down with jobs outstanding, then
+      // it is possible that the Status in the future/promise is never read,
+      // resulting in an unchecked Status. Second, if there are items in the
+      // channel when the BackupEngineImpl is shut down, these will also have
+      // Statuses that have not been checked.
+      // TODO: Fix those issues so that the Status is always checked.
+      status.PermitUncheckedError();
+    }
     uint64_t size;
     std::string checksum_hex;
     std::string db_id;
@@ -670,6 +678,9 @@ BackupEngineImpl::~BackupEngineImpl() {
     t.join();
   }
   LogFlush(options_.info_log);
+  for (const auto& it : corrupt_backups_) {
+    it.second.first.PermitUncheckedError();
+  }
 }

 Status BackupEngineImpl::Initialize() {
@@ -783,7 +794,9 @@ Status BackupEngineImpl::Initialize() {
   for (const auto& rel_dir :
        {GetSharedFileRel(), GetSharedFileWithChecksumRel()}) {
     const auto abs_dir = GetAbsolutePath(rel_dir);
-    InsertPathnameToSizeBytes(abs_dir, backup_env_, &abs_path_to_size);
+    // TODO: What to do on error?
+    InsertPathnameToSizeBytes(abs_dir, backup_env_, &abs_path_to_size)
+        .PermitUncheckedError();
   }
   // load the backups if any, until valid_backups_to_open of the latest
   // non-corrupted backups have been successfully opened.
@@ -801,11 +814,13 @@ Status BackupEngineImpl::Initialize() {

       // Insert files and their sizes in backup sub-directories
       // (private/backup_id) to abs_path_to_size
-      InsertPathnameToSizeBytes(
+      Status s = InsertPathnameToSizeBytes(
           GetAbsolutePath(GetPrivateFileRel(backup_iter->first)), backup_env_,
           &abs_path_to_size);
-      Status s = backup_iter->second->LoadFromFile(options_.backup_dir,
-                                                   abs_path_to_size);
+      if (s.ok()) {
+        s = backup_iter->second->LoadFromFile(options_.backup_dir,
+                                              abs_path_to_size);
+      }
       if (s.IsCorruption()) {
         ROCKS_LOG_INFO(options_.info_log, "Backup %u corrupted -- %s",
                        backup_iter->first, s.ToString().c_str());
@@ -961,7 +976,8 @@ Status BackupEngineImpl::CreateNewBackupWithMetadata(
       &backuped_file_infos_, backup_env_))));
   assert(ret.second == true);
   auto& new_backup = ret.first->second;
-  new_backup->RecordTimestamp();
+  // TODO: What should we do on error here?
+  new_backup->RecordTimestamp().PermitUncheckedError();
   new_backup->SetAppMetadata(app_metadata);

   auto start_backup = backup_env_->NowMicros();
@@ -986,7 +1002,7 @@ Status BackupEngineImpl::CreateNewBackupWithMetadata(
   std::vector<BackupAfterCopyOrCreateWorkItem> backup_items_to_finish;
   // Add a CopyOrCreateWorkItem to the channel for each live file
-  db->DisableFileDeletions();
+  Status disabled = db->DisableFileDeletions();
   if (s.ok()) {
     CheckpointImpl checkpoint(db);
     uint64_t sequence_number = 0;
@@ -1091,8 +1107,9 @@
   }
   // we copied all the files, enable file deletions
-  db->EnableFileDeletions(false);
-
+  if (disabled.ok()) {  // If we successfully disabled file deletions
+    db->EnableFileDeletions(false).PermitUncheckedError();
+  }
   auto backup_time = backup_env_->NowMicros() - start_backup;

   if (s.ok()) {
@@ -1133,7 +1150,7 @@
                    backup_statistics_.ToString().c_str());
     // delete files that we might have already written
     might_need_garbage_collect_ = true;
-    DeleteBackup(new_backup_id);
+    DeleteBackup(new_backup_id).PermitUncheckedError();
     return s;
   }

@@ -1201,6 +1218,7 @@ Status BackupEngineImpl::DeleteBackup(BackupID backup_id) {
   }

   if (!s1.ok()) {
+    s2.PermitUncheckedError();  // What to do?
     return s1;
   } else {
     return s2;
@@ -1229,6 +1247,7 @@ Status BackupEngineImpl::DeleteBackupInternal(BackupID backup_id) {
     if (!s.ok()) {
       return s;
     }
+    corrupt->second.first.PermitUncheckedError();
     corrupt_backups_.erase(corrupt);
   }

@@ -1310,8 +1329,8 @@ Status BackupEngineImpl::RestoreDBFromBackup(const RestoreOptions& options,
                  static_cast<int>(options.keep_log_files));

   // just in case. Ignore errors
-  db_env_->CreateDirIfMissing(db_dir);
-  db_env_->CreateDirIfMissing(wal_dir);
+  db_env_->CreateDirIfMissing(db_dir).PermitUncheckedError();
+  db_env_->CreateDirIfMissing(wal_dir).PermitUncheckedError();

   if (options.keep_log_files) {
     // delete files in db_dir, but keep all the log files
@@ -1319,7 +1338,8 @@ Status BackupEngineImpl::RestoreDBFromBackup(const RestoreOptions& options,
     // move all the files from archive dir to wal_dir
     std::string archive_dir = ArchivalDirectory(wal_dir);
     std::vector<std::string> archive_files;
-    db_env_->GetChildren(archive_dir, &archive_files);  // ignore errors
+    db_env_->GetChildren(archive_dir, &archive_files)
+        .PermitUncheckedError();  // ignore errors
     for (const auto& f : archive_files) {
       uint64_t number;
       FileType type;
@@ -1439,7 +1459,9 @@ Status BackupEngineImpl::VerifyBackup(BackupID backup_id,
   for (const auto& rel_dir : {GetPrivateFileRel(backup_id), GetSharedFileRel(),
                               GetSharedFileWithChecksumRel()}) {
     const auto abs_dir = GetAbsolutePath(rel_dir);
-    InsertPathnameToSizeBytes(abs_dir, backup_env_, &curr_abs_path_to_size);
+    // TODO: What to do on error?
+    InsertPathnameToSizeBytes(abs_dir, backup_env_, &curr_abs_path_to_size)
+        .PermitUncheckedError();
   }

   // For all files registered in backup
@@ -1463,9 +1485,11 @@ Status BackupEngineImpl::VerifyBackup(BackupID backup_id,
       std::string checksum_hex;
       ROCKS_LOG_INFO(options_.info_log, "Verifying %s checksum...\n",
                      abs_path.c_str());
-      ReadFileAndComputeChecksum(abs_path, backup_env_, EnvOptions(),
-                                 0 /* size_limit */, &checksum_hex);
-      if (file_info->checksum_hex != checksum_hex) {
+      Status s = ReadFileAndComputeChecksum(abs_path, backup_env_, EnvOptions(),
+                                            0 /* size_limit */, &checksum_hex);
+      if (!s.ok()) {
+        return s;
+      } else if (file_info->checksum_hex != checksum_hex) {
         std::string checksum_info(
             "Expected checksum is " + file_info->checksum_hex +
             " while computed checksum is " + checksum_hex);
@@ -1589,7 +1613,6 @@ Status BackupEngineImpl::AddBackupFileWorkItem(
   std::string dst_relative = fname.substr(1);
   std::string dst_relative_tmp;
-  Status s;
   std::string checksum_hex;
   std::string db_id;
   std::string db_session_id;
@@ -1622,15 +1645,16 @@ Status BackupEngineImpl::AddBackupFileWorkItem(
     // Ignore the returned status
     // In the failed cases, db_id and db_session_id will be empty
     GetFileDbIdentities(db_env_, src_env_options, src_dir + fname, &db_id,
-                        &db_session_id);
+                        &db_session_id)
+        .PermitUncheckedError();
   }
   // Calculate checksum if checksum and db session id are not available.
   // If db session id is available, we will not calculate the checksum
   // since the session id should suffice to avoid file name collision in
   // the shared_checksum directory.
   if (!has_checksum && db_session_id.empty()) {
-    s = ReadFileAndComputeChecksum(src_dir + fname, db_env_, src_env_options,
-                                   size_limit, &checksum_hex);
+    Status s = ReadFileAndComputeChecksum(
+        src_dir + fname, db_env_, src_env_options, size_limit, &checksum_hex);
     if (!s.ok()) {
       return s;
     }
@@ -1692,7 +1716,6 @@ Status BackupEngineImpl::AddBackupFileWorkItem(
     } else if (exist.IsNotFound()) {
       file_exists = false;
     } else {
-      assert(s.IsIOError());
       return exist;
     }
   }
@@ -1710,7 +1733,8 @@ Status BackupEngineImpl::AddBackupFileWorkItem(
                      "overwrite the file.", fname.c_str());
       need_to_copy = true;
-      backup_env_->DeleteFile(final_dest_path);
+      //**TODO: What to do on error?
+      backup_env_->DeleteFile(final_dest_path).PermitUncheckedError();
     } else {
       // file exists and referenced
       if (!has_checksum) {
@@ -1739,9 +1763,9 @@ Status BackupEngineImpl::AddBackupFileWorkItem(
           // same_path should not happen for a standard DB, so OK to
           // read file contents to check for checksum mismatch between
           // two files from same DB getting same name.
-          s = ReadFileAndComputeChecksum(src_dir + fname, db_env_,
-                                         src_env_options, size_limit,
-                                         &checksum_hex);
+          Status s = ReadFileAndComputeChecksum(src_dir + fname, db_env_,
+                                                src_env_options, size_limit,
+                                                &checksum_hex);
           if (!s.ok()) {
             return s;
           }
@@ -1783,14 +1807,14 @@ Status BackupEngineImpl::AddBackupFileWorkItem(
         temp_dest_path, final_dest_path, dst_relative);
     backup_items_to_finish.push_back(std::move(after_copy_or_create_work_item));
     CopyOrCreateResult result;
-    result.status = s;
+    result.status = Status::OK();
     result.size = size_bytes;
     result.checksum_hex = std::move(checksum_hex);
     result.db_id = std::move(db_id);
     result.db_session_id = std::move(db_session_id);
     promise_result.set_value(std::move(result));
   }
-  return s;
+  return Status::OK();
 }

 Status BackupEngineImpl::ReadFileAndComputeChecksum(
@@ -1893,7 +1917,7 @@ Status BackupEngineImpl::GetFileDbIdentities(Env* src_env,
 void BackupEngineImpl::DeleteChildren(const std::string& dir,
                                       uint32_t file_type_filter) {
   std::vector<std::string> children;
-  db_env_->GetChildren(dir, &children);  // ignore errors
+  db_env_->GetChildren(dir, &children).PermitUncheckedError();  // ignore errors

   for (const auto& f : children) {
     uint64_t number;
     FileType type;
@@ -1903,7 +1927,7 @@ void BackupEngineImpl::DeleteChildren(const std::string& dir,
       // don't delete this file
       continue;
     }
-    db_env_->DeleteFile(dir + "/" + f);  // ignore errors
+    db_env_->DeleteFile(dir + "/" + f).PermitUncheckedError();  // ignore errors
   }
 }

@@ -2016,18 +2040,19 @@ Status BackupEngineImpl::GarbageCollect() {
     std::string full_private_path =
         GetAbsolutePath(GetPrivateFileRel(backup_id));
     std::vector<std::string> subchildren;
-    backup_env_->GetChildren(full_private_path, &subchildren);
-    for (auto& subchild : subchildren) {
-      if (subchild == "." || subchild == "..") {
-        continue;
-      }
-      Status s = backup_env_->DeleteFile(full_private_path + subchild);
-      ROCKS_LOG_INFO(options_.info_log, "Deleting %s -- %s",
-                     (full_private_path + subchild).c_str(),
-                     s.ToString().c_str());
-      if (!s.ok()) {
-        // Trying again later might work
-        might_need_garbage_collect_ = true;
+    if (backup_env_->GetChildren(full_private_path, &subchildren).ok()) {
+      for (auto& subchild : subchildren) {
+        if (subchild == "." || subchild == "..") {
+          continue;
+        }
+        Status s = backup_env_->DeleteFile(full_private_path + subchild);
+        ROCKS_LOG_INFO(options_.info_log, "Deleting %s -- %s",
+                       (full_private_path + subchild).c_str(),
+                       s.ToString().c_str());
+        if (!s.ok()) {
+          // Trying again later might work
+          might_need_garbage_collect_ = true;
+        }
       }
     }
     // finally delete the private dir
diff --git a/utilities/backupable/backupable_db_test.cc b/utilities/backupable/backupable_db_test.cc
index 1503a8c84..6e5d0407c 100644
--- a/utilities/backupable/backupable_db_test.cc
+++ b/utilities/backupable/backupable_db_test.cc
@@ -408,8 +410,10 @@ class FileManager : public EnvWrapper {
   Status GetRandomFileInDir(const std::string& dir, std::string* fname,
                             uint64_t* fsize) {
     std::vector<FileAttributes> children;
-    GetChildrenFileAttributes(dir, &children);
-    if (children.size() <= 2) {  // . and ..
+    auto s = GetChildrenFileAttributes(dir, &children);
+    if (!s.ok()) {
+      return s;
+    } else if (children.size() <= 2) {  // . and ..
      return Status::NotFound("Empty directory: " + dir);
    }
    assert(fname != nullptr);
@@ -428,8 +430,10 @@ class FileManager : public EnvWrapper {

   Status DeleteRandomFileInDir(const std::string& dir) {
     std::vector<std::string> children;
-    GetChildren(dir, &children);
-    if (children.size() <= 2) {  // . and ..
+    Status s = GetChildren(dir, &children);
+    if (!s.ok()) {
+      return s;
+    } else if (children.size() <= 2) {  // . and ..
       return Status::NotFound("");
     }
     while (true) {
@@ -446,8 +450,10 @@ class FileManager : public EnvWrapper {
   Status AppendToRandomFileInDir(const std::string& dir,
                                  const std::string& data) {
     std::vector<std::string> children;
-    GetChildren(dir, &children);
-    if (children.size() <= 2) {
+    Status s = GetChildren(dir, &children);
+    if (!s.ok()) {
+      return s;
+    } else if (children.size() <= 2) {
       return Status::NotFound("");
     }
     while (true) {
@@ -617,8 +623,8 @@ class BackupableDBTest : public testing::Test {
    // set up files
    std::string db_chroot = test::PerThreadDBPath("backupable_db");
    std::string backup_chroot = test::PerThreadDBPath("backupable_db_backup");
-    Env::Default()->CreateDir(db_chroot);
-    Env::Default()->CreateDir(backup_chroot);
+    EXPECT_OK(Env::Default()->CreateDirIfMissing(db_chroot));
+    EXPECT_OK(Env::Default()->CreateDirIfMissing(backup_chroot));
    dbname_ = "/tempdb";
    backupdir_ = "/tempbk";
@@ -640,7 +646,10 @@ class BackupableDBTest : public testing::Test {
    // Create logger
    DBOptions logger_options;
    logger_options.env = db_chroot_env_.get();
-    CreateLoggerFromOptions(dbname_, logger_options, &logger_);
+    // TODO: This should really be an EXPECT_OK, but this CreateLogger fails
+    // regularly in some environments with "no such directory"
+    CreateLoggerFromOptions(dbname_, logger_options, &logger_)
+        .PermitUncheckedError();

    // set up backup db options
    backupable_options_.reset(new BackupableDBOptions(
@@ -748,13 +757,13 @@ class BackupableDBTest : public testing::Test {
   void DeleteLogFiles() {
     std::vector<std::string> delete_logs;
-    db_chroot_env_->GetChildren(dbname_, &delete_logs);
+    ASSERT_OK(db_chroot_env_->GetChildren(dbname_, &delete_logs));
     for (auto f : delete_logs) {
       uint64_t number;
       FileType type;
       bool ok = ParseFileName(f, &number, &type);
       if (ok && type == kWalFile) {
-        db_chroot_env_->DeleteFile(dbname_ + "/" + f);
+        ASSERT_OK(db_chroot_env_->DeleteFile(dbname_ + "/" + f));
       }
     }
   }
@@ -943,7 +952,6 @@ TEST_F(BackupableDBTest, FileCollision) {
 // invalid backups
 TEST_P(BackupableDBTestWithParam, VerifyBackup) {
   const int keys_iteration = 5000;
-  Status s;
   OpenDBAndBackupEngine(true);
   // create five backups
   for (int i = 0; i < 5; ++i) {
@@ -957,13 +965,13 @@
   ASSERT_TRUE(backup_engine_->VerifyBackup(1).ok());

   // ---------- case 2. - delete a file -----------
-  file_manager_->DeleteRandomFileInDir(backupdir_ + "/private/1");
+  ASSERT_OK(file_manager_->DeleteRandomFileInDir(backupdir_ + "/private/1"));
   ASSERT_TRUE(backup_engine_->VerifyBackup(1).IsNotFound());

   // ---------- case 3. - corrupt a file -----------
   std::string append_data = "Corrupting a random file";
-  file_manager_->AppendToRandomFileInDir(backupdir_ + "/private/2",
-                                         append_data);
+  ASSERT_OK(file_manager_->AppendToRandomFileInDir(backupdir_ + "/private/2",
+                                                   append_data));
   ASSERT_TRUE(backup_engine_->VerifyBackup(2).IsCorruption());

   // ---------- case 4. - invalid backup -----------
@@ -1139,9 +1147,11 @@ TEST_F(BackupableDBTest, NoDoubleCopy_And_AutoGC) {

     // MANIFEST file size should be only 100
     uint64_t size = 0;
-    test_backup_env_->GetFileSize(backupdir_ + "/private/2/MANIFEST-01", &size);
+    ASSERT_OK(test_backup_env_->GetFileSize(backupdir_ + "/private/2/MANIFEST-01",
+                                            &size));
     ASSERT_EQ(100UL, size);
-    test_backup_env_->GetFileSize(backupdir_ + "/shared/00015.sst", &size);
+    ASSERT_OK(
+        test_backup_env_->GetFileSize(backupdir_ + "/shared/00015.sst", &size));
     ASSERT_EQ(200UL, size);

     CloseBackupEngine();
@@ -1209,7 +1219,7 @@ TEST_F(BackupableDBTest, CorruptionsTest) {
     test_backup_env_->SetLimitWrittenFiles(2);
     // should fail
     s = backup_engine_->CreateNewBackup(db_.get(), !!(rnd.Next() % 2));
-    ASSERT_TRUE(!s.ok());
+    ASSERT_NOK(s);
     test_backup_env_->SetLimitWrittenFiles(1000000);
     // latest backup should have all the keys
     CloseDBAndBackupEngine();
@@ -1221,7 +1231,7 @@
   AssertBackupConsistency(0, 0, keys_iteration * 4, keys_iteration * 5);
   OpenBackupEngine();
   s = backup_engine_->RestoreDBFromBackup(5, dbname_, dbname_);
-  ASSERT_TRUE(!s.ok());
+  ASSERT_NOK(s);
   CloseBackupEngine();
   ASSERT_OK(file_manager_->DeleteRandomFileInDir(backupdir_ + "/private/4"));
   // 4 is corrupted, 3 is the latest backup now
@@ -1229,7 +1239,7 @@
   OpenBackupEngine();
   s = backup_engine_->RestoreDBFromBackup(4, dbname_, dbname_);
   CloseBackupEngine();
-  ASSERT_TRUE(!s.ok());
+  ASSERT_NOK(s);

   // --------- case 3. corrupted checksum value ----
   ASSERT_OK(file_manager_->CorruptChecksum(backupdir_ + "/meta/3", false));
@@ -1243,7 +1253,7 @@
   OpenBackupEngine();
   ASSERT_OK(file_manager_->FileExists(backupdir_ + "/meta/2"));
   s = backup_engine_->RestoreDBFromBackup(2, dbname_, dbname_);
-  ASSERT_TRUE(!s.ok());
+  ASSERT_NOK(s);

   // make sure that no corrupt backups have actually been deleted!
  ASSERT_OK(file_manager_->FileExists(backupdir_ + "/meta/1"));
@@ -1280,7 +1290,6 @@ TEST_F(BackupableDBTest, CorruptionsTest) {
            file_manager_->FileExists(backupdir_ + "/meta/2"));
  ASSERT_EQ(Status::NotFound(),
            file_manager_->FileExists(backupdir_ + "/private/2"));
-
  CloseBackupEngine();
  AssertBackupConsistency(0, 0, keys_iteration * 1, keys_iteration * 5);

@@ -1295,7 +1304,6 @@
 // Corrupt a file but maintain its size
 TEST_F(BackupableDBTest, CorruptFileMaintainSize) {
   const int keys_iteration = 5000;
-  Status s;
   OpenDBAndBackupEngine(true);
   // create a backup
   FillDB(db_.get(), 0, keys_iteration);
@@ -1514,8 +1522,7 @@ TEST_F(BackupableDBTest, InterruptCreationTest) {
   test_backup_env_->SetLimitWrittenFiles(2);
   test_backup_env_->SetDeleteFileFailure(true);
   // should fail creation
-  ASSERT_FALSE(
-      backup_engine_->CreateNewBackup(db_.get(), !!(rnd.Next() % 2)).ok());
+  ASSERT_NOK(backup_engine_->CreateNewBackup(db_.get(), !!(rnd.Next() % 2)));
   CloseDBAndBackupEngine();
   // should also fail cleanup so the tmp directory stays behind
   ASSERT_OK(backup_chroot_env_->FileExists(backupdir_ + "/private/1/"));
@@ -1548,9 +1555,9 @@ TEST_F(BackupableDBTest, FlushCompactDuringBackupCheckpoint) {
        FillDB(db_.get(), keys_iteration, 2 * keys_iteration);
        ASSERT_OK(db_->Flush(FlushOptions()));
        DBImpl* dbi = static_cast<DBImpl*>(db_.get());
-        dbi->TEST_WaitForFlushMemTable();
+        ASSERT_OK(dbi->TEST_WaitForFlushMemTable());
        ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-        dbi->TEST_WaitForCompact();
+        ASSERT_OK(dbi->TEST_WaitForCompact());
        TEST_SYNC_POINT(
            "BackupableDBTest::FlushCompactDuringBackupCheckpoint:After");
      }};
@@ -1600,10 +1607,11 @@ TEST_F(BackupableDBTest, BackupOptions) {
    db_.reset();
    db_.reset(OpenDB());
    ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), true));
-    ROCKSDB_NAMESPACE::GetLatestOptionsFileName(db_->GetName(), options_.env,
-                                                &name);
+    ASSERT_OK(ROCKSDB_NAMESPACE::GetLatestOptionsFileName(db_->GetName(),
+                                                          options_.env, &name));
    ASSERT_OK(file_manager_->FileExists(OptionsPath(backupdir_, i) + name));
-    backup_chroot_env_->GetChildren(OptionsPath(backupdir_, i), &filenames);
+    ASSERT_OK(backup_chroot_env_->GetChildren(OptionsPath(backupdir_, i),
+                                              &filenames));
    for (auto fn : filenames) {
      if (fn.compare(0, 7, "OPTIONS") == 0) {
        ASSERT_EQ(name, fn);
@@ -1646,7 +1654,6 @@ TEST_F(BackupableDBTest, SetOptionsBackupRaceCondition) {

 TEST_F(BackupableDBTest, NoDeleteWithReadOnly) {
   const int keys_iteration = 5000;
   Random rnd(6);
-  Status s;

   OpenDBAndBackupEngine(true);
   // create five backups
@@ -1777,8 +1784,8 @@ TEST_F(BackupableDBTest, ShareTableFilesWithChecksumsTransition) {
   // For an extra challenge, make sure that GarbageCollect / DeleteBackup
   // is OK even if we open without share_table_files
   OpenDBAndBackupEngine(false /* destroy_old_data */, false, kNoShare);
-  backup_engine_->DeleteBackup(1);
-  backup_engine_->GarbageCollect();
+  ASSERT_OK(backup_engine_->DeleteBackup(1));
+  ASSERT_OK(backup_engine_->GarbageCollect());
   CloseDBAndBackupEngine();

   // Verify rest (not deleted)
@@ -1939,7 +1946,7 @@ TEST_F(BackupableDBTest, TableFileCorruptionBeforeIncremental) {
    // And a bigger one
    ASSERT_OK(dbi->Put(WriteOptions(), "y", Random(42).RandomString(500)));
    ASSERT_OK(dbi->Flush(FlushOptions()));
-    dbi->TEST_WaitForFlushMemTable();
+    ASSERT_OK(dbi->TEST_WaitForFlushMemTable());
    CloseDBAndBackupEngine();

    std::vector<std::string> table_files;
@@ -2198,8 +2205,8 @@ TEST_F(BackupableDBTest, ShareTableFilesWithChecksumsNewNamingTransition) {
  ASSERT_TRUE(backupable_options_->share_files_with_checksum_naming ==
              kNamingDefault);
  OpenDBAndBackupEngine(false /* destroy_old_data */, false, kNoShare);
-  backup_engine_->DeleteBackup(1);
-  backup_engine_->GarbageCollect();
+  ASSERT_OK(backup_engine_->DeleteBackup(1));
+  ASSERT_OK(backup_engine_->GarbageCollect());
  CloseDBAndBackupEngine();

  // Verify second (about to delete)
@@ -2211,8 +2218,8 @@
  backupable_options_->share_files_with_checksum_naming =
      kLegacyCrc32cAndFileSize;
  OpenDBAndBackupEngine(false /* destroy_old_data */, false, kNoShare);
-  backup_engine_->DeleteBackup(2);
-  backup_engine_->GarbageCollect();
+  ASSERT_OK(backup_engine_->DeleteBackup(2));
+  ASSERT_OK(backup_engine_->GarbageCollect());
  CloseDBAndBackupEngine();

  // Verify rest (not deleted)
@@ -2263,8 +2270,8 @@ TEST_F(BackupableDBTest, ShareTableFilesWithChecksumsNewNamingUpgrade) {
  // For an extra challenge, make sure that GarbageCollect / DeleteBackup
  // is OK even if we open without share_table_files
  OpenDBAndBackupEngine(false /* destroy_old_data */, false, kNoShare);
-  backup_engine_->DeleteBackup(1);
-  backup_engine_->GarbageCollect();
+  ASSERT_OK(backup_engine_->DeleteBackup(1));
+  ASSERT_OK(backup_engine_->GarbageCollect());
  CloseDBAndBackupEngine();

  // Verify second (about to delete)
@@ -2276,8 +2283,8 @@
  backupable_options_->share_files_with_checksum_naming =
      kLegacyCrc32cAndFileSize;
  OpenDBAndBackupEngine(false /* destroy_old_data */, false, kNoShare);
-  backup_engine_->DeleteBackup(2);
-  backup_engine_->GarbageCollect();
+  ASSERT_OK(backup_engine_->DeleteBackup(2));
+  ASSERT_OK(backup_engine_->GarbageCollect());
  CloseDBAndBackupEngine();

  // Verify rest (not deleted)
@@ -2322,11 +2329,11 @@ TEST_F(BackupableDBTest, DeleteTmpFiles) {
           std::make_pair(next_private, std::string("00003.sst")),
       }) {
    std::string dir = backupdir_ + "/" + dir_and_file.first;
-    file_manager_->CreateDir(dir);
+    ASSERT_OK(file_manager_->CreateDirIfMissing(dir));
    ASSERT_OK(file_manager_->FileExists(dir));

    std::string file = dir + "/" + dir_and_file.second;
-    file_manager_->WriteToFile(file, "tmp");
+    ASSERT_OK(file_manager_->WriteToFile(file, "tmp"));
    ASSERT_OK(file_manager_->FileExists(file));

    tmp_files_and_dirs.push_back(file);
@@ -2509,7 +2516,7 @@ TEST_F(BackupableDBTest, GarbageCollectionBeforeBackup) {
  DestroyDB(dbname_, options_);
  OpenDBAndBackupEngine(true);

-  backup_chroot_env_->CreateDirIfMissing(backupdir_ + "/shared");
+  ASSERT_OK(backup_chroot_env_->CreateDirIfMissing(backupdir_ + "/shared"));
  std::string file_five = backupdir_ + "/shared/000008.sst";
  std::string file_five_contents = "I'm not really a sst file";
  // this depends on the fact that 000008.sst is the first file created by the DB
@@ -2517,7 +2524,7 @@
  FillDB(db_.get(), 0, 100);
  // backup overwrites file 000008.sst
-  ASSERT_TRUE(backup_engine_->CreateNewBackup(db_.get(), true).ok());
+  ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), true));

  std::string new_file_five_contents;
  ASSERT_OK(ReadFileToString(backup_chroot_env_.get(), file_five,
@@ -2563,7 +2570,7 @@ TEST_F(BackupableDBTest, EnvFailures) {
    DestroyDB(dbname_, options_);
    OpenDBAndBackupEngine(true);
    FillDB(db_.get(), 0, 100);
-    ASSERT_TRUE(backup_engine_->CreateNewBackup(db_.get(), true).ok());
+    ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), true));
    CloseDBAndBackupEngine();
    test_backup_env_->SetDummySequentialFile(true);
    test_backup_env_->SetDummySequentialFileFailReads(true);
@@ -2626,7 +2633,8 @@ TEST_F(BackupableDBTest, ChangeManifestDuringBackupCreation) {

 TEST_F(BackupableDBTest, Issue921Test) {
   BackupEngine* backup_engine;
   backupable_options_->share_table_files = false;
-  backup_chroot_env_->CreateDirIfMissing(backupable_options_->backup_dir);
+  ASSERT_OK(
+      backup_chroot_env_->CreateDirIfMissing(backupable_options_->backup_dir));
   backupable_options_->backup_dir += "/new_dir";
   ASSERT_OK(BackupEngine::Open(backup_chroot_env_.get(), *backupable_options_,
                                &backup_engine));

@@ -2814,6 +2822,7 @@ TEST_P(BackupableDBTestWithParam, BackupUsingDirectIO) {
   // We use ChrootEnv underneath so the below line checks for direct I/O support
   // in the chroot directory, not the true filesystem root.
   if (!test::IsDirectIOSupported(test_db_env_.get(), "/")) {
+    ROCKSDB_GTEST_SKIP("Test requires Direct I/O Support");
     return;
   }
   const int kNumKeysPerBackup = 100;
diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc
index d8e77316e..5cce0df6e 100644
--- a/utilities/blob_db/blob_db_impl.cc
+++ b/utilities/blob_db/blob_db_impl.cc
@@ -2055,21 +2055,21 @@ Status DestroyBlobDB(const std::string& dbname, const Options& options,
                             : bdb_options.blob_dir;

   std::vector<std::string> filenames;
-  env->GetChildren(blobdir, &filenames);
-
-  for (const auto& f : filenames) {
-    uint64_t number;
-    FileType type;
-    if (ParseFileName(f, &number, &type) && type == kBlobFile) {
-      Status del = DeleteDBFile(&soptions, blobdir + "/" + f, blobdir, true,
-                                /*force_fg=*/false);
-      if (status.ok() && !del.ok()) {
-        status = del;
+  if (env->GetChildren(blobdir, &filenames).ok()) {
+    for (const auto& f : filenames) {
+      uint64_t number;
+      FileType type;
+      if (ParseFileName(f, &number, &type) && type == kBlobFile) {
+        Status del = DeleteDBFile(&soptions, blobdir + "/" + f, blobdir, true,
+                                  /*force_fg=*/false);
+        if (status.ok() && !del.ok()) {
+          status = del;
+        }
      }
    }
+    // TODO: What to do if we cannot delete the directory?
+    env->DeleteDir(blobdir).PermitUncheckedError();
   }
-  env->DeleteDir(blobdir);
-
   Status destroy = DestroyDB(dbname, options);
   if (status.ok() && !destroy.ok()) {
     status = destroy;
diff --git a/utilities/blob_db/blob_db_test.cc b/utilities/blob_db/blob_db_test.cc
index f2a8146c0..d9b097925 100644
--- a/utilities/blob_db/blob_db_test.cc
+++ b/utilities/blob_db/blob_db_test.cc
@@ -236,7 +236,7 @@ class BlobDBTest : public testing::Test {
    DB *db = blob_db_->GetRootDB();
    const size_t kMaxKeys = 10000;
    std::vector<KeyVersion> versions;
-    GetAllKeyVersions(db, "", "", kMaxKeys, &versions);
+    ASSERT_OK(GetAllKeyVersions(db, "", "", kMaxKeys, &versions));
    ASSERT_EQ(expected_versions.size(), versions.size());
    size_t i = 0;
    for (auto &key_version : expected_versions) {
@@ -412,8 +412,8 @@ TEST_F(BlobDBTest, GetExpiration) {
  bdb_options.disable_background_tasks = true;
  mock_env_->set_current_time(100);
  Open(bdb_options, options);
-  Put("key1", "value1");
-  PutWithTTL("key2", "value2", 200);
+  ASSERT_OK(Put("key1", "value1"));
+  ASSERT_OK(PutWithTTL("key2", "value2", 200));
  PinnableSlice value;
  uint64_t expiration;
  ASSERT_OK(blob_db_->Get(ReadOptions(), "key1", &value, &expiration));
@@ -466,7 +466,8 @@ TEST_F(BlobDBTest, WriteBatch) {
    for (size_t j = 0; j < 10; j++) {
      PutRandomToWriteBatch("key" + ToString(j * 100 + i), &rnd, &batch, &data);
    }
-    blob_db_->Write(WriteOptions(), &batch);
+
+    ASSERT_OK(blob_db_->Write(WriteOptions(), &batch));
  }
  VerifyDB(data);
 }
@@ -498,7 +499,7 @@ TEST_F(BlobDBTest, DeleteBatch) {
  }
  WriteBatch batch;
  for (size_t i = 0; i < 100; i++) {
-    batch.Delete("key" + ToString(i));
+    ASSERT_OK(batch.Delete("key" + ToString(i)));
  }
  ASSERT_OK(blob_db_->Write(WriteOptions(), &batch));
  // DB should be empty.
@@ -540,7 +541,7 @@ TEST_F(BlobDBTest, Compression) {
      PutRandomToWriteBatch("write-batch-key" + ToString(j * 100 + i), &rnd,
                            &batch, &data);
    }
-    blob_db_->Write(WriteOptions(), &batch);
+    ASSERT_OK(blob_db_->Write(WriteOptions(), &batch));
  }
  VerifyDB(data);
 }
@@ -732,7 +733,7 @@ TEST_F(BlobDBTest, MultipleWriters) {
            } else {
              WriteBatch batch;
              PutRandomToWriteBatch(key, &rnd, &batch, &data_set[id]);
-              blob_db_->Write(WriteOptions(), &batch);
+              ASSERT_OK(blob_db_->Write(WriteOptions(), &batch));
            }
          }
        },
@@ -775,7 +776,7 @@ TEST_F(BlobDBTest, SstFileManager) {
  Open(bdb_options, db_options);

  // Create one obsolete file and clean it.
-  blob_db_->Put(WriteOptions(), "foo", "bar");
+  ASSERT_OK(blob_db_->Put(WriteOptions(), "foo", "bar"));
  auto blob_files = blob_db_impl()->TEST_GetBlobFiles();
  ASSERT_EQ(1, blob_files.size());
  std::shared_ptr<BlobFile> bfile = blob_files[0];
@@ -820,14 +821,15 @@ TEST_F(BlobDBTest, SstFileManagerRestart) {
  Open(bdb_options, db_options);
  std::string blob_dir = blob_db_impl()->TEST_blob_dir();
-  blob_db_->Put(WriteOptions(), "foo", "bar");
+  ASSERT_OK(blob_db_->Put(WriteOptions(), "foo", "bar"));
  Close();

  // Create 3 dummy trash files under the blob_dir
  const auto &fs = db_options.env->GetFileSystem();
-  CreateFile(fs, blob_dir + "/000666.blob.trash", "", false);
-  CreateFile(fs, blob_dir + "/000888.blob.trash", "", true);
-  CreateFile(fs, blob_dir + "/something_not_match.trash", "", false);
+  ASSERT_OK(CreateFile(fs, blob_dir + "/000666.blob.trash", "", false));
+  ASSERT_OK(CreateFile(fs, blob_dir + "/000888.blob.trash", "", true));
+  ASSERT_OK(
+      CreateFile(fs, blob_dir + "/something_not_match.trash", "", false));

  // Make sure that reopening the DB rescans the existing trash files
  Open(bdb_options, db_options);
@@ -938,8 +940,8 @@ TEST_F(BlobDBTest, ColumnFamilyNotSupported) {
  ASSERT_TRUE(blob_db_->PutUntil(WriteOptions(), handle, "k", "v", 100)
                  .IsNotSupported());
  WriteBatch batch;
-  batch.Put("k1", "v1");
-  batch.Put(handle, "k2", "v2");
+  ASSERT_OK(batch.Put("k1", "v1"));
+  ASSERT_OK(batch.Put(handle, "k2", "v2"));
  ASSERT_TRUE(blob_db_->Write(WriteOptions(), &batch).IsNotSupported());
  ASSERT_TRUE(blob_db_->Get(ReadOptions(), "k1", &value).IsNotFound());
  ASSERT_TRUE(
@@ -1538,7 +1540,7 @@ TEST_F(BlobDBTest, FilterExpiredBlobIndex) {
  // Verify expired blob index are filtered.
  std::vector<KeyVersion> versions;
  const size_t kMaxKeys = 10000;
-  GetAllKeyVersions(blob_db_, "", "", kMaxKeys, &versions);
+  ASSERT_OK(GetAllKeyVersions(blob_db_, "", "", kMaxKeys, &versions));
  ASSERT_EQ(data_after_compact.size(), versions.size());
  for (auto &version : versions) {
    ASSERT_TRUE(data_after_compact.count(version.user_key) > 0);
@@ -1889,8 +1891,8 @@ TEST_F(BlobDBTest, GarbageCollectionFailure) {
  Open(bdb_options, db_options);

  // Write a couple of valid blobs.
-  Put("foo", "bar");
-  Put("dead", "beef");
+  ASSERT_OK(Put("foo", "bar"));
+  ASSERT_OK(Put("dead", "beef"));

  // Write a fake blob reference into the base DB that cannot be parsed.
  WriteBatch batch;
@@ -2325,7 +2327,7 @@ TEST_F(BlobDBTest, SyncBlobFileBeforeClose) {

  Open(blob_options, options);

-  Put("foo", "bar");
+  ASSERT_OK(Put("foo", "bar"));

  auto blob_files = blob_db_impl()->TEST_GetBlobFiles();
  ASSERT_EQ(blob_files.size(), 1);
@@ -2345,7 +2347,7 @@ TEST_F(BlobDBTest, SyncBlobFileBeforeCloseIOError) {

  Open(blob_options, options);

-  Put("foo", "bar");
+  ASSERT_OK(Put("foo", "bar"));

  auto blob_files = blob_db_impl()->TEST_GetBlobFiles();
  ASSERT_EQ(blob_files.size(), 1);
diff --git a/utilities/blob_db/blob_file.cc b/utilities/blob_db/blob_file.cc
index a3d208c3b..8c95b78f4 100644
--- a/utilities/blob_db/blob_file.cc
+++ b/utilities/blob_db/blob_file.cc
@@ -158,8 +158,9 @@ Status BlobFile::GetReader(Env* env, const EnvOptions& env_options,
  assert(fresh_open != nullptr);
  *fresh_open = false;
  int64_t current_time = 0;
-  env->GetCurrentTime(&current_time);
-  last_access_.store(current_time);
+  if (env->GetCurrentTime(&current_time).ok()) {
+    last_access_.store(current_time);
+  }
  Status s;

  {
diff --git a/utilities/ttl/ttl_test.cc b/utilities/ttl/ttl_test.cc
index 71e9677b2..099ed6859 100644
--- a/utilities/ttl/ttl_test.cc
+++ b/utilities/ttl/ttl_test.cc
@@ -95,7 +95,7 @@ class TtlTest : public testing::Test {
  void CloseTtlHelper(bool close_db) {
    if (db_ttl_ != nullptr) {
      if (close_db) {
-        db_ttl_->Close();
+        EXPECT_OK(db_ttl_->Close());
      }
      delete db_ttl_;
      db_ttl_ = nullptr;
@@ -137,17 +137,17 @@ class TtlTest : public testing::Test {
    for (int64_t i = 0; i < num_ops && kv_it_ != kvmap_.end(); i++, ++kv_it_) {
      switch (batch_ops[i]) {
        case OP_PUT:
-          batch.Put(kv_it_->first, kv_it_->second);
+          ASSERT_OK(batch.Put(kv_it_->first, kv_it_->second));
          break;
        case OP_DELETE:
-          batch.Delete(kv_it_->first);
+          ASSERT_OK(batch.Delete(kv_it_->first));
          break;
        default:
          FAIL();
      }
    }
-    db_ttl_->Write(wopts, &batch);
-    db_ttl_->Flush(flush_opts);
+    ASSERT_OK(db_ttl_->Write(wopts, &batch));
+    ASSERT_OK(db_ttl_->Flush(flush_opts));
  }

  // Puts num_entries starting from start_pos_map from kvmap_ into the database
@@ -170,19 +170,19 @@ class TtlTest : public testing::Test {
                  : db_ttl_->Put(wopts, cf, "keymock", "valuemock"));
    if (flush) {
      if (cf == nullptr) {
-        db_ttl_->Flush(flush_opts);
+        ASSERT_OK(db_ttl_->Flush(flush_opts));
      } else {
-        db_ttl_->Flush(flush_opts, cf);
+        ASSERT_OK(db_ttl_->Flush(flush_opts, cf));
      }
    }
  }

  // Runs a manual compaction
-  void ManualCompact(ColumnFamilyHandle* cf = nullptr) {
+  Status ManualCompact(ColumnFamilyHandle* cf = nullptr) {
    if (cf == nullptr) {
-      db_ttl_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+      return db_ttl_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
    } else {
-      db_ttl_->CompactRange(CompactRangeOptions(), cf, nullptr, nullptr);
+      return db_ttl_->CompactRange(CompactRangeOptions(), cf, nullptr, nullptr);
    }
  }

@@ -225,18 +225,9 @@ class TtlTest : public testing::Test {
    }
  }

-  // Sleeps for slp_tim then runs a manual compaction
-  // Checks span starting from st_pos from kvmap_ in the db and
-  // Gets should return true if check is true and false otherwise
-  // Also checks that value that we got is the same as inserted; and =kNewValue
-  // if test_compaction_change is true
-  void SleepCompactCheck(int slp_tim, int64_t st_pos, int64_t span,
-                         bool check = true, bool test_compaction_change = false,
-                         ColumnFamilyHandle* cf = nullptr) {
-    ASSERT_TRUE(db_ttl_);
-
-    env_->Sleep(slp_tim);
-    ManualCompact(cf);
+  void CompactCheck(int64_t st_pos, int64_t span, bool check = true,
+                    bool test_compaction_change = false,
+                    ColumnFamilyHandle* cf = nullptr) {
    static ReadOptions ropts;
    kv_it_ = kvmap_.begin();
    advance(kv_it_, st_pos);
@@ -267,13 +258,27 @@ class TtlTest : public testing::Test {
      }
    }
  }
+  // Sleeps for slp_tim then runs a manual compaction
+  // Checks span starting from st_pos from kvmap_ in the db and
+  // Gets should return true if check is true and false otherwise
+  // Also checks that value that we got is the same as inserted; and =kNewValue
+  // if test_compaction_change is true
+  void SleepCompactCheck(int slp_tim, int64_t st_pos, int64_t span,
+                         bool check = true, bool test_compaction_change = false,
+                         ColumnFamilyHandle* cf = nullptr) {
+    ASSERT_TRUE(db_ttl_);
+
+    env_->Sleep(slp_tim);
+    ASSERT_OK(ManualCompact(cf));
+    CompactCheck(st_pos, span, check, test_compaction_change, cf);
+  }

  // Similar as SleepCompactCheck but uses TtlIterator to read from db
  void SleepCompactCheckIter(int slp, int st_pos, int64_t span,
                             bool check = true) {
    ASSERT_TRUE(db_ttl_);
    env_->Sleep(slp);
-    ManualCompact();
+    ASSERT_OK(ManualCompact());
    static ReadOptions ropts;
    Iterator *dbiter = db_ttl_->NewIterator(ropts);
    kv_it_ = kvmap_.begin();
    advance(kv_it_, st_pos);
@@ -292,6 +297,7 @@ class TtlTest : public testing::Test {
        dbiter->Next();
      }
    }
+    ASSERT_OK(dbiter->status());
    delete dbiter;
  }

@@ -534,11 +540,16 @@ TEST_F(TtlTest, ReadOnlyPresentForever) {
  MakeKVMap(kSampleSize_);

  OpenTtl(1);                                  // T=0:Open the db normally
-  PutValues(0, kSampleSize_);                  // T=0:Insert Set1. Delete at t=1
+  PutValues(0, kSampleSize_);  // T=0:Insert Set1. Delete at t=1
  CloseTtl();

  OpenReadOnlyTtl(1);
-  SleepCompactCheck(2, 0, kSampleSize_);  // T=2:Set1 should still be there
+  ASSERT_TRUE(db_ttl_);
+
+  env_->Sleep(2);
+  Status s = ManualCompact();  // T=2:Set1 should still be there
+  ASSERT_TRUE(s.IsNotSupported());
+  CompactCheck(0, kSampleSize_);
  CloseTtl();
 }
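A note on the idiom this patch applies throughout: in an ASSERT_STATUS_CHECKED build (ASSERT_STATUS_CHECKED=1 make ..., which defines ROCKSDB_ASSERT_STATUS_CHECKED), a rocksdb::Status asserts in its destructor if its value was never examined. Every call must therefore either check the returned Status (ASSERT_OK/EXPECT_OK in tests, if (!s.ok()) in library code) or discard it explicitly with PermitUncheckedError(). The sketch below is illustrative only and not part of the patch; the function name is hypothetical, and it assumes an already-open rocksdb::DB* obtained through the public API.

#include "rocksdb/db.h"

// Illustrative sketch (not part of the patch): the ways a rocksdb::Status
// may legally reach the end of its lifetime under ASSERT_STATUS_CHECKED.
// StatusCheckingIdiom is a hypothetical name used for illustration only.
void StatusCheckingIdiom(rocksdb::DB* db) {
  // 1. Checked: calling ok() (or IsNotFound(), etc.) marks the Status as
  //    examined, which satisfies the assertion in its destructor.
  rocksdb::Status s = db->Put(rocksdb::WriteOptions(), "key", "value");
  if (!s.ok()) {
    // handle or propagate the error
  }

  // 2. Deliberately ignored: PermitUncheckedError() marks the Status as
  //    checked without reading it. This is the pattern the patch adds in
  //    destructors and best-effort cleanup paths.
  db->Flush(rocksdb::FlushOptions()).PermitUncheckedError();

  // 3. Neither checked nor permitted: dropping a returned Status on the
  //    floor triggers the assertion when the temporary is destroyed.
  // db->Put(rocksdb::WriteOptions(), "k2", "v2");  // would assert under ASC
}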