diff --git a/HISTORY.md b/HISTORY.md index 4da23b8af..ed7eb1898 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,6 +1,8 @@ # Rocksdb Change Log ## Unreleased ### Public API Change +* `BackupableDBOptions::max_valid_backups_to_open == 0` now means no backups will be opened during BackupEngine initialization. Previously this condition disabled limiting backups opened. + ### New Features ### Bug Fixes diff --git a/include/rocksdb/utilities/backupable_db.h b/include/rocksdb/utilities/backupable_db.h index fc2b6ba43..150b00732 100644 --- a/include/rocksdb/utilities/backupable_db.h +++ b/include/rocksdb/utilities/backupable_db.h @@ -109,8 +109,8 @@ struct BackupableDBOptions { uint64_t callback_trigger_interval_size; // When Open() is called, it will open at most this many of the latest - // non-corrupted backups. If 0, it will open all available backups. - // Default: 0 + // non-corrupted backups. + // Default: INT_MAX int max_valid_backups_to_open; void Dump(Logger* logger) const; @@ -122,7 +122,7 @@ struct BackupableDBOptions { bool _backup_log_files = true, uint64_t _backup_rate_limit = 0, uint64_t _restore_rate_limit = 0, int _max_background_operations = 1, uint64_t _callback_trigger_interval_size = 4 * 1024 * 1024, - int _max_valid_backups_to_open = 0) + int _max_valid_backups_to_open = INT_MAX) : backup_dir(_backup_dir), backup_env(_backup_env), share_table_files(_share_table_files), diff --git a/utilities/backupable/backupable_db.cc b/utilities/backupable/backupable_db.cc index e2464b96c..eb40a5a19 100644 --- a/utilities/backupable/backupable_db.cc +++ b/utilities/backupable/backupable_db.cc @@ -617,19 +617,18 @@ Status BackupEngineImpl::Initialize() { } // load the backups if any, until valid_backups_to_open of the latest // non-corrupted backups have been successfully opened. 
- int valid_backups_to_open; - if (options_.max_valid_backups_to_open == 0) { - valid_backups_to_open = INT_MAX; - } else { - valid_backups_to_open = options_.max_valid_backups_to_open; - } + int valid_backups_to_open = options_.max_valid_backups_to_open; for (auto backup_iter = backups_.rbegin(); - backup_iter != backups_.rend() && valid_backups_to_open > 0; + backup_iter != backups_.rend(); ++backup_iter) { assert(latest_backup_id_ == 0 || latest_backup_id_ > backup_iter->first); if (latest_backup_id_ == 0) { latest_backup_id_ = backup_iter->first; } + if (valid_backups_to_open == 0) { + break; + } + InsertPathnameToSizeBytes( GetAbsolutePath(GetPrivateFileRel(backup_iter->first)), backup_env_, &abs_path_to_size); diff --git a/utilities/backupable/backupable_db_test.cc b/utilities/backupable/backupable_db_test.cc index 01d7667a3..7a30e4ec3 100644 --- a/utilities/backupable/backupable_db_test.cc +++ b/utilities/backupable/backupable_db_test.cc @@ -1547,6 +1547,34 @@ TEST_F(BackupableDBTest, CreateWhenLatestBackupCorrupted) { ASSERT_EQ(1, backup_infos.size()); ASSERT_EQ(2, backup_infos[0].backup_id); } + +TEST_F(BackupableDBTest, WriteOnlyEngine) { + // Verify we can open a backup engine and create new ones even if reading old + // backups would fail with IOError. IOError is a more serious condition than + // corruption and would cause the engine to fail opening. So the only way to + // avoid is by not reading old backups at all, i.e., respecting + // `max_valid_backups_to_open == 0`. 
+ const int kNumKeys = 5000; + OpenDBAndBackupEngine(true /* destroy_old_data */); + FillDB(db_.get(), 0 /* from */, kNumKeys); + ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), true)); + CloseDBAndBackupEngine(); + + backupable_options_->max_valid_backups_to_open = 0; + // cause any meta-file reads to fail with IOError during Open + test_backup_env_->SetDummySequentialFile(true); + test_backup_env_->SetDummySequentialFileFailReads(true); + OpenDBAndBackupEngine(); + test_backup_env_->SetDummySequentialFileFailReads(false); + test_backup_env_->SetDummySequentialFile(false); + + ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), true)); + std::vector<BackupInfo> backup_infos; + backup_engine_->GetBackupInfo(&backup_infos); + ASSERT_EQ(1, backup_infos.size()); + ASSERT_EQ(2, backup_infos[0].backup_id); +} + } // anon namespace } // namespace rocksdb