support opening zero backups during engine init

Summary:
Some internal users open BackupEngine only to write new backups and don't care whether old backups can be read. We should support `BackupableDBOptions::max_valid_backups_to_open == 0` to mean "open no existing backups" (previously, in df74b775e6, I made the mistake of choosing 0 as a special value that disabled the limit).
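For context, here is a minimal sketch of the intended write-only usage under the new semantics. The backup directory path is a placeholder, error handling is simplified, and the sketch assumes a DB is already open:

#include <cassert>

#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/utilities/backupable_db.h"

// Open a BackupEngine purely for writing, skipping all existing backups
// during initialization.
void CreateBackupWriteOnly(rocksdb::DB* db) {
  rocksdb::BackupableDBOptions backup_opts("/path/to/backup_dir");
  backup_opts.max_valid_backups_to_open = 0;  // do not read any old backups

  rocksdb::BackupEngine* backup_engine = nullptr;
  rocksdb::Status s = rocksdb::BackupEngine::Open(rocksdb::Env::Default(),
                                                  backup_opts, &backup_engine);
  assert(s.ok());
  s = backup_engine->CreateNewBackup(db, true /* flush_before_backup */);
  assert(s.ok());
  delete backup_engine;
}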
Closes https://github.com/facebook/rocksdb/pull/2819

Differential Revision: D5751599

Pulled By: ajkr

fbshipit-source-id: e73ac19eb5d756d6b68601eae8e43407ee4f2752
Branch: main
Authored by Andrew Kryczka, committed by Facebook Github Bot
Parent: 3c42807794
Commit: f5148ade10
Changed files (lines changed):
  HISTORY.md (2)
  include/rocksdb/utilities/backupable_db.h (6)
  utilities/backupable/backupable_db.cc (13)
  utilities/backupable/backupable_db_test.cc (28)

HISTORY.md
@@ -1,6 +1,8 @@
 # Rocksdb Change Log
 ## Unreleased
+### Public API Change
+* `BackupableDBOptions::max_valid_backups_to_open == 0` now means no backups will be opened during BackupEngine initialization. Previously this condition disabled limiting backups opened.
 ### New Features
 ### Bug Fixes

include/rocksdb/utilities/backupable_db.h
@@ -109,8 +109,8 @@ struct BackupableDBOptions {
   uint64_t callback_trigger_interval_size;
   // When Open() is called, it will open at most this many of the latest
-  // non-corrupted backups. If 0, it will open all available backups.
-  // Default: 0
+  // non-corrupted backups.
+  // Default: INT_MAX
   int max_valid_backups_to_open;
   void Dump(Logger* logger) const;
@@ -122,7 +122,7 @@ struct BackupableDBOptions {
       bool _backup_log_files = true, uint64_t _backup_rate_limit = 0,
       uint64_t _restore_rate_limit = 0, int _max_background_operations = 1,
       uint64_t _callback_trigger_interval_size = 4 * 1024 * 1024,
-      int _max_valid_backups_to_open = 0)
+      int _max_valid_backups_to_open = INT_MAX)
       : backup_dir(_backup_dir),
         backup_env(_backup_env),
         share_table_files(_share_table_files),

utilities/backupable/backupable_db.cc
@@ -617,19 +617,18 @@ Status BackupEngineImpl::Initialize() {
   }
   // load the backups if any, until valid_backups_to_open of the latest
   // non-corrupted backups have been successfully opened.
-  int valid_backups_to_open;
-  if (options_.max_valid_backups_to_open == 0) {
-    valid_backups_to_open = INT_MAX;
-  } else {
-    valid_backups_to_open = options_.max_valid_backups_to_open;
-  }
+  int valid_backups_to_open = options_.max_valid_backups_to_open;
   for (auto backup_iter = backups_.rbegin();
-       backup_iter != backups_.rend() && valid_backups_to_open > 0;
+       backup_iter != backups_.rend();
        ++backup_iter) {
     assert(latest_backup_id_ == 0 || latest_backup_id_ > backup_iter->first);
     if (latest_backup_id_ == 0) {
       latest_backup_id_ = backup_iter->first;
     }
+    if (valid_backups_to_open == 0) {
+      break;
+    }
     InsertPathnameToSizeBytes(
         GetAbsolutePath(GetPrivateFileRel(backup_iter->first)), backup_env_,
         &abs_path_to_size);
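Roughly, the rewritten loop still scans backup IDs newest-first so the latest ID is learned even with a zero limit, but it now breaks before reading any backup's files. A standalone sketch of that control flow, with simplified types and names (not the actual BackupEngineImpl code):

#include <map>
#include <string>

// Walk backups newest-first: always record the newest ID so future backups
// continue the ID sequence, but stop before loading any metadata once the
// open limit is exhausted.
int ScanBackups(const std::map<int, std::string>& backups,  // id -> meta path
                int max_valid_backups_to_open) {
  int latest_backup_id = 0;
  int remaining = max_valid_backups_to_open;
  for (auto it = backups.rbegin(); it != backups.rend(); ++it) {
    if (latest_backup_id == 0) {
      latest_backup_id = it->first;  // learned even when remaining == 0
    }
    if (remaining == 0) {
      break;  // zero limit: open no backups at all
    }
    // ... read and validate the backup's metadata here ...
    --remaining;
  }
  return latest_backup_id;
}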

utilities/backupable/backupable_db_test.cc
@@ -1547,6 +1547,34 @@ TEST_F(BackupableDBTest, CreateWhenLatestBackupCorrupted) {
   ASSERT_EQ(1, backup_infos.size());
   ASSERT_EQ(2, backup_infos[0].backup_id);
 }
+
+TEST_F(BackupableDBTest, WriteOnlyEngine) {
+  // Verify we can open a backup engine and create new ones even if reading old
+  // backups would fail with IOError. IOError is a more serious condition than
+  // corruption and would cause the engine to fail opening. So the only way to
+  // avoid it is by not reading old backups at all, i.e., respecting
+  // `max_valid_backups_to_open == 0`.
+  const int kNumKeys = 5000;
+  OpenDBAndBackupEngine(true /* destroy_old_data */);
+  FillDB(db_.get(), 0 /* from */, kNumKeys);
+  ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), true));
+  CloseDBAndBackupEngine();
+  backupable_options_->max_valid_backups_to_open = 0;
+  // cause any meta-file reads to fail with IOError during Open
+  test_backup_env_->SetDummySequentialFile(true);
+  test_backup_env_->SetDummySequentialFileFailReads(true);
+  OpenDBAndBackupEngine();
+  test_backup_env_->SetDummySequentialFileFailReads(false);
+  test_backup_env_->SetDummySequentialFile(false);
+  ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), true));
+  std::vector<BackupInfo> backup_infos;
+  backup_engine_->GetBackupInfo(&backup_infos);
+  ASSERT_EQ(1, backup_infos.size());
+  ASSERT_EQ(2, backup_infos[0].backup_id);
+}
 } // anon namespace
 } // namespace rocksdb
