Some fixes to BackupableDB

Summary:
(1) Report corruption if a backup meta file has trailing data that was not read. This should fix: https://github.com/facebook/rocksdb/issues/81 (also, @sdong reported a similar issue)
(2) Don't use OS buffer when copying file to backup directory. We don't need the file in cache since we won't be reading it twice
(3) Don't delete newer backups when somebody tries to back up a diverged DB (restore from an older backup, add new data, then try to back up). Rather than deleting the newer backups, just fail the new backup.

Test Plan: backupable_db_test

Reviewers: ljin, dhruba, sdong

Reviewed By: ljin

CC: leveldb, sdong

Differential Revision: https://reviews.facebook.net/D16287
main
Igor Canadi 11 years ago
parent a1d56e73ec
commit e3f396f1ea
  1. 1
      HISTORY.md
  2. 70
      include/utilities/backupable_db.h
  3. 77
      utilities/backupable/backupable_db.cc
  4. 34
      utilities/backupable/backupable_db_test.cc

@ -8,6 +8,7 @@
* By default, checksums are verified on every read from database
* Added is_manual_compaction to CompactionFilter::Context
* Added "virtual void WaitForJoin() = 0" in class Env
* Removed BackupEngine::DeleteBackupsNewerThan() function
### New Features
* If we find one truncated record at the end of the MANIFEST or WAL files,

@ -58,14 +58,13 @@ struct BackupableDBOptions {
explicit BackupableDBOptions(const std::string& _backup_dir,
Env* _backup_env = nullptr,
bool _share_table_files = true,
Logger* _info_log = nullptr,
bool _sync = true,
bool _destroy_old_data = false) :
backup_dir(_backup_dir),
backup_env(_backup_env),
info_log(_info_log),
sync(_sync),
destroy_old_data(_destroy_old_data) { }
Logger* _info_log = nullptr, bool _sync = true,
bool _destroy_old_data = false)
: backup_dir(_backup_dir),
backup_env(_backup_env),
info_log(_info_log),
sync(_sync),
destroy_old_data(_destroy_old_data) {}
};
typedef uint32_t BackupID;
@ -99,8 +98,6 @@ class BackupEngine {
const std::string& wal_dir) = 0;
virtual Status RestoreDBFromLatestBackup(const std::string& db_dir,
const std::string& wal_dir) = 0;
virtual void DeleteBackupsNewerThan(uint64_t sequence_number) = 0;
};
// Stack your DB with BackupableDB to be able to backup the DB
@ -138,32 +135,33 @@ class BackupableDB : public StackableDB {
// Use this class to access information about backups and restore from them
class RestoreBackupableDB {
public:
RestoreBackupableDB(Env* db_env, const BackupableDBOptions& options);
~RestoreBackupableDB();
// Returns info about backups in backup_info
void GetBackupInfo(std::vector<BackupInfo>* backup_info);
// restore from backup with backup_id
// IMPORTANT -- if options_.share_table_files == true and you restore DB
// from some backup that is not the latest, and you start creating new
// backups from the new DB, all the backups that were newer than the
// backup you restored from will be deleted
//
// Example: Let's say you have backups 1, 2, 3, 4, 5 and you restore 3.
// If you try creating a new backup now, old backups 4 and 5 will be deleted
// and new backup with ID 4 will be created.
Status RestoreDBFromBackup(BackupID backup_id, const std::string& db_dir,
const std::string& wal_dir);
// restore from the latest backup
Status RestoreDBFromLatestBackup(const std::string& db_dir,
const std::string& wal_dir);
// deletes old backups, keeping latest num_backups_to_keep alive
Status PurgeOldBackups(uint32_t num_backups_to_keep);
// deletes a specific backup
Status DeleteBackup(BackupID backup_id);
public:
RestoreBackupableDB(Env* db_env, const BackupableDBOptions& options);
~RestoreBackupableDB();
// Returns info about backups in backup_info
void GetBackupInfo(std::vector<BackupInfo>* backup_info);
// restore from backup with backup_id
// IMPORTANT -- if options_.share_table_files == true and you restore DB
// from some backup that is not the latest, and you start creating new
// backups from the new DB, they will probably fail
//
// Example: Let's say you have backups 1, 2, 3, 4, 5 and you restore 3.
// If you add new data to the DB and try creating a new backup now, the
// database will diverge from backups 4 and 5 and the new backup will fail.
// If you want to create new backup, you will first have to delete backups 4
// and 5.
Status RestoreDBFromBackup(BackupID backup_id, const std::string& db_dir,
const std::string& wal_dir);
// restore from the latest backup
Status RestoreDBFromLatestBackup(const std::string& db_dir,
const std::string& wal_dir);
// deletes old backups, keeping latest num_backups_to_keep alive
Status PurgeOldBackups(uint32_t num_backups_to_keep);
// deletes a specific backup
Status DeleteBackup(BackupID backup_id);
private:
BackupEngine* backup_engine_;

@ -46,8 +46,6 @@ class BackupEngineImpl : public BackupEngine {
return RestoreDBFromBackup(latest_backup_id_, db_dir, wal_dir);
}
void DeleteBackupsNewerThan(uint64_t sequence_number);
private:
struct FileInfo {
FileInfo(const std::string& fname, uint64_t sz, uint32_t checksum)
@ -185,6 +183,12 @@ class BackupEngineImpl : public BackupEngine {
Env* db_env_;
Env* backup_env_;
// directories
unique_ptr<Directory> backup_directory_;
unique_ptr<Directory> shared_directory_;
unique_ptr<Directory> meta_directory_;
unique_ptr<Directory> private_directory_;
static const size_t copy_file_buffer_size_ = 5 * 1024 * 1024LL; // 5MB
};
@ -203,11 +207,17 @@ BackupEngineImpl::BackupEngineImpl(Env* db_env,
// create all the dirs we need
backup_env_->CreateDirIfMissing(GetAbsolutePath());
backup_env_->NewDirectory(GetAbsolutePath(), &backup_directory_);
if (options_.share_table_files) {
backup_env_->CreateDirIfMissing(GetAbsolutePath(GetSharedFileRel()));
backup_env_->NewDirectory(GetAbsolutePath(GetSharedFileRel()),
&shared_directory_);
}
backup_env_->CreateDirIfMissing(GetAbsolutePath(GetPrivateDirRel()));
backup_env_->NewDirectory(GetAbsolutePath(GetPrivateDirRel()),
&private_directory_);
backup_env_->CreateDirIfMissing(GetBackupMetaDir());
backup_env_->NewDirectory(GetBackupMetaDir(), &meta_directory_);
std::vector<std::string> backup_meta_files;
backup_env_->GetChildren(GetBackupMetaDir(), &backup_meta_files);
@ -279,26 +289,6 @@ BackupEngineImpl::BackupEngineImpl(Env* db_env,
BackupEngineImpl::~BackupEngineImpl() { LogFlush(options_.info_log); }
// Deletes every backup whose sequence number is strictly greater than
// `sequence_number`, then re-points latest_backup_id_ at the newest
// surviving backup (0 if none remain), persists that ID, and garbage-
// collects files left orphaned by the deletions.
void BackupEngineImpl::DeleteBackupsNewerThan(uint64_t sequence_number) {
// First pass: delete each too-new backup's contents and remember its ID.
for (auto backup : backups_) {
if (backup.second.GetSequenceNumber() > sequence_number) {
Log(options_.info_log,
"Deleting backup %u because sequence number (%" PRIu64
") is newer than %" PRIu64 "",
backup.first, backup.second.GetSequenceNumber(), sequence_number);
backup.second.Delete();
obsolete_backups_.push_back(backup.first);
}
}
// Second pass: erase the deleted entries from the in-memory map. Done
// separately so backups_ is not mutated while it is being iterated.
for (auto ob : obsolete_backups_) {
backups_.erase(backups_.find(ob));
}
// Pick the newest surviving backup as the latest; an empty map yields
// latest_backup_id_ == 0. NOTE(review): assumes backups_ is ordered by
// BackupID so the last element is the newest — confirm container type.
auto itr = backups_.end();
latest_backup_id_ = (itr == backups_.begin()) ? 0 : (--itr)->first;
PutLatestBackupFileContents(latest_backup_id_); // Ignore errors
GarbageCollection(false);
}
Status BackupEngineImpl::CreateNewBackup(DB* db, bool flush_before_backup) {
Status s;
std::vector<std::string> live_files;
@ -348,9 +338,8 @@ Status BackupEngineImpl::CreateNewBackup(DB* db, bool flush_before_backup) {
return Status::Corruption("Can't parse file name. This is very bad");
}
// we should only get sst, manifest and current files here
assert(type == kTableFile ||
type == kDescriptorFile ||
type == kCurrentFile);
assert(type == kTableFile || type == kDescriptorFile ||
type == kCurrentFile);
// rules:
// * if it's kTableFile, than it's shared
@ -394,6 +383,28 @@ Status BackupEngineImpl::CreateNewBackup(DB* db, bool flush_before_backup) {
// install the newly created backup meta! (atomic)
s = PutLatestBackupFileContents(new_backup_id);
}
if (s.ok() && options_.sync) {
unique_ptr<Directory> backup_private_directory;
backup_env_->NewDirectory(
GetAbsolutePath(GetPrivateFileRel(new_backup_id, false)),
&backup_private_directory);
if (backup_private_directory != nullptr) {
backup_private_directory->Fsync();
}
if (private_directory_ != nullptr) {
private_directory_->Fsync();
}
if (meta_directory_ != nullptr) {
meta_directory_->Fsync();
}
if (shared_directory_ != nullptr) {
shared_directory_->Fsync();
}
if (backup_directory_ != nullptr) {
backup_directory_->Fsync();
}
}
if (!s.ok()) {
// clean all the files we might have created
Log(options_.info_log, "Backup failed -- %s", s.ToString().c_str());
@ -591,6 +602,7 @@ Status BackupEngineImpl::CopyFile(const std::string& src,
unique_ptr<SequentialFile> src_file;
EnvOptions env_options;
env_options.use_mmap_writes = false;
env_options.use_os_buffer = false;
if (size != nullptr) {
*size = 0;
}
@ -706,6 +718,7 @@ Status BackupEngineImpl::CalculateChecksum(const std::string& src, Env* src_env,
EnvOptions env_options;
env_options.use_mmap_writes = false;
env_options.use_os_buffer = false;
std::unique_ptr<SequentialFile> src_file;
Status s = src_env->NewSequentialFile(src, &src_file, env_options);
@ -893,6 +906,9 @@ Status BackupEngineImpl::BackupMeta::LoadFromFile(
uint64_t size;
s = env_->GetFileSize(backup_dir + "/" + filename, &size);
if (!s.ok()) {
return s;
}
if (line.empty()) {
return Status::Corruption("File checksum is missing");
@ -913,6 +929,11 @@ Status BackupEngineImpl::BackupMeta::LoadFromFile(
files.emplace_back(filename, size, checksum_value);
}
if (s.ok() && data.size() > 0) {
// file has to be read completely. if not, we count it as corruption
s = Status::Corruption("Tailing data in backup meta file");
}
if (s.ok()) {
for (const auto& file_info : files) {
s = AddFile(file_info);
@ -968,11 +989,7 @@ Status BackupEngineImpl::BackupMeta::StoreToFile(bool sync) {
BackupableDB::BackupableDB(DB* db, const BackupableDBOptions& options)
: StackableDB(db),
backup_engine_(new BackupEngineImpl(db->GetEnv(), options)) {
if (options.share_table_files) {
backup_engine_->DeleteBackupsNewerThan(GetLatestSequenceNumber());
}
}
backup_engine_(new BackupEngineImpl(db->GetEnv(), options)) {}
BackupableDB::~BackupableDB() {
delete backup_engine_;

@ -709,27 +709,37 @@ TEST(BackupableDBTest, OnlineIntegrationTest) {
CloseRestoreDB();
}
TEST(BackupableDBTest, DeleteNewerBackups) {
TEST(BackupableDBTest, FailOverwritingBackups) {
options_.write_buffer_size = 1024 * 1024 * 1024; // 1GB
// create backups 1, 2, 3, 4, 5
OpenBackupableDB(true);
for (int i = 0; i < 5; ++i) {
FillDB(db_.get(), 100 * i, 100 * (i + 1));
ASSERT_OK(db_->CreateNewBackup(!!(i % 2)));
ASSERT_OK(db_->CreateNewBackup(true));
CloseBackupableDB();
OpenBackupableDB(false);
}
CloseBackupableDB();
// backup 3 is fine
AssertBackupConsistency(3, 0, 300, 500);
// this should delete backups 4 and 5
OpenBackupableDB();
CloseBackupableDB();
// backups 4 and 5 don't exist
// restore 3
OpenRestoreDB();
Status s = restore_db_->RestoreDBFromBackup(4, dbname_, dbname_);
ASSERT_TRUE(s.IsNotFound());
s = restore_db_->RestoreDBFromBackup(5, dbname_, dbname_);
ASSERT_TRUE(s.IsNotFound());
ASSERT_OK(restore_db_->RestoreDBFromBackup(3, dbname_, dbname_));
CloseRestoreDB();
OpenBackupableDB(false);
FillDB(db_.get(), 0, 300);
Status s = db_->CreateNewBackup(true);
// the new backup fails because new table files
// clash with old table files from backups 4 and 5
// (since write_buffer_size is huge, we can be sure that
// each backup will generate only one sst file and that
// a file generated by a new backup is the same as
// sst file generated by backup 4)
ASSERT_TRUE(s.IsCorruption());
ASSERT_OK(db_->DeleteBackup(4));
ASSERT_OK(db_->DeleteBackup(5));
// now, the backup can succeed
ASSERT_OK(db_->CreateNewBackup(true));
}
TEST(BackupableDBTest, NoShareTableFiles) {

Loading…
Cancel
Save