Integrated BlobDB for backup/restore support (#8129)

Summary:
Add support for blob files for backup/restore like table files.
    Since a DB session ID is currently not supported for blob files (there is
    no place to store it in the header), blob files use the
    kLegacyCrc32cAndFileSize naming scheme even if
    share_files_with_checksum_naming is set to kUseDbSessionId.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/8129

Test Plan: Added new unit tests

Reviewed By: ltamasi

Differential Revision: D27408510

Pulled By: akankshamahajan15

fbshipit-source-id: b27434d189a639ef3e6ad165c61a143a2daaf06e
main
Akanksha Mahajan 4 years ago committed by Facebook GitHub Bot
parent a4e82a3cca
commit d52b520d51
  1. 1
      HISTORY.md
  2. 7
      env/composite_env_wrapper.h
  3. 11
      env/env.cc
  4. 8
      env/file_system.cc
  5. 12
      include/rocksdb/env.h
  6. 12
      include/rocksdb/file_system.h
  7. 47
      include/rocksdb/utilities/backupable_db.h
  8. 44
      utilities/backupable/backupable_db.cc
  9. 51
      utilities/backupable/backupable_db_test.cc

@ -18,6 +18,7 @@
### Public API change ### Public API change
* Added `TableProperties::slow_compression_estimated_data_size` and `TableProperties::fast_compression_estimated_data_size`. When `ColumnFamilyOptions::sample_for_compression > 0`, they estimate what `TableProperties::data_size` would have been if the "fast" or "slow" (see `ColumnFamilyOptions::sample_for_compression` API doc for definitions) compression had been used instead. * Added `TableProperties::slow_compression_estimated_data_size` and `TableProperties::fast_compression_estimated_data_size`. When `ColumnFamilyOptions::sample_for_compression > 0`, they estimate what `TableProperties::data_size` would have been if the "fast" or "slow" (see `ColumnFamilyOptions::sample_for_compression` API doc for definitions) compression had been used instead.
* Update DB::StartIOTrace and remove Env object from the arguments as its redundant and DB already has Env object that is passed down to IOTracer::StartIOTrace * Update DB::StartIOTrace and remove Env object from the arguments as its redundant and DB already has Env object that is passed down to IOTracer::StartIOTrace
* For new integrated BlobDB, add support for blob files for backup/restore like table files. Because of current limitations, blob files always use the kLegacyCrc32cAndFileSize naming scheme, and incremental backups must read and checksum all blob files in a DB, even for files that are already backed up.
### New Features ### New Features
* Added the ability to open BackupEngine backups as read-only DBs, using BackupInfo::name_for_open and env_for_open provided by BackupEngine::GetBackupInfo() with include_file_details=true. * Added the ability to open BackupEngine backups as read-only DBs, using BackupInfo::name_for_open and env_for_open provided by BackupEngine::GetBackupInfo() with include_file_details=true.

@ -218,7 +218,12 @@ class CompositeEnv : public Env {
return file_system_->OptimizeForCompactionTableRead( return file_system_->OptimizeForCompactionTableRead(
FileOptions(env_options), db_options); FileOptions(env_options), db_options);
} }
EnvOptions OptimizeForBlobFileRead(
const EnvOptions& env_options,
const ImmutableDBOptions& db_options) const override {
return file_system_->OptimizeForBlobFileRead(FileOptions(env_options),
db_options);
}
// This seems to clash with a macro on Windows, so #undef it here // This seems to clash with a macro on Windows, so #undef it here
#ifdef GetFreeSpace #ifdef GetFreeSpace
#undef GetFreeSpace #undef GetFreeSpace

11
env/env.cc vendored

@ -536,6 +536,11 @@ class LegacyFileSystemWrapper : public FileSystem {
const ImmutableDBOptions& db_options) const override { const ImmutableDBOptions& db_options) const override {
return target_->OptimizeForCompactionTableRead(file_options, db_options); return target_->OptimizeForCompactionTableRead(file_options, db_options);
} }
FileOptions OptimizeForBlobFileRead(
const FileOptions& file_options,
const ImmutableDBOptions& db_options) const override {
return target_->OptimizeForBlobFileRead(file_options, db_options);
}
#ifdef GetFreeSpace #ifdef GetFreeSpace
#undef GetFreeSpace #undef GetFreeSpace
@ -997,6 +1002,12 @@ EnvOptions Env::OptimizeForCompactionTableRead(
optimized_env_options.use_direct_reads = db_options.use_direct_reads; optimized_env_options.use_direct_reads = db_options.use_direct_reads;
return optimized_env_options; return optimized_env_options;
} }
EnvOptions Env::OptimizeForBlobFileRead(
const EnvOptions& env_options, const ImmutableDBOptions& db_options) const {
EnvOptions optimized_env_options(env_options);
optimized_env_options.use_direct_reads = db_options.use_direct_reads;
return optimized_env_options;
}
EnvOptions::EnvOptions(const DBOptions& options) { EnvOptions::EnvOptions(const DBOptions& options) {
AssignEnvOptions(this, options); AssignEnvOptions(this, options);

@ -83,6 +83,14 @@ FileOptions FileSystem::OptimizeForCompactionTableRead(
return optimized_file_options; return optimized_file_options;
} }
FileOptions FileSystem::OptimizeForBlobFileRead(
const FileOptions& file_options,
const ImmutableDBOptions& db_options) const {
FileOptions optimized_file_options(file_options);
optimized_file_options.use_direct_reads = db_options.use_direct_reads;
return optimized_file_options;
}
IOStatus WriteStringToFile(FileSystem* fs, const Slice& data, IOStatus WriteStringToFile(FileSystem* fs, const Slice& data,
const std::string& fname, bool should_sync) { const std::string& fname, bool should_sync) {
std::unique_ptr<FSWritableFile> file; std::unique_ptr<FSWritableFile> file;

@ -546,6 +546,13 @@ class Env {
const EnvOptions& env_options, const EnvOptions& env_options,
const ImmutableDBOptions& db_options) const; const ImmutableDBOptions& db_options) const;
// OptimizeForBlobFileRead will create a new EnvOptions object that
// is a copy of the EnvOptions in the parameters, but is optimized for reading
// blob files.
virtual EnvOptions OptimizeForBlobFileRead(
const EnvOptions& env_options,
const ImmutableDBOptions& db_options) const;
// Returns the status of all threads that belong to the current Env. // Returns the status of all threads that belong to the current Env.
virtual Status GetThreadList(std::vector<ThreadStatus>* /*thread_list*/) { virtual Status GetThreadList(std::vector<ThreadStatus>* /*thread_list*/) {
return Status::NotSupported("Env::GetThreadList() not supported."); return Status::NotSupported("Env::GetThreadList() not supported.");
@ -1495,6 +1502,11 @@ class EnvWrapper : public Env {
const ImmutableDBOptions& db_options) const override { const ImmutableDBOptions& db_options) const override {
return target_->OptimizeForCompactionTableRead(env_options, db_options); return target_->OptimizeForCompactionTableRead(env_options, db_options);
} }
EnvOptions OptimizeForBlobFileRead(
const EnvOptions& env_options,
const ImmutableDBOptions& db_options) const override {
return target_->OptimizeForBlobFileRead(env_options, db_options);
}
Status GetFreeSpace(const std::string& path, uint64_t* diskfree) override { Status GetFreeSpace(const std::string& path, uint64_t* diskfree) override {
return target_->GetFreeSpace(path, diskfree); return target_->GetFreeSpace(path, diskfree);
} }

@ -550,6 +550,13 @@ class FileSystem {
const FileOptions& file_options, const FileOptions& file_options,
const ImmutableDBOptions& db_options) const; const ImmutableDBOptions& db_options) const;
// OptimizeForBlobFileRead will create a new FileOptions object that
// is a copy of the FileOptions in the parameters, but is optimized for
// reading blob files.
virtual FileOptions OptimizeForBlobFileRead(
const FileOptions& file_options,
const ImmutableDBOptions& db_options) const;
// This seems to clash with a macro on Windows, so #undef it here // This seems to clash with a macro on Windows, so #undef it here
#ifdef GetFreeSpace #ifdef GetFreeSpace
#undef GetFreeSpace #undef GetFreeSpace
@ -1289,6 +1296,11 @@ class FileSystemWrapper : public FileSystem {
const ImmutableDBOptions& db_options) const override { const ImmutableDBOptions& db_options) const override {
return target_->OptimizeForCompactionTableRead(file_options, db_options); return target_->OptimizeForCompactionTableRead(file_options, db_options);
} }
FileOptions OptimizeForBlobFileRead(
const FileOptions& file_options,
const ImmutableDBOptions& db_options) const override {
return target_->OptimizeForBlobFileRead(file_options, db_options);
}
IOStatus GetFreeSpace(const std::string& path, const IOOptions& options, IOStatus GetFreeSpace(const std::string& path, const IOOptions& options,
uint64_t* diskfree, IODebugContext* dbg) override { uint64_t* diskfree, IODebugContext* dbg) override {
return target_->GetFreeSpace(path, options, diskfree, dbg); return target_->GetFreeSpace(path, options, diskfree, dbg);

@ -42,11 +42,14 @@ struct BackupableDBOptions {
// Default: nullptr // Default: nullptr
Env* backup_env; Env* backup_env;
// If share_table_files == true, backup will assume that table files with // share_table_files supports table and blob files.
// same name have the same contents. This enables incremental backups and //
// avoids unnecessary data copies. // If share_table_files == true, the backup directory will share table and
// If share_table_files == false, each backup will be on its own and will // blob files among backups, to save space among backups of the same DB and to
// not share any data with other backups. // enable incremental backups by only copying new files.
// If share_table_files == false, each backup will be on its own and will not
// share any data with other backups.
//
// default: true // default: true
bool share_table_files; bool share_table_files;
@ -92,13 +95,15 @@ struct BackupableDBOptions {
// Default: nullptr // Default: nullptr
std::shared_ptr<RateLimiter> restore_rate_limiter{nullptr}; std::shared_ptr<RateLimiter> restore_rate_limiter{nullptr};
// share_files_with_checksum supports table and blob files.
//
// Only used if share_table_files is set to true. Setting to false is // Only used if share_table_files is set to true. Setting to false is
// DEPRECATED and potentially dangerous because in that case BackupEngine // DEPRECATED and potentially dangerous because in that case BackupEngine
// can lose data if backing up databases with distinct or divergent // can lose data if backing up databases with distinct or divergent
// history, for example if restoring from a backup other than the latest, // history, for example if restoring from a backup other than the latest,
// writing to the DB, and creating another backup. Setting to true (default) // writing to the DB, and creating another backup. Setting to true (default)
// prevents these issues by ensuring that different table files (SSTs) with // prevents these issues by ensuring that different table files (SSTs) and
// the same number are treated as distinct. See // blob files with the same number are treated as distinct. See
// share_files_with_checksum_naming and ShareFilesNaming. // share_files_with_checksum_naming and ShareFilesNaming.
// //
// Default: true // Default: true
@ -126,11 +131,12 @@ struct BackupableDBOptions {
int max_valid_backups_to_open; int max_valid_backups_to_open;
// ShareFilesNaming describes possible naming schemes for backup // ShareFilesNaming describes possible naming schemes for backup
// table file names when the table files are stored in the shared_checksum // table and blob file names when they are stored in the
// directory (i.e., both share_table_files and share_files_with_checksum // shared_checksum directory (i.e., both share_table_files and
// are true). // share_files_with_checksum are true).
enum ShareFilesNaming : uint32_t { enum ShareFilesNaming : uint32_t {
// Backup SST filenames are <file_number>_<crc32c>_<file_size>.sst // Backup blob filenames are <file_number>_<crc32c>_<file_size>.blob and
// backup SST filenames are <file_number>_<crc32c>_<file_size>.sst
// where <crc32c> is an unsigned decimal integer. This is the // where <crc32c> is an unsigned decimal integer. This is the
// original/legacy naming scheme for share_files_with_checksum, // original/legacy naming scheme for share_files_with_checksum,
// with two problems: // with two problems:
@ -139,6 +145,7 @@ struct BackupableDBOptions {
// * Determining the name to use requires computing the checksum, // * Determining the name to use requires computing the checksum,
// so generally requires reading the whole file even if the file // so generally requires reading the whole file even if the file
// is already backed up. // is already backed up.
//
// ** ONLY RECOMMENDED FOR PRESERVING OLD BEHAVIOR ** // ** ONLY RECOMMENDED FOR PRESERVING OLD BEHAVIOR **
kLegacyCrc32cAndFileSize = 1U, kLegacyCrc32cAndFileSize = 1U,
@ -148,6 +155,8 @@ struct BackupableDBOptions {
// the value is a DB session id, not a checksum. // the value is a DB session id, not a checksum.
// //
// Exceptions: // Exceptions:
// * For blob files, kLegacyCrc32cAndFileSize is used as currently
// db_session_id is not supported by the blob file format.
// * For old SST files without a DB session id, kLegacyCrc32cAndFileSize // * For old SST files without a DB session id, kLegacyCrc32cAndFileSize
// will be used instead, matching the names assigned by RocksDB versions // will be used instead, matching the names assigned by RocksDB versions
// not supporting the newer naming scheme. // not supporting the newer naming scheme.
@ -158,25 +167,25 @@ struct BackupableDBOptions {
// If not already part of the naming scheme, insert // If not already part of the naming scheme, insert
// _<file_size> // _<file_size>
// before .sst in the name. In case of user code actually parsing the // before .sst and .blob in the name. In case of user code actually parsing
// last _<whatever> before the .sst as the file size, this preserves that // the last _<whatever> before the .sst and .blob as the file size, this
// feature of kLegacyCrc32cAndFileSize. In other words, this option makes // preserves that feature of kLegacyCrc32cAndFileSize. In other words, this
// official that unofficial feature of the backup metadata. // option makes official that unofficial feature of the backup metadata.
// //
// We do not consider SST file sizes to have sufficient entropy to // We do not consider SST and blob file sizes to have sufficient entropy to
// contribute significantly to naming uniqueness. // contribute significantly to naming uniqueness.
kFlagIncludeFileSize = 1U << 31, kFlagIncludeFileSize = 1U << 31,
kMaskNamingFlags = ~kMaskNoNamingFlags, kMaskNamingFlags = ~kMaskNoNamingFlags,
}; };
// Naming option for share_files_with_checksum table files. See // Naming option for share_files_with_checksum table and blob files. See
// ShareFilesNaming for details. // ShareFilesNaming for details.
// //
// Modifying this option cannot introduce a downgrade compatibility issue // Modifying this option cannot introduce a downgrade compatibility issue
// because RocksDB can read, restore, and delete backups using different file // because RocksDB can read, restore, and delete backups using different file
// names, and it's OK for a backup directory to use a mixture of table file // names, and it's OK for a backup directory to use a mixture of table and
// naming schemes. // blob files naming schemes.
// //
// However, modifying this option and saving more backups to the same // However, modifying this option and saving more backups to the same
// directory can lead to the same file getting saved again to that // directory can lead to the same file getting saved again to that

@ -749,7 +749,7 @@ class BackupEngineImpl {
BackupID backup_id, bool shared, const std::string& src_dir, BackupID backup_id, bool shared, const std::string& src_dir,
const std::string& fname, // starts with "/" const std::string& fname, // starts with "/"
const EnvOptions& src_env_options, RateLimiter* rate_limiter, const EnvOptions& src_env_options, RateLimiter* rate_limiter,
uint64_t size_bytes, uint64_t size_limit = 0, FileType file_type, uint64_t size_bytes, uint64_t size_limit = 0,
bool shared_checksum = false, bool shared_checksum = false,
std::function<void()> progress_callback = []() {}, std::function<void()> progress_callback = []() {},
const std::string& contents = std::string(), const std::string& contents = std::string(),
@ -1287,7 +1287,7 @@ Status BackupEngineImpl::CreateNewBackupWithMetadata(
Log(options_.info_log, "add file for backup %s", fname.c_str()); Log(options_.info_log, "add file for backup %s", fname.c_str());
uint64_t size_bytes = 0; uint64_t size_bytes = 0;
Status st; Status st;
if (type == kTableFile) { if (type == kTableFile || type == kBlobFile) {
st = db_env_->GetFileSize(src_dirname + fname, &size_bytes); st = db_env_->GetFileSize(src_dirname + fname, &size_bytes);
} }
EnvOptions src_env_options; EnvOptions src_env_options;
@ -1304,6 +1304,10 @@ Status BackupEngineImpl::CreateNewBackupWithMetadata(
src_env_options = src_env_options =
db_env_->OptimizeForManifestRead(src_raw_env_options); db_env_->OptimizeForManifestRead(src_raw_env_options);
break; break;
case kBlobFile:
src_env_options = db_env_->OptimizeForBlobFileRead(
src_raw_env_options, ImmutableDBOptions(db_options));
break;
default: default:
// Other backed up files (like options file) are not read by live // Other backed up files (like options file) are not read by live
// DB, so don't need to worry about avoiding mixing buffered and // DB, so don't need to worry about avoiding mixing buffered and
@ -1314,22 +1318,25 @@ Status BackupEngineImpl::CreateNewBackupWithMetadata(
if (st.ok()) { if (st.ok()) {
st = AddBackupFileWorkItem( st = AddBackupFileWorkItem(
live_dst_paths, backup_items_to_finish, new_backup_id, live_dst_paths, backup_items_to_finish, new_backup_id,
options_.share_table_files && type == kTableFile, src_dirname, options_.share_table_files &&
fname, src_env_options, rate_limiter, size_bytes, (type == kTableFile || type == kBlobFile),
size_limit_bytes, src_dirname, fname, src_env_options, rate_limiter, type,
options_.share_files_with_checksum && type == kTableFile, size_bytes, size_limit_bytes,
options_.share_files_with_checksum &&
(type == kTableFile || type == kBlobFile),
options.progress_callback, "" /* contents */, options.progress_callback, "" /* contents */,
checksum_func_name, checksum_val); checksum_func_name, checksum_val);
} }
return st; return st;
} /* copy_file_cb */, } /* copy_file_cb */,
[&](const std::string& fname, const std::string& contents, FileType) { [&](const std::string& fname, const std::string& contents,
FileType type) {
Log(options_.info_log, "add file for backup %s", fname.c_str()); Log(options_.info_log, "add file for backup %s", fname.c_str());
return AddBackupFileWorkItem( return AddBackupFileWorkItem(
live_dst_paths, backup_items_to_finish, new_backup_id, live_dst_paths, backup_items_to_finish, new_backup_id,
false /* shared */, "" /* src_dir */, fname, false /* shared */, "" /* src_dir */, fname,
EnvOptions() /* src_env_options */, rate_limiter, contents.size(), EnvOptions() /* src_env_options */, rate_limiter, type,
0 /* size_limit */, false /* shared_checksum */, contents.size(), 0 /* size_limit */, false /* shared_checksum */,
options.progress_callback, contents); options.progress_callback, contents);
} /* create_file_cb */, } /* create_file_cb */,
&sequence_number, options.flush_before_backup ? 0 : port::kMaxUint64, &sequence_number, options.flush_before_backup ? 0 : port::kMaxUint64,
@ -1872,9 +1879,10 @@ Status BackupEngineImpl::AddBackupFileWorkItem(
std::vector<BackupAfterCopyOrCreateWorkItem>& backup_items_to_finish, std::vector<BackupAfterCopyOrCreateWorkItem>& backup_items_to_finish,
BackupID backup_id, bool shared, const std::string& src_dir, BackupID backup_id, bool shared, const std::string& src_dir,
const std::string& fname, const EnvOptions& src_env_options, const std::string& fname, const EnvOptions& src_env_options,
RateLimiter* rate_limiter, uint64_t size_bytes, uint64_t size_limit, RateLimiter* rate_limiter, FileType file_type, uint64_t size_bytes,
bool shared_checksum, std::function<void()> progress_callback, uint64_t size_limit, bool shared_checksum,
const std::string& contents, const std::string& src_checksum_func_name, std::function<void()> progress_callback, const std::string& contents,
const std::string& src_checksum_func_name,
const std::string& src_checksum_str) { const std::string& src_checksum_str) {
assert(!fname.empty() && fname[0] == '/'); assert(!fname.empty() && fname[0] == '/');
assert(contents.empty() != src_dir.empty()); assert(contents.empty() != src_dir.empty());
@ -1887,8 +1895,8 @@ Status BackupEngineImpl::AddBackupFileWorkItem(
std::string checksum_hex; std::string checksum_hex;
// Whenever a default checksum function name is passed in, we will compares // Whenever a default checksum function name is passed in, we will compares
// the corresponding checksum values after copying. Note that only table files // the corresponding checksum values after copying. Note that only table and
// may have a known checksum function name passed in. // blob files may have a known checksum function name passed in.
// //
// If no default checksum function name is passed in and db session id is not // If no default checksum function name is passed in and db session id is not
// available, we will calculate the checksum *before* copying in two cases // available, we will calculate the checksum *before* copying in two cases
@ -1906,7 +1914,8 @@ Status BackupEngineImpl::AddBackupFileWorkItem(
// Step 1: Prepare the relative path to destination // Step 1: Prepare the relative path to destination
if (shared && shared_checksum) { if (shared && shared_checksum) {
if (GetNamingNoFlags() != BackupableDBOptions::kLegacyCrc32cAndFileSize) { if (GetNamingNoFlags() != BackupableDBOptions::kLegacyCrc32cAndFileSize &&
file_type != kBlobFile) {
// Prepare db_session_id to add to the file name // Prepare db_session_id to add to the file name
// Ignore the returned status // Ignore the returned status
// In the failed cases, db_id and db_session_id will be empty // In the failed cases, db_id and db_session_id will be empty
@ -1938,6 +1947,11 @@ Status BackupEngineImpl::AddBackupFileWorkItem(
// shared_checksum/<file_number>_<db_session_id>.sst // shared_checksum/<file_number>_<db_session_id>.sst
// Otherwise, dst_relative is of the form // Otherwise, dst_relative is of the form
// shared_checksum/<file_number>_<checksum>_<size>.sst // shared_checksum/<file_number>_<checksum>_<size>.sst
//
// For blob files, db_session_id is not supported with the blob file format.
// It uses original/legacy naming scheme.
// dst_relative will be of the form:
// shared_checksum/<file_number>_<checksum>_<size>.blob
dst_relative = GetSharedFileWithChecksum(dst_relative, checksum_hex, dst_relative = GetSharedFileWithChecksum(dst_relative, checksum_hex,
size_bytes, db_session_id); size_bytes, db_session_id);
dst_relative_tmp = GetSharedFileWithChecksumRel(dst_relative, true); dst_relative_tmp = GetSharedFileWithChecksumRel(dst_relative, true);

@ -632,6 +632,7 @@ class BackupableDBTest : public testing::Test {
options_.write_buffer_size = 1 << 17; // 128KB options_.write_buffer_size = 1 << 17; // 128KB
options_.env = test_db_env_.get(); options_.env = test_db_env_.get();
options_.wal_dir = dbname_; options_.wal_dir = dbname_;
options_.enable_blob_files = true;
// Create logger // Create logger
DBOptions logger_options; DBOptions logger_options;
@ -894,15 +895,18 @@ class BackupableDBTest : public testing::Test {
void AssertDirectoryFilesMatchRegex(const std::string& dir, void AssertDirectoryFilesMatchRegex(const std::string& dir,
const std::regex& pattern, const std::regex& pattern,
const std::string& file_type,
int minimum_count) { int minimum_count) {
std::vector<FileAttributes> children; std::vector<FileAttributes> children;
ASSERT_OK(file_manager_->GetChildrenFileAttributes(dir, &children)); ASSERT_OK(file_manager_->GetChildrenFileAttributes(dir, &children));
int found_count = 0; int found_count = 0;
for (const auto& child : children) { for (const auto& child : children) {
const std::string match("match"); if (EndsWith(child.name, file_type)) {
ASSERT_EQ(match, std::regex_replace(child.name, pattern, match)); ASSERT_TRUE(std::regex_match(child.name, pattern))
<< "File name " << child.name << " does not match regex.";
++found_count; ++found_count;
} }
}
ASSERT_GE(found_count, minimum_count); ASSERT_GE(found_count, minimum_count);
} }
@ -1433,9 +1437,8 @@ TEST_F(BackupableDBTest, CorruptFileMaintainSize) {
} }
// Corrupt a blob file but maintain its size // Corrupt a blob file but maintain its size
TEST_F(BackupableDBTest, CorruptBlobFileMaintainSize) { TEST_P(BackupableDBTestWithParam, CorruptBlobFileMaintainSize) {
const int keys_iteration = 5000; const int keys_iteration = 5000;
options_.enable_blob_files = true;
OpenDBAndBackupEngine(true); OpenDBAndBackupEngine(true);
// create a backup // create a backup
FillDB(db_.get(), 0, keys_iteration); FillDB(db_.get(), 0, keys_iteration);
@ -1450,12 +1453,18 @@ TEST_F(BackupableDBTest, CorruptBlobFileMaintainSize) {
std::string file_to_corrupt; std::string file_to_corrupt;
std::vector<FileAttributes> children; std::vector<FileAttributes> children;
const std::string dir = backupdir_ + "/private/1";
std::string dir = backupdir_;
if (backupable_options_->share_files_with_checksum) {
dir += "/shared_checksum";
} else {
dir += "/shared";
}
ASSERT_OK(file_manager_->GetChildrenFileAttributes(dir, &children)); ASSERT_OK(file_manager_->GetChildrenFileAttributes(dir, &children));
for (const auto& child : children) { for (const auto& child : children) {
if (child.name.find(".blob") != std::string::npos && if (EndsWith(child.name, ".blob") && child.size_bytes != 0) {
child.size_bytes != 0) {
// corrupt the blob files by replacing its content by file_size random // corrupt the blob files by replacing its content by file_size random
// bytes // bytes
ASSERT_OK( ASSERT_OK(
@ -1509,7 +1518,6 @@ TEST_F(BackupableDBTest, TableFileCorruptedBeforeBackup) {
// been corrupted and the blob file checksum is stored in the DB manifest // been corrupted and the blob file checksum is stored in the DB manifest
TEST_F(BackupableDBTest, BlobFileCorruptedBeforeBackup) { TEST_F(BackupableDBTest, BlobFileCorruptedBeforeBackup) {
const int keys_iteration = 50000; const int keys_iteration = 50000;
options_.enable_blob_files = true;
OpenDBAndBackupEngine(true /* destroy_old_data */, false /* dummy */, OpenDBAndBackupEngine(true /* destroy_old_data */, false /* dummy */,
kNoShare); kNoShare);
@ -1575,7 +1583,6 @@ TEST_P(BackupableDBTestWithParam, TableFileCorruptedBeforeBackup) {
// the case when backup blob files will be stored in a shared directory // the case when backup blob files will be stored in a shared directory
TEST_P(BackupableDBTestWithParam, BlobFileCorruptedBeforeBackup) { TEST_P(BackupableDBTestWithParam, BlobFileCorruptedBeforeBackup) {
const int keys_iteration = 50000; const int keys_iteration = 50000;
options_.enable_blob_files = true;
OpenDBAndBackupEngine(true /* destroy_old_data */); OpenDBAndBackupEngine(true /* destroy_old_data */);
FillDB(db_.get(), 0, keys_iteration); FillDB(db_.get(), 0, keys_iteration);
CloseAndReopenDB(/*read_only*/ true); CloseAndReopenDB(/*read_only*/ true);
@ -1986,6 +1993,8 @@ TEST_F(BackupableDBTest, ShareTableFilesWithChecksumsNewNaming) {
"[0-9]+_s[0-9A-Z]{20}_[0-9]+[.]sst"}, "[0-9]+_s[0-9A-Z]{20}_[0-9]+[.]sst"},
}; };
const std::string blobfile_pattern = "[0-9]+_[0-9]+_[0-9]+[.]blob";
for (const auto& pair : option_to_expected) { for (const auto& pair : option_to_expected) {
CloseAndReopenDB(); CloseAndReopenDB();
backupable_options_->share_files_with_checksum_naming = pair.first; backupable_options_->share_files_with_checksum_naming = pair.first;
@ -1994,12 +2003,15 @@ TEST_F(BackupableDBTest, ShareTableFilesWithChecksumsNewNaming) {
CloseDBAndBackupEngine(); CloseDBAndBackupEngine();
AssertBackupConsistency(1, 0, keys_iteration, keys_iteration * 2); AssertBackupConsistency(1, 0, keys_iteration, keys_iteration * 2);
AssertDirectoryFilesMatchRegex(backupdir_ + "/shared_checksum", AssertDirectoryFilesMatchRegex(backupdir_ + "/shared_checksum",
std::regex(pair.second), std::regex(pair.second), ".sst",
1 /* minimum_count */); 1 /* minimum_count */);
if (std::string::npos != pair.second.find("_[0-9]+[.]sst")) { if (std::string::npos != pair.second.find("_[0-9]+[.]sst")) {
AssertDirectoryFilesSizeIndicators(backupdir_ + "/shared_checksum", AssertDirectoryFilesSizeIndicators(backupdir_ + "/shared_checksum",
1 /* minimum_count */); 1 /* minimum_count */);
} }
AssertDirectoryFilesMatchRegex(backupdir_ + "/shared_checksum",
std::regex(blobfile_pattern), ".blob",
1 /* minimum_count */);
} }
} }
@ -2024,6 +2036,8 @@ TEST_F(BackupableDBTest, ShareTableFilesWithChecksumsOldFileNaming) {
// Old names should always be used on old files // Old names should always be used on old files
const std::regex expected("[0-9]+_[0-9]+_[0-9]+[.]sst"); const std::regex expected("[0-9]+_[0-9]+_[0-9]+[.]sst");
const std::string blobfile_pattern = "[0-9]+_[0-9]+_[0-9]+[.]blob";
for (ShareFilesNaming option : {kNamingDefault, kUseDbSessionId}) { for (ShareFilesNaming option : {kNamingDefault, kUseDbSessionId}) {
CloseAndReopenDB(); CloseAndReopenDB();
backupable_options_->share_files_with_checksum_naming = option; backupable_options_->share_files_with_checksum_naming = option;
@ -2032,6 +2046,9 @@ TEST_F(BackupableDBTest, ShareTableFilesWithChecksumsOldFileNaming) {
CloseDBAndBackupEngine(); CloseDBAndBackupEngine();
AssertBackupConsistency(1, 0, keys_iteration, keys_iteration * 2); AssertBackupConsistency(1, 0, keys_iteration, keys_iteration * 2);
AssertDirectoryFilesMatchRegex(backupdir_ + "/shared_checksum", expected, AssertDirectoryFilesMatchRegex(backupdir_ + "/shared_checksum", expected,
".sst", 1 /* minimum_count */);
AssertDirectoryFilesMatchRegex(backupdir_ + "/shared_checksum",
std::regex(blobfile_pattern), ".blob",
1 /* minimum_count */); 1 /* minimum_count */);
} }
@ -2175,9 +2192,9 @@ TEST_F(BackupableDBTest, FileSizeForIncremental) {
ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), true /*flush*/)); ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), true /*flush*/));
CloseDBAndBackupEngine(); CloseDBAndBackupEngine();
// Corrupt backup SST // Corrupt backup SST and blob file
ASSERT_OK(file_manager_->GetChildrenFileAttributes(shared_dir, &children)); ASSERT_OK(file_manager_->GetChildrenFileAttributes(shared_dir, &children));
ASSERT_EQ(children.size(), 1U); // one sst ASSERT_EQ(children.size(), 2U); // one sst and one blob file
for (const auto& child : children) { for (const auto& child : children) {
if (child.name.size() > 4 && child.size_bytes > 0) { if (child.name.size() > 4 && child.size_bytes > 0) {
ASSERT_OK( ASSERT_OK(
@ -2234,10 +2251,10 @@ TEST_F(BackupableDBTest, FileSizeForIncremental) {
OpenDBAndBackupEngine(false, false, share); OpenDBAndBackupEngine(false, false, share);
ASSERT_OK(db_->Put(WriteOptions(), "y", Random(42).RandomString(500))); ASSERT_OK(db_->Put(WriteOptions(), "y", Random(42).RandomString(500)));
// Count backup SSTs // Count backup SSTs and blob files.
children.clear(); children.clear();
ASSERT_OK(file_manager_->GetChildrenFileAttributes(shared_dir, &children)); ASSERT_OK(file_manager_->GetChildrenFileAttributes(shared_dir, &children));
ASSERT_EQ(children.size(), 2U); // two sst ASSERT_EQ(children.size(), 4U); // two sst and two blob files
// Try create backup 3 // Try create backup 3
s = backup_engine_->CreateNewBackup(db_.get(), true /*flush*/); s = backup_engine_->CreateNewBackup(db_.get(), true /*flush*/);
@ -2250,18 +2267,18 @@ TEST_F(BackupableDBTest, FileSizeForIncremental) {
// Acceptable to call it corruption if size is not in name and // Acceptable to call it corruption if size is not in name and
// db session id collision is practically impossible. // db session id collision is practically impossible.
EXPECT_TRUE(s.IsCorruption()); EXPECT_TRUE(s.IsCorruption());
EXPECT_EQ(children.size(), 2U); // no SST added EXPECT_EQ(children.size(), 4U); // no SST/Blob file added
} else if (option == share_no_checksum) { } else if (option == share_no_checksum) {
// Good to call it corruption if both backups cannot be // Good to call it corruption if both backups cannot be
// accommodated. // accommodated.
EXPECT_TRUE(s.IsCorruption()); EXPECT_TRUE(s.IsCorruption());
EXPECT_EQ(children.size(), 2U); // no SST added EXPECT_EQ(children.size(), 4U); // no SST/Blob file added
} else { } else {
// Since opening a DB seems sufficient for detecting size corruption // Since opening a DB seems sufficient for detecting size corruption
// on the DB side, this should be a good thing, ... // on the DB side, this should be a good thing, ...
EXPECT_OK(s); EXPECT_OK(s);
// ... as long as we did actually treat it as a distinct SST file. // ... as long as we did actually treat it as a distinct SST file.
EXPECT_EQ(children.size(), 3U); // Another SST added EXPECT_EQ(children.size(), 6U); // Another SST and blob added
} }
CloseDBAndBackupEngine(); CloseDBAndBackupEngine();
ASSERT_OK(DestroyDB(dbname_, options_)); ASSERT_OK(DestroyDB(dbname_, options_));

Loading…
Cancel
Save