Add checkpoint support to BlobDB (#7959)

Summary:
The patch adds checkpoint support to BlobDB. Blob files are hard linked or
copied, depending on whether the checkpoint directory is on the same filesystem
or not, similarly to table files.

TODO: Add support for blob files to `ExportColumnFamily` and to the checksum
verification logic used by backup/restore.
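
For illustration, a minimal sketch (not part of the patch) of checkpointing a database that has blob files enabled; the paths are made up and error handling is reduced to asserts:

```cpp
#include <cassert>
#include <memory>

#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "rocksdb/utilities/checkpoint.h"

using namespace ROCKSDB_NAMESPACE;

int main() {
  Options options;
  options.create_if_missing = true;
  options.enable_blob_files = true;  // values are written to blob files

  DB* db = nullptr;
  Status s = DB::Open(options, "/tmp/blobdb_example", &db);  // illustrative path
  assert(s.ok());
  std::unique_ptr<DB> db_guard(db);

  s = db->Put(WriteOptions(), "key", "value stored in a blob file");
  assert(s.ok());
  s = db->Flush(FlushOptions());
  assert(s.ok());

  // Checkpoint the database: blob files are hard linked into the checkpoint
  // directory (or copied if it is on a different filesystem), just like the
  // SST files.
  Checkpoint* checkpoint = nullptr;
  s = Checkpoint::Create(db, &checkpoint);
  assert(s.ok());
  std::unique_ptr<Checkpoint> checkpoint_guard(checkpoint);

  s = checkpoint->CreateCheckpoint("/tmp/blobdb_example_checkpoint");
  assert(s.ok());

  return 0;
}
```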

Pull Request resolved: https://github.com/facebook/rocksdb/pull/7959

Test Plan: Ran `make check` and the crash test for a while.

Reviewed By: riversand963

Differential Revision: D26434768

Pulled By: ltamasi

fbshipit-source-id: 994be55a8dc08133028250760fca440d2c7c4dc5
Branch: main
Author: Levi Tamasi, committed 4 years ago by Facebook GitHub Bot
Parent: 0743eba0c4
Commit: dab4fe5bcd
  1. db_stress_tool/db_stress_test_base.cc (5 lines changed)
  2. include/rocksdb/utilities/checkpoint.h (14 lines changed)
  3. tools/db_crashtest.py (1 line changed)
  4. utilities/checkpoint/checkpoint_impl.cc (38 lines changed)
  5. utilities/checkpoint/checkpoint_impl.h (22 lines changed)
  6. utilities/checkpoint/checkpoint_test.cc (54 lines changed)

@@ -2217,12 +2217,11 @@ void StressTest::Open() {
   if ((options_.enable_blob_files || options_.enable_blob_garbage_collection ||
        FLAGS_allow_setting_blob_options_dynamically) &&
       (FLAGS_use_merge || FLAGS_enable_compaction_filter ||
-       FLAGS_checkpoint_one_in > 0 || FLAGS_backup_one_in > 0 ||
-       FLAGS_best_efforts_recovery)) {
+       FLAGS_backup_one_in > 0 || FLAGS_best_efforts_recovery)) {
     fprintf(
         stderr,
         "Integrated BlobDB is currently incompatible with Merge, compaction "
-        "filters, checkpoints, backup/restore, and best-effort recovery\n");
+        "filters, backup/restore, and best-effort recovery\n");
     exit(1);
   }

@@ -24,13 +24,13 @@ class Checkpoint {
   // Creates a Checkpoint object to be used for creating openable snapshots
   static Status Create(DB* db, Checkpoint** checkpoint_ptr);
 
-  // Builds an openable snapshot of RocksDB on the same disk, which
-  // accepts an output directory on the same disk, and under the directory
-  // (1) hard-linked SST files pointing to existing live SST files
-  // SST files will be copied if output directory is on a different filesystem
-  // (2) a copied manifest files and other files
-  // The directory should not already exist and will be created by this API.
-  // The directory will be an absolute path
+  // Builds an openable snapshot of RocksDB. checkpoint_dir should contain an
+  // absolute path. The specified directory should not exist, since it will be
+  // created by the API.
+  // When a checkpoint is created,
+  // (1) SST and blob files are hard linked if the output directory is on the
+  // same filesystem as the database, and copied otherwise.
+  // (2) other required files (like MANIFEST) are always copied.
   // log_size_for_flush: if the total log file size is equal or larger than
   // this value, then a flush is triggered for all the column families. The
   // default value is 0, which means flush is always triggered. If you move
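
A caller-side sketch of the parameters documented above, assuming an already-created `Checkpoint*` named `checkpoint` and an illustrative target path: a flush is forced only once the WAL has grown past the given threshold, and the checkpoint's sequence number is returned through the optional out-parameter.

```cpp
// Illustrative only: a flush is triggered only if the total WAL size is >= 4 MB.
uint64_t sequence_number = 0;
Status s = checkpoint->CreateCheckpoint("/path/to/checkpoint_dir",
                                        4 * 1024 * 1024 /* log_size_for_flush */,
                                        &sequence_number);
```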

@@ -279,7 +279,6 @@ blob_params = {
     "use_merge": 0,
     "enable_compaction_filter": 0,
     "backup_one_in": 0,
-    "checkpoint_one_in": 0,
 }
 
 def finalize_and_sanitize(src_params):

@@ -14,6 +14,7 @@
 #include <algorithm>
 #include <cinttypes>
 #include <string>
+#include <tuple>
 #include <vector>
 
 #include "db/wal_manager.h"
@@ -269,10 +270,11 @@ Status CheckpointImpl::CreateCustomCheckpoint(
   size_t wal_size = live_wal_files.size();
 
-  // process live files, non-table files first
+  // process live files, non-table, non-blob files first
   std::string manifest_fname, current_fname;
-  // record table files for processing next
-  std::vector<std::pair<std::string, uint64_t>> live_table_files;
+  // record table and blob files for processing next
+  std::vector<std::tuple<std::string, uint64_t, FileType>>
+      live_table_and_blob_files;
   for (auto& live_file : live_files) {
     if (!s.ok()) {
       break;
@@ -284,8 +286,8 @@ Status CheckpointImpl::CreateCustomCheckpoint(
       s = Status::Corruption("Can't parse file name. This is very bad");
       break;
     }
-    // we should only get sst, options, manifest and current files here
-    assert(type == kTableFile || type == kDescriptorFile ||
+    // we should only get sst, blob, options, manifest and current files here
+    assert(type == kTableFile || type == kBlobFile || type == kDescriptorFile ||
            type == kCurrentFile || type == kOptionsFile);
     assert(live_file.size() > 0 && live_file[0] == '/');
     if (type == kCurrentFile) {
@@ -297,15 +299,16 @@ Status CheckpointImpl::CreateCustomCheckpoint(
     } else if (type == kDescriptorFile) {
       manifest_fname = live_file;
     }
-    if (type != kTableFile) {
-      // copy non-table files here
+
+    if (type != kTableFile && type != kBlobFile) {
+      // copy non-table, non-blob files here
       // * if it's kDescriptorFile, limit the size to manifest_file_size
       s = copy_file_cb(db_->GetName(), live_file,
                        (type == kDescriptorFile) ? manifest_file_size : 0, type,
                        kUnknownFileChecksumFuncName, kUnknownFileChecksum);
     } else {
-      // process table files below
-      live_table_files.push_back(make_pair(live_file, number));
+      // process table and blob files below
+      live_table_and_blob_files.emplace_back(live_file, number, type);
     }
   }
@@ -322,19 +325,21 @@ Status CheckpointImpl::CreateCustomCheckpoint(
                                          manifest_file_size, checksum_list.get());
   }
 
-  // copy/hard link live table files
-  for (auto& ltf : live_table_files) {
+  // copy/hard link live table and blob files
+  for (const auto& file : live_table_and_blob_files) {
     if (!s.ok()) {
       break;
     }
-    std::string& src_fname = ltf.first;
-    uint64_t number = ltf.second;
+    const std::string& src_fname = std::get<0>(file);
+    const uint64_t number = std::get<1>(file);
+    const FileType type = std::get<2>(file);
+
     // rules:
-    // * for kTableFile, attempt hard link instead of copy.
+    // * for kTableFile/kBlobFile, attempt hard link instead of copy.
     // * but can't hard link across filesystems.
     if (same_fs) {
-      s = link_file_cb(db_->GetName(), src_fname, kTableFile);
+      s = link_file_cb(db_->GetName(), src_fname, type);
       if (s.IsNotSupported()) {
         same_fs = false;
         s = Status::OK();
@@ -347,6 +352,7 @@ Status CheckpointImpl::CreateCustomCheckpoint(
       // we ignore the checksums either because they are not required or
       // because we failed to obtain the checksum list for old table files
       // that have no file checksums
+      // TODO: support this verification for blob files
       if (get_live_table_checksum) {
         // find checksum info for table files
         Status search = checksum_list->SearchOneFileChecksum(
@@ -359,7 +365,7 @@ Status CheckpointImpl::CreateCustomCheckpoint(
           assert(checksum_value == kUnknownFileChecksum);
         }
       }
-      s = copy_file_cb(db_->GetName(), src_fname, 0, kTableFile, checksum_name,
+      s = copy_file_cb(db_->GetName(), src_fname, 0, type, checksum_name,
                        checksum_value);
     }
   }
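
The rule in the loop above is simple: try a hard link first, and once a link attempt fails with NotSupported (which happens across filesystems), fall back to copying for that file and every file after it. Below is a self-contained sketch of the same pattern on top of the public Env API; the helper name, paths, and the in-memory copy are illustrative, not the patch's code.

```cpp
#include <string>

#include "rocksdb/env.h"
#include "rocksdb/slice.h"
#include "rocksdb/status.h"

using namespace ROCKSDB_NAMESPACE;

// Hypothetical helper, not part of the patch: hard link src to dst when the
// two paths are on the same filesystem, otherwise copy the bytes.
Status LinkOrCopy(Env* env, const std::string& src, const std::string& dst,
                  bool* same_fs) {
  if (*same_fs) {
    Status s = env->LinkFile(src, dst);
    if (!s.IsNotSupported()) {
      return s;  // link succeeded, or failed for a reason other than cross-FS
    }
    *same_fs = false;  // remember: copy this and all subsequent files
  }
  // Whole-file copy via Env convenience helpers; fine for a sketch, though the
  // real checkpoint code streams the data instead of buffering it in memory.
  std::string contents;
  Status s = ReadFileToString(env, src, &contents);
  if (!s.ok()) {
    return s;
  }
  return WriteStringToFile(env, contents, dst, /*should_sync=*/true);
}
```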

@@ -16,30 +16,14 @@ namespace ROCKSDB_NAMESPACE {
 class CheckpointImpl : public Checkpoint {
  public:
   // Creates a Checkpoint object to be used for creating openable snapshots
   explicit CheckpointImpl(DB* db) : db_(db) {}
 
-  // Builds an openable snapshot of RocksDB on the same disk, which
-  // accepts an output directory on the same disk, and under the directory
-  // (1) hard-linked SST files pointing to existing live SST files
-  // SST files will be copied if output directory is on a different filesystem
-  // (2) a copied manifest files and other files
-  // The directory should not already exist and will be created by this API.
-  // The directory will be an absolute path
   using Checkpoint::CreateCheckpoint;
-  virtual Status CreateCheckpoint(const std::string& checkpoint_dir,
+  Status CreateCheckpoint(const std::string& checkpoint_dir,
                           uint64_t log_size_for_flush,
                           uint64_t* sequence_number_ptr) override;
 
-  // Exports all live SST files of a specified Column Family onto export_dir
-  // and returning SST files information in metadata.
-  // - SST files will be created as hard links when the directory specified
-  // is in the same partition as the db directory, copied otherwise.
-  // - export_dir should not already exist and will be created by this API.
-  // - Always triggers a flush.
   using Checkpoint::ExportColumnFamily;
-  virtual Status ExportColumnFamily(
-      ColumnFamilyHandle* handle, const std::string& export_dir,
+  Status ExportColumnFamily(ColumnFamilyHandle* handle,
+                            const std::string& export_dir,
                             ExportImportFilesMetaData** metadata) override;
 
   // Checkpoint logic can be customized by providing callbacks for link, copy,
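
For reference, a minimal sketch of driving ExportColumnFamily through the public Checkpoint interface; the helper name and export path are illustrative, and, per the TODO in the summary, blob files are not yet included in the exported file set.

```cpp
#include <memory>
#include <string>

#include "rocksdb/db.h"
#include "rocksdb/metadata.h"
#include "rocksdb/utilities/checkpoint.h"

using namespace ROCKSDB_NAMESPACE;

// Hypothetical helper: export the default column family of an open DB.
Status ExportDefaultColumnFamily(DB* db, const std::string& export_dir) {
  Checkpoint* checkpoint = nullptr;
  Status s = Checkpoint::Create(db, &checkpoint);
  if (!s.ok()) {
    return s;
  }
  std::unique_ptr<Checkpoint> checkpoint_guard(checkpoint);

  // Note: as of this patch, the export covers SST files only (no blob files).
  ExportImportFilesMetaData* metadata = nullptr;
  s = checkpoint->ExportColumnFamily(db->DefaultColumnFamily(), export_dir,
                                     &metadata);
  std::unique_ptr<ExportImportFilesMetaData> metadata_guard(metadata);
  return s;
}
```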

@@ -313,6 +313,60 @@ TEST_F(CheckpointTest, GetSnapshotLink) {
   }
 }
 
+TEST_F(CheckpointTest, CheckpointWithBlob) {
+  // Create a database with a blob file
+  Options options = CurrentOptions();
+  options.create_if_missing = true;
+  options.enable_blob_files = true;
+  options.min_blob_size = 0;
+
+  Reopen(options);
+
+  constexpr char key[] = "key";
+  constexpr char blob[] = "blob";
+
+  ASSERT_OK(Put(key, blob));
+  ASSERT_OK(Flush());
+
+  // Create a checkpoint
+  Checkpoint* checkpoint = nullptr;
+  ASSERT_OK(Checkpoint::Create(db_, &checkpoint));
+
+  std::unique_ptr<Checkpoint> checkpoint_guard(checkpoint);
+
+  ASSERT_OK(checkpoint->CreateCheckpoint(snapshot_name_));
+
+  // Make sure it contains the blob file
+  std::vector<std::string> files;
+  ASSERT_OK(env_->GetChildren(snapshot_name_, &files));
+
+  bool blob_file_found = false;
+  for (const auto& file : files) {
+    uint64_t number = 0;
+    FileType type = kWalFile;
+
+    if (ParseFileName(file, &number, &type) && type == kBlobFile) {
+      blob_file_found = true;
+      break;
+    }
+  }
+
+  ASSERT_TRUE(blob_file_found);
+
+  // Make sure the checkpoint can be opened and the blob value read
+  options.create_if_missing = false;
+  DB* checkpoint_db = nullptr;
+  ASSERT_OK(DB::Open(options, snapshot_name_, &checkpoint_db));
+
+  std::unique_ptr<DB> checkpoint_db_guard(checkpoint_db);
+
+  PinnableSlice value;
+  ASSERT_OK(checkpoint_db->Get(
+      ReadOptions(), checkpoint_db->DefaultColumnFamily(), key, &value));
+
+  ASSERT_EQ(value, blob);
+}
+
 TEST_F(CheckpointTest, ExportColumnFamilyWithLinks) {
   // Create a database
   auto options = CurrentOptions();
