Add checkpoint support to BlobDB (#7959)

Summary:
The patch adds checkpoint support to BlobDB. Just like table files, blob files
are hard linked into the checkpoint directory when it is on the same filesystem
as the database, and copied otherwise.
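
As a usage illustration (not part of this patch), here is a minimal sketch of
taking a checkpoint of a database with blob files enabled; the paths and option
values below are placeholders:

  #include <cassert>

  #include "rocksdb/db.h"
  #include "rocksdb/options.h"
  #include "rocksdb/utilities/checkpoint.h"

  int main() {
    rocksdb::Options options;
    options.create_if_missing = true;
    options.enable_blob_files = true;  // store large values in blob files
    options.min_blob_size = 0;         // for this sketch, every value is a blob

    rocksdb::DB* db = nullptr;
    rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/blobdb", &db);
    assert(s.ok());

    s = db->Put(rocksdb::WriteOptions(), "key", "blob value");
    assert(s.ok());
    s = db->Flush(rocksdb::FlushOptions());  // write the value to a blob file
    assert(s.ok());

    // Blob and SST files are hard linked into the checkpoint directory when it
    // is on the same filesystem as the database, and copied otherwise.
    rocksdb::Checkpoint* checkpoint = nullptr;
    s = rocksdb::Checkpoint::Create(db, &checkpoint);
    assert(s.ok());
    s = checkpoint->CreateCheckpoint("/tmp/blobdb_checkpoint");
    assert(s.ok());

    delete checkpoint;
    delete db;
    return 0;
  }

The checkpoint directory can afterwards be opened like any other RocksDB
database, which the new test below also verifies.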

TODO: Add support for blob files to `ExportColumnFamily` and to the checksum
verification logic used by backup/restore.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/7959

Test Plan: Ran `make check` and the crash test for a while.

Reviewed By: riversand963

Differential Revision: D26434768

Pulled By: ltamasi

fbshipit-source-id: 994be55a8dc08133028250760fca440d2c7c4dc5
Branch: main
Author: Levi Tamasi (committed by Facebook GitHub Bot)
Commit: dab4fe5bcd
Parent: 0743eba0c4
Files changed:
  db_stress_tool/db_stress_test_base.cc   (5 changed lines)
  include/rocksdb/utilities/checkpoint.h  (14 changed lines)
  tools/db_crashtest.py                   (1 changed line)
  utilities/checkpoint/checkpoint_impl.cc (38 changed lines)
  utilities/checkpoint/checkpoint_impl.h  (28 changed lines)
  utilities/checkpoint/checkpoint_test.cc (54 changed lines)

--- a/db_stress_tool/db_stress_test_base.cc
+++ b/db_stress_tool/db_stress_test_base.cc
@@ -2217,12 +2217,11 @@ void StressTest::Open() {
   if ((options_.enable_blob_files || options_.enable_blob_garbage_collection ||
        FLAGS_allow_setting_blob_options_dynamically) &&
       (FLAGS_use_merge || FLAGS_enable_compaction_filter ||
-       FLAGS_checkpoint_one_in > 0 || FLAGS_backup_one_in > 0 ||
-       FLAGS_best_efforts_recovery)) {
+       FLAGS_backup_one_in > 0 || FLAGS_best_efforts_recovery)) {
     fprintf(
         stderr,
         "Integrated BlobDB is currently incompatible with Merge, compaction "
-        "filters, checkpoints, backup/restore, and best-effort recovery\n");
+        "filters, backup/restore, and best-effort recovery\n");
     exit(1);
   }

--- a/include/rocksdb/utilities/checkpoint.h
+++ b/include/rocksdb/utilities/checkpoint.h
@@ -24,13 +24,13 @@ class Checkpoint {
   // Creates a Checkpoint object to be used for creating openable snapshots
   static Status Create(DB* db, Checkpoint** checkpoint_ptr);
 
-  // Builds an openable snapshot of RocksDB on the same disk, which
-  // accepts an output directory on the same disk, and under the directory
-  // (1) hard-linked SST files pointing to existing live SST files
-  // SST files will be copied if output directory is on a different filesystem
-  // (2) a copied manifest files and other files
-  // The directory should not already exist and will be created by this API.
-  // The directory will be an absolute path
+  // Builds an openable snapshot of RocksDB. checkpoint_dir should contain an
+  // absolute path. The specified directory should not exist, since it will be
+  // created by the API.
+  // When a checkpoint is created,
+  // (1) SST and blob files are hard linked if the output directory is on the
+  //     same filesystem as the database, and copied otherwise.
+  // (2) other required files (like MANIFEST) are always copied.
   // log_size_for_flush: if the total log file size is equal or larger than
   // this value, then a flush is triggered for all the column families. The
   // default value is 0, which means flush is always triggered. If you move
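
For reference, a minimal sketch of calling the API documented above; the 64 MB
threshold, directory name, and helper function are illustrative only and not
part of this change:

  #include <cassert>
  #include <cstdint>
  #include <string>

  #include "rocksdb/db.h"
  #include "rocksdb/utilities/checkpoint.h"

  // Takes a checkpoint of an already-open DB, flushing only if the live WAL
  // is at least 64 MB; smaller WALs are copied into the checkpoint instead.
  void TakeCheckpoint(rocksdb::DB* db, const std::string& checkpoint_dir) {
    rocksdb::Checkpoint* checkpoint = nullptr;
    rocksdb::Status s = rocksdb::Checkpoint::Create(db, &checkpoint);
    assert(s.ok());

    uint64_t sequence_number = 0;
    s = checkpoint->CreateCheckpoint(checkpoint_dir,
                                     64ULL << 20 /* log_size_for_flush */,
                                     &sequence_number);
    assert(s.ok());

    delete checkpoint;
  }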

@ -279,7 +279,6 @@ blob_params = {
"use_merge": 0, "use_merge": 0,
"enable_compaction_filter": 0, "enable_compaction_filter": 0,
"backup_one_in": 0, "backup_one_in": 0,
"checkpoint_one_in": 0,
} }
def finalize_and_sanitize(src_params): def finalize_and_sanitize(src_params):

--- a/utilities/checkpoint/checkpoint_impl.cc
+++ b/utilities/checkpoint/checkpoint_impl.cc
@@ -14,6 +14,7 @@
 #include <algorithm>
 #include <cinttypes>
 #include <string>
+#include <tuple>
 #include <vector>
 
 #include "db/wal_manager.h"
@@ -269,10 +270,11 @@ Status CheckpointImpl::CreateCustomCheckpoint(
   size_t wal_size = live_wal_files.size();
 
-  // process live files, non-table files first
+  // process live files, non-table, non-blob files first
   std::string manifest_fname, current_fname;
-  // record table files for processing next
-  std::vector<std::pair<std::string, uint64_t>> live_table_files;
+  // record table and blob files for processing next
+  std::vector<std::tuple<std::string, uint64_t, FileType>>
+      live_table_and_blob_files;
 
   for (auto& live_file : live_files) {
     if (!s.ok()) {
       break;
@ -284,8 +286,8 @@ Status CheckpointImpl::CreateCustomCheckpoint(
s = Status::Corruption("Can't parse file name. This is very bad"); s = Status::Corruption("Can't parse file name. This is very bad");
break; break;
} }
// we should only get sst, options, manifest and current files here // we should only get sst, blob, options, manifest and current files here
assert(type == kTableFile || type == kDescriptorFile || assert(type == kTableFile || type == kBlobFile || type == kDescriptorFile ||
type == kCurrentFile || type == kOptionsFile); type == kCurrentFile || type == kOptionsFile);
assert(live_file.size() > 0 && live_file[0] == '/'); assert(live_file.size() > 0 && live_file[0] == '/');
if (type == kCurrentFile) { if (type == kCurrentFile) {
@@ -297,15 +299,16 @@ Status CheckpointImpl::CreateCustomCheckpoint(
     } else if (type == kDescriptorFile) {
       manifest_fname = live_file;
     }
-    if (type != kTableFile) {
-      // copy non-table files here
+
+    if (type != kTableFile && type != kBlobFile) {
+      // copy non-table, non-blob files here
       // * if it's kDescriptorFile, limit the size to manifest_file_size
       s = copy_file_cb(db_->GetName(), live_file,
                        (type == kDescriptorFile) ? manifest_file_size : 0, type,
                        kUnknownFileChecksumFuncName, kUnknownFileChecksum);
     } else {
-      // process table files below
-      live_table_files.push_back(make_pair(live_file, number));
+      // process table and blob files below
+      live_table_and_blob_files.emplace_back(live_file, number, type);
     }
   }
@@ -322,19 +325,21 @@ Status CheckpointImpl::CreateCustomCheckpoint(
                                             manifest_file_size, checksum_list.get());
   }
 
-  // copy/hard link live table files
-  for (auto& ltf : live_table_files) {
+  // copy/hard link live table and blob files
+  for (const auto& file : live_table_and_blob_files) {
     if (!s.ok()) {
       break;
     }
-    std::string& src_fname = ltf.first;
-    uint64_t number = ltf.second;
+
+    const std::string& src_fname = std::get<0>(file);
+    const uint64_t number = std::get<1>(file);
+    const FileType type = std::get<2>(file);
 
     // rules:
-    // * for kTableFile, attempt hard link instead of copy.
+    // * for kTableFile/kBlobFile, attempt hard link instead of copy.
     // * but can't hard link across filesystems.
     if (same_fs) {
-      s = link_file_cb(db_->GetName(), src_fname, kTableFile);
+      s = link_file_cb(db_->GetName(), src_fname, type);
       if (s.IsNotSupported()) {
         same_fs = false;
         s = Status::OK();
@@ -347,6 +352,7 @@ Status CheckpointImpl::CreateCustomCheckpoint(
       // we ignore the checksums either they are not required or we failed to
       // obtain the checksum lsit for old table files that have no file
       // checksums
+      // TODO: support this verification for blob files
       if (get_live_table_checksum) {
         // find checksum info for table files
         Status search = checksum_list->SearchOneFileChecksum(
@@ -359,7 +365,7 @@ Status CheckpointImpl::CreateCustomCheckpoint(
           assert(checksum_value == kUnknownFileChecksum);
         }
       }
-      s = copy_file_cb(db_->GetName(), src_fname, 0, kTableFile, checksum_name,
+      s = copy_file_cb(db_->GetName(), src_fname, 0, type, checksum_name,
                        checksum_value);
     }
   }

--- a/utilities/checkpoint/checkpoint_impl.h
+++ b/utilities/checkpoint/checkpoint_impl.h
@@ -16,31 +16,15 @@ namespace ROCKSDB_NAMESPACE {
 
 class CheckpointImpl : public Checkpoint {
  public:
-  // Creates a Checkpoint object to be used for creating openable snapshots
   explicit CheckpointImpl(DB* db) : db_(db) {}
 
-  // Builds an openable snapshot of RocksDB on the same disk, which
-  // accepts an output directory on the same disk, and under the directory
-  // (1) hard-linked SST files pointing to existing live SST files
-  // SST files will be copied if output directory is on a different filesystem
-  // (2) a copied manifest files and other files
-  // The directory should not already exist and will be created by this API.
-  // The directory will be an absolute path
-  using Checkpoint::CreateCheckpoint;
-  virtual Status CreateCheckpoint(const std::string& checkpoint_dir,
-                                  uint64_t log_size_for_flush,
-                                  uint64_t* sequence_number_ptr) override;
+  Status CreateCheckpoint(const std::string& checkpoint_dir,
+                          uint64_t log_size_for_flush,
+                          uint64_t* sequence_number_ptr) override;
 
-  // Exports all live SST files of a specified Column Family onto export_dir
-  // and returning SST files information in metadata.
-  // - SST files will be created as hard links when the directory specified
-  //   is in the same partition as the db directory, copied otherwise.
-  // - export_dir should not already exist and will be created by this API.
-  // - Always triggers a flush.
-  using Checkpoint::ExportColumnFamily;
-  virtual Status ExportColumnFamily(
-      ColumnFamilyHandle* handle, const std::string& export_dir,
-      ExportImportFilesMetaData** metadata) override;
+  Status ExportColumnFamily(ColumnFamilyHandle* handle,
+                            const std::string& export_dir,
+                            ExportImportFilesMetaData** metadata) override;
 
   // Checkpoint logic can be customized by providing callbacks for link, copy,
   // or create.
--- a/utilities/checkpoint/checkpoint_test.cc
+++ b/utilities/checkpoint/checkpoint_test.cc
@@ -313,6 +313,60 @@ TEST_F(CheckpointTest, GetSnapshotLink) {
   }
 }
 
+TEST_F(CheckpointTest, CheckpointWithBlob) {
+  // Create a database with a blob file
+  Options options = CurrentOptions();
+  options.create_if_missing = true;
+  options.enable_blob_files = true;
+  options.min_blob_size = 0;
+
+  Reopen(options);
+
+  constexpr char key[] = "key";
+  constexpr char blob[] = "blob";
+
+  ASSERT_OK(Put(key, blob));
+  ASSERT_OK(Flush());
+
+  // Create a checkpoint
+  Checkpoint* checkpoint = nullptr;
+  ASSERT_OK(Checkpoint::Create(db_, &checkpoint));
+
+  std::unique_ptr<Checkpoint> checkpoint_guard(checkpoint);
+
+  ASSERT_OK(checkpoint->CreateCheckpoint(snapshot_name_));
+
+  // Make sure it contains the blob file
+  std::vector<std::string> files;
+  ASSERT_OK(env_->GetChildren(snapshot_name_, &files));
+
+  bool blob_file_found = false;
+  for (const auto& file : files) {
+    uint64_t number = 0;
+    FileType type = kWalFile;
+
+    if (ParseFileName(file, &number, &type) && type == kBlobFile) {
+      blob_file_found = true;
+      break;
+    }
+  }
+
+  ASSERT_TRUE(blob_file_found);
+
+  // Make sure the checkpoint can be opened and the blob value read
+  options.create_if_missing = false;
+
+  DB* checkpoint_db = nullptr;
+  ASSERT_OK(DB::Open(options, snapshot_name_, &checkpoint_db));
+
+  std::unique_ptr<DB> checkpoint_db_guard(checkpoint_db);
+
+  PinnableSlice value;
+  ASSERT_OK(checkpoint_db->Get(
+      ReadOptions(), checkpoint_db->DefaultColumnFamily(), key, &value));
+
+  ASSERT_EQ(value, blob);
+}
+
 TEST_F(CheckpointTest, ExportColumnFamilyWithLinks) {
   // Create a database
   auto options = CurrentOptions();
