Adding backup engine info (#454)

Branch: master
Author: Linh Tran Tuan (committed by GitHub, 4 years ago)
Parent: 5d5c3a4565
Commit: d3d10d54b1
Files changed:
  1. src/backup.rs (85 lines changed)
  2. src/slice_transform.rs (7 lines changed)
  3. tests/test_backup.rs (8 lines changed)

src/backup.rs

@@ -15,10 +15,25 @@
 use crate::{ffi, Error, DB};
-use libc::c_int;
+use libc::{c_int, c_uchar};
 use std::ffi::CString;
 use std::path::Path;

+/// Represents information about a backup, including the timestamp of the backup
+/// and its size (please note that the sum of all backups' sizes is bigger than the
+/// actual size of the backup directory because some data is shared by multiple
+/// backups). Backups are identified by their always-increasing IDs.
+pub struct BackupEngineInfo {
+    /// Timestamp of the backup
+    pub timestamp: i64,
+    /// ID of the backup
+    pub backup_id: u32,
+    /// Size of the backup
+    pub size: u64,
+    /// Number of files related to the backup
+    pub num_files: u32,
+}
+
 pub struct BackupEngine {
     inner: *mut ffi::rocksdb_backup_engine_t,
 }
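A minimal sketch of how the new struct's fields might be consumed, assuming it is exposed through the crate's public `backup` module like the other backup types; the `summarize` helper and the `infos` slice are illustrative stand-ins for whatever the caller gets back from `get_backup_info` (added further down in this change):

use rocksdb::backup::BackupEngineInfo;

// Summarize a list of backups: total reported size and the most recent entry.
// The summed size over-counts what is on disk, because backups share data.
fn summarize(infos: &[BackupEngineInfo]) {
    let total_size: u64 = infos.iter().map(|i| i.size).sum();
    println!("{} backup(s), {} bytes reported in total", infos.len(), total_size);

    if let Some(latest) = infos.iter().max_by_key(|i| i.backup_id) {
        println!(
            "latest backup: id={} timestamp={} files={}",
            latest.backup_id, latest.timestamp, latest.num_files
        );
    }
}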
@@ -58,10 +73,28 @@ impl BackupEngine {
         Ok(BackupEngine { inner: be })
     }

+    /// Captures the state of the database in the latest backup.
+    ///
+    /// Note: no flush before backup is performed. Users might want to
+    /// use `create_new_backup_flush` instead.
     pub fn create_new_backup(&mut self, db: &DB) -> Result<(), Error> {
+        self.create_new_backup_flush(db, false)
+    }
+
+    /// Captures the state of the database in the latest backup.
+    ///
+    /// Set `flush_before_backup` to `true` to avoid losing unflushed key/value
+    /// pairs from the memtable.
+    pub fn create_new_backup_flush(
+        &mut self,
+        db: &DB,
+        flush_before_backup: bool,
+    ) -> Result<(), Error> {
         unsafe {
-            ffi_try!(ffi::rocksdb_backup_engine_create_new_backup(
-                self.inner, db.inner,
+            ffi_try!(ffi::rocksdb_backup_engine_create_new_backup_flush(
+                self.inner,
+                db.inner,
+                flush_before_backup as c_uchar,
             ));
             Ok(())
         }
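For context, a rough sketch of how the new method would be called from user code; it assumes `BackupEngineOptions::default()` and the two-argument `BackupEngine::open` used in the test further below:

use rocksdb::backup::{BackupEngine, BackupEngineOptions};
use rocksdb::{DB, Error};

fn backup_with_flush(db: &DB, backup_dir: &str) -> Result<(), Error> {
    let backup_opts = BackupEngineOptions::default();
    let mut backup_engine = BackupEngine::open(&backup_opts, backup_dir)?;

    // `true` flushes the memtable first so unflushed key/value pairs are
    // included; `create_new_backup(db)` is now shorthand for passing `false`.
    backup_engine.create_new_backup_flush(db, true)
}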
@@ -137,6 +170,52 @@ impl BackupEngine {
         }
         Ok(())
     }

+    /// Checks that each file exists and that the size of the file matches our
+    /// expectations. It does not check file checksums.
+    ///
+    /// If this `BackupEngine` created the backup, it compares the files' current
+    /// sizes against the number of bytes written to them during creation.
+    /// Otherwise, it compares the files' current sizes against their sizes when
+    /// the `BackupEngine` was opened.
+    pub fn verify_backup(&self, backup_id: u32) -> Result<(), Error> {
+        unsafe {
+            ffi_try!(ffi::rocksdb_backup_engine_verify_backup(
+                self.inner, backup_id,
+            ));
+        }
+        Ok(())
+    }
+
+    /// Gets a list of all backups, together with the timestamp and size of each
+    /// backup (please note that the sum of all backups' sizes is bigger than the
+    /// actual size of the backup directory because some data is shared by multiple
+    /// backups). Backups are identified by their always-increasing IDs.
+    ///
+    /// You can call this function safely, even while other `BackupEngine`s are
+    /// performing backups on the same directory.
+    pub fn get_backup_info(&self) -> Vec<BackupEngineInfo> {
+        unsafe {
+            let i = ffi::rocksdb_backup_engine_get_backup_info(self.inner);
+            let n = ffi::rocksdb_backup_engine_info_count(i);
+
+            let mut info = Vec::with_capacity(n as usize);
+            for index in 0..n {
+                info.push(BackupEngineInfo {
+                    timestamp: ffi::rocksdb_backup_engine_info_timestamp(i, index),
+                    backup_id: ffi::rocksdb_backup_engine_info_backup_id(i, index),
+                    size: ffi::rocksdb_backup_engine_info_size(i, index),
+                    num_files: ffi::rocksdb_backup_engine_info_number_files(i, index),
+                })
+            }
+
+            // Destroy the backup info object now that its values have been copied out.
+            ffi::rocksdb_backup_engine_info_destroy(i);
+
+            info
+        }
+    }
 }

 impl BackupEngineOptions {
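A short sketch of using the two new methods together to list and verify backups; the helper below is illustrative and, as above, assumes `BackupEngineOptions::default()` and the two-argument `BackupEngine::open`:

use rocksdb::backup::{BackupEngine, BackupEngineOptions};
use rocksdb::Error;

fn verify_all_backups(backup_dir: &str) -> Result<(), Error> {
    let opts = BackupEngineOptions::default();
    let engine = BackupEngine::open(&opts, backup_dir)?;

    // List every backup in the directory and verify each one by ID.
    for info in engine.get_backup_info() {
        engine.verify_backup(info.backup_id)?;
        println!(
            "backup {} verified: {} files, {} bytes",
            info.backup_id, info.num_files, info.size
        );
    }
    Ok(())
}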

src/slice_transform.rs

@@ -111,9 +111,6 @@ pub unsafe extern "C" fn in_domain_callback(
 ) -> u8 {
     let cb = &mut *(raw_cb as *mut TransformCallback);
     let key = slice::from_raw_parts(raw_key as *const u8, key_len as usize);
-    if let Some(in_domain) = cb.in_domain_fn {
-        in_domain(key) as u8
-    } else {
-        0xff
-    }
+    cb.in_domain_fn
+        .map_or(0xff, |in_domain| in_domain(key) as u8)
 }
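The slice_transform.rs change is a pure refactor: the `if let` over the optional callback becomes `Option::map_or` with the same `0xff` fallback. A standalone sketch of the equivalence, using a hypothetical prefix check in place of `cb.in_domain_fn`:

// Hypothetical stand-in for `cb.in_domain_fn`: an optional key predicate.
fn has_user_prefix(key: &[u8]) -> bool {
    key.starts_with(b"user:")
}

fn main() {
    let in_domain_fn: Option<fn(&[u8]) -> bool> = Some(has_user_prefix);
    let key: &[u8] = b"user:42";

    // Original form.
    let a = if let Some(in_domain) = in_domain_fn {
        in_domain(key) as u8
    } else {
        0xff
    };

    // Refactored form: same result, expressed as a single `map_or`.
    let b = in_domain_fn.map_or(0xff, |in_domain| in_domain(key) as u8);

    assert_eq!(a, b);
}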

tests/test_backup.rs

@@ -38,6 +38,14 @@ fn backup_restore() {
         let mut backup_engine = BackupEngine::open(&backup_opts, &backup_path).unwrap();
         assert!(backup_engine.create_new_backup(&db).is_ok());

+        // check backup info
+        let info = backup_engine.get_backup_info();
+        assert!(!info.is_empty());
+        info.iter().for_each(|i| {
+            assert!(backup_engine.verify_backup(i.backup_id).is_ok());
+            assert!(i.size > 0);
+        });
+
         let mut restore_option = RestoreOptions::default();
         restore_option.set_keep_log_files(false); // true to keep log files
         let restore_status = backup_engine.restore_from_latest_backup(
