use bindgen to generate ffi

Closes #128.
master
Nikhil Benesch 7 years ago
parent 633caccace
commit 9d0a5e2819
No known key found for this signature in database
GPG Key ID: C5779C9897F0FCCB
  1. 1
      librocksdb-sys/Cargo.toml
  2. 18
      librocksdb-sys/build.rs
  3. 1021
      librocksdb-sys/src/lib.rs
  4. 18
      src/db.rs
  5. 12
      src/db_options.rs
  6. 1
      src/lib.rs

@ -24,3 +24,4 @@ const-cstr = "0.2"
[build-dependencies]
gcc = { version = "0.3", features = ["parallel"] }
make-cmd = "0.1"
bindgen = "0.29"

@ -1,6 +1,9 @@
extern crate gcc;
extern crate bindgen;
use std::env;
use std::fs;
use std::path::PathBuf;
fn link(name: &str, bundled: bool) {
use std::env::var;
@ -27,7 +30,18 @@ fn build_rocksdb() {
println!("cargo:rerun-if-changed=build.rs");
println!("cargo:rerun-if-changed=rocksdb/");
let mut config = gcc::Config::new(); let bindings = bindgen::Builder::default()
.header("rocksdb/include/rocksdb/c.h")
.ctypes_prefix("libc")
.generate()
.expect("unable to generate rocksdb bindings");
let out_path = PathBuf::from(env::var("OUT_DIR").unwrap());
bindings
.write_to_file(out_path.join("bindings.rs"))
.expect("unable to write rocksdb bindings");
let mut config = gcc::Build::new();
config.include("rocksdb/include/");
config.include("rocksdb/");
config.include("rocksdb/third-party/gtest-1.7.0/fused-src/");
@ -108,7 +122,7 @@ fn build_rocksdb() {
}
fn build_snappy() {
let mut config = gcc::Config::new(); let mut config = gcc::Build::new();
config.include("snappy/");
config.include(".");

File diff suppressed because it is too large Load Diff

@ -55,10 +55,10 @@ pub enum DBCompactionStyle {
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum DBRecoveryMode {
TolerateCorruptedTailRecords = ffi::rocksdb_recovery_mode_tolerate_corrupted_tail_records as isize, TolerateCorruptedTailRecords = ffi::rocksdb_tolerate_corrupted_tail_records_recovery as isize,
AbsoluteConsistency = ffi::rocksdb_recovery_mode_absolute_consistency as isize, AbsoluteConsistency = ffi::rocksdb_absolute_consistency_recovery as isize,
PointInTime = ffi::rocksdb_recovery_mode_point_in_time as isize, PointInTime = ffi::rocksdb_point_in_time_recovery as isize,
SkipAnyCorruptedRecord = ffi::rocksdb_recovery_mode_skip_any_corrupted_record as isize, SkipAnyCorruptedRecord = ffi::rocksdb_skip_any_corrupted_records_recovery as isize,
}
/// An atomic batch of write operations.
@ -616,22 +616,22 @@ impl DB {
.map(|cf| CString::new(cf.as_bytes()).unwrap())
.collect();
let cfnames: Vec<_> = c_cfs.iter().map(|cf| cf.as_ptr()).collect(); let mut cfnames: Vec<_> = c_cfs.iter().map(|cf| cf.as_ptr()).collect();
// These handles will be populated by DB.
let mut cfhandles: Vec<_> = cfs_v.iter().map(|_| ptr::null_mut()).collect();
// TODO(tyler) allow options to be passed in.
let cfopts: Vec<_> = cfs_v.iter() let mut cfopts: Vec<_> = cfs_v.iter()
.map(|_| unsafe { ffi::rocksdb_options_create() as *const _ })
.collect();
unsafe {
db = ffi_try!(ffi::rocksdb_open_column_families(opts.inner,
cpath.as_ptr() as *const _, cpath.as_ptr(),
cfs_v.len() as c_int,
cfnames.as_ptr() as *const _, cfnames.as_mut_ptr(),
cfopts.as_ptr(), cfopts.as_mut_ptr(),
cfhandles.as_mut_ptr()));
}

@ -190,9 +190,9 @@ impl Options {
/// ```
pub fn set_compression_per_level(&mut self, level_types: &[DBCompressionType]) {
unsafe {
let level_types: Vec<_> = level_types.iter().map(|&t| t as c_int).collect(); let mut level_types: Vec<_> = level_types.iter().map(|&t| t as c_int).collect();
ffi::rocksdb_options_set_compression_per_level(self.inner,
level_types.as_ptr(), level_types.as_mut_ptr(),
level_types.len() as size_t)
}
}
@ -369,10 +369,6 @@ impl Options {
allow as c_uchar) }
}
pub fn set_disable_data_sync(&mut self, disable: bool) {
unsafe { ffi::rocksdb_options_set_disable_data_sync(self.inner, disable as c_int) }
}
/// Enable direct I/O mode for reading
/// they may or may not improve performance depending on the use case
///
@ -603,9 +599,9 @@ impl Options {
/// use rocksdb::Options;
///
/// let mut opts = Options::default();
/// opts.set_max_bytes_for_level_multiplier(4); /// opts.set_max_bytes_for_level_multiplier(4.0);
/// ```
pub fn set_max_bytes_for_level_multiplier(&mut self, mul: i32) { pub fn set_max_bytes_for_level_multiplier(&mut self, mul: f64) {
unsafe {
ffi::rocksdb_options_set_max_bytes_for_level_multiplier(self.inner, mul);
}

@ -124,7 +124,6 @@ pub struct BlockBasedOptions {
/// opts.set_max_open_files(10000);
/// opts.set_use_fsync(false);
/// opts.set_bytes_per_sync(8388608);
/// opts.set_disable_data_sync(false);
/// opts.optimize_for_point_lookup(1024);
/// opts.set_table_cache_num_shard_bits(6);
/// opts.set_max_write_buffer_number(32);

Loading…
Cancel
Save