Improved doc comments.

master
Alexander Regueiro, 8 years ago
parent 57d4669d97
commit 882aaffd52

Changed files:
  1. src/rocksdb.rs (14 changed lines)
  2. src/rocksdb_options.rs (66 changed lines)

--- a/src/rocksdb.rs
+++ b/src/rocksdb.rs
@@ -37,7 +37,7 @@ pub fn new_cache(capacity: size_t) -> *mut ffi::rocksdb_cache_t {
     unsafe { ffi::rocksdb_cache_create_lru(capacity) }
 }

-/// RocksDB wrapper object.
+/// A RocksDB database.
 pub struct DB {
     inner: *mut ffi::rocksdb_t,
     cfs: BTreeMap<String, *mut ffi::rocksdb_column_family_handle_t>,
@@ -72,7 +72,7 @@ pub enum DBRecoveryMode {
     SkipAnyCorruptedRecords = 3,
 }

-/// An atomic batch of mutations.
+/// An atomic batch of write operations.
 ///
 /// Making an atomic commit of several writes:
 ///
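Since the doc comment above advertises atomicity, here is a minimal sketch of the pattern it describes, assuming the crate's usual `WriteBatch`/`DB::write` API (batch method return types have varied across revisions, hence the `let _ =`):

```rust
use rocksdb::{DB, WriteBatch};

let db = DB::open_default("_rust_rocksdb_batch_example").unwrap();
let mut batch = WriteBatch::default();
let _ = batch.put(b"key1", b"value1");
let _ = batch.put(b"key2", b"value2");
let _ = batch.delete(b"key1");
db.write(batch).unwrap(); // all three mutations are applied atomically, or none are
```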
@@ -346,14 +346,14 @@ impl DB {
         DB::open(&opts, path)
     }

-    /// Open the database with specified options
+    /// Open the database with the specified options.
     pub fn open<P: AsRef<Path>>(opts: &Options, path: P) -> Result<DB, Error> {
         DB::open_cf(opts, path, &[])
     }

-    /// Open a database with specified options and column family
+    /// Open a database with specified options and column family.
     ///
-    /// A column family must be created first by calling `DB::create_cf`
+    /// A column family must be created first by calling `DB::create_cf`.
     ///
     /// # Panics
     ///
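A sketch of the `open_cf` flow these comments describe; the path and the `"cf1"` name are made up for illustration, and the column family is created on a first open since `DB::create_cf` must run beforehand:

```rust
use rocksdb::{DB, Options};

let mut opts = Options::default();
opts.create_if_missing(true);
{
    // First open: create the database and register a column family.
    let mut db = DB::open(&opts, "_rust_rocksdb_cf_example").unwrap();
    let _ = db.create_cf("cf1", &opts);
}
// Later opens list the existing column families to attach to.
let db = DB::open_cf(&opts, "_rust_rocksdb_cf_example", &["cf1"]).unwrap();
```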
@@ -789,7 +789,7 @@ impl WriteBatch {
     /// Remove the database entry for key.
     ///
-    /// Returns Err if the key was not found
+    /// Returns an error if the key was not found.
     pub fn delete(&mut self, key: &[u8]) -> Result<(), Error> {
         unsafe {
             ffi::rocksdb_writebatch_delete(self.inner,
@@ -880,7 +880,7 @@ impl Default for ReadOptions {
     }
 }

-/// Wrapper around bytes stored in the database
+/// Vector of bytes stored in the database.
 pub struct DBVector {
     base: *mut u8,
     len: usize,
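`DBVector` is the buffer type `DB::get` hands back; it dereferences to `&[u8]`. A small usage sketch (path hypothetical):

```rust
use rocksdb::DB;

let db = DB::open_default("_rust_rocksdb_get_example").unwrap();
db.put(b"greeting", b"hello").unwrap();
match db.get(b"greeting") {
    Ok(Some(value)) => println!("{}", value.to_utf8().unwrap()), // DBVector derefs to &[u8]
    Ok(None) => println!("key not found"),
    Err(e) => println!("operational problem: {}", e),
}
```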

--- a/src/rocksdb_options.rs
+++ b/src/rocksdb_options.rs
@@ -125,7 +125,7 @@ impl Options {
     /// If true, the database will be created if it is missing.
     ///
-    /// Default: false
+    /// Default: `false`
     ///
     /// # Example
     ///
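For context, the flag documented here is the usual first knob set when opening a fresh database:

```rust
use rocksdb::{DB, Options};

let mut opts = Options::default();
opts.create_if_missing(true); // with the default `false`, opening a missing DB is an error
let db = DB::open(&opts, "_rust_rocksdb_create_example").unwrap();
```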
@@ -247,12 +247,12 @@ impl Options {
     }

     /// Sets the number of open files that can be used by the DB. You may need to
-    /// increase this if your database has a large working set. Value -1 means
+    /// increase this if your database has a large working set. Value `-1` means
     /// files opened are always kept open. You can estimate number of files based
     /// on target_file_size_base and target_file_size_multiplier for level-based
-    /// compaction. For universal-style compaction, you can usually set it to -1.
+    /// compaction. For universal-style compaction, you can usually set it to `-1`.
     ///
-    /// Default: -1
+    /// Default: `-1`
     ///
     /// # Example
     ///
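A one-line sketch of the setter (the value is arbitrary; pick it against your process's file-descriptor limit):

```rust
let mut opts = rocksdb::Options::default();
opts.set_max_open_files(10_000); // or -1 to keep every opened file open
```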
@@ -273,7 +273,7 @@ impl Options {
     /// This parameter should be set to true while storing data to
     /// filesystem like ext3 that can lose files after a reboot.
     ///
-    /// Default: false
+    /// Default: `false`
     ///
     /// # Example
     ///
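A sketch, per the ext3 caveat above:

```rust
let mut opts = rocksdb::Options::default();
opts.set_use_fsync(true); // fsync instead of fdatasync, e.g. on ext3
```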
@@ -291,9 +291,9 @@ impl Options {
     /// written, asynchronously, in the background. This operation can be used
     /// to smooth out write I/Os over time. Users shouldn't rely on it for
     /// persistency guarantee.
-    /// Issue one request for every bytes_per_sync written. 0 turns it off.
+    /// Issue one request for every bytes_per_sync written. `0` turns it off.
     ///
-    /// Default: 0
+    /// Default: `0`
     ///
     /// You may consider using rate_limiter to regulate write rate to device.
     /// When rate limiter is enabled, it automatically enables bytes_per_sync
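An illustration of the knob (1MiB is an arbitrary choice):

```rust
let mut opts = rocksdb::Options::default();
opts.set_bytes_per_sync(1_048_576); // request a sync per 1MiB written; 0 disables
```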
@@ -331,7 +331,7 @@ impl Options {
     /// cache. If the disk block is requested again this can result in
     /// additional disk I/O.
     ///
-    /// On WINDOWS system, files will be opened in "unbuffered I/O" mode
+    /// On WINDOWS systems, files will be opened in "unbuffered I/O" mode
     /// which means that data read from the disk will not be cached or
     /// bufferized. The hardware buffer of the devices may however still
     /// be used. Memory mapped files are not impacted by this parameter.
@@ -354,7 +354,7 @@ impl Options {
     /// Sets the number of shards used for table cache.
     ///
-    /// Default: 6
+    /// Default: `6`
     ///
     /// # Example
     ///
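Assuming this hunk documents the shard-bits setter (the method name is my guess; the argument is a power-of-two exponent, not a shard count):

```rust
let mut opts = rocksdb::Options::default();
opts.set_table_cache_num_shard_bits(4); // 2^4 = 16 shards to cut lock contention
```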
@@ -371,14 +371,14 @@ impl Options {
     }

     /// Sets the minimum number of write buffers that will be merged together
-    /// before writing to storage. If set to 1, then
+    /// before writing to storage. If set to `1`, then
     /// all write buffers are flushed to L0 as individual files and this increases
     /// read amplification because a get request has to check in all of these
     /// files. Also, an in-memory merge may result in writing lesser
     /// data to storage if there are duplicate records in each of these
     /// individual write buffers.
     ///
-    /// Default: 1
+    /// Default: `1`
     ///
     /// # Example
     ///
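A sketch; I am assuming this hunk belongs to `set_min_write_buffer_number`, since this file carries two setters with near-identical docs:

```rust
let mut opts = rocksdb::Options::default();
opts.set_min_write_buffer_number(2); // merge at least two memtables before flushing to L0
```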
@@ -410,9 +410,9 @@ impl Options {
     /// Increasing this value can reduce the number of reads to SST files
     /// done for conflict detection.
     ///
-    /// Setting this value to 0 will cause write buffers to be freed immediately
+    /// Setting this value to `0` will cause write buffers to be freed immediately
     /// after they are flushed.
-    /// If this value is set to -1, 'max_write_buffer_number' will be used.
+    /// If this value is set to `-1`, 'max_write_buffer_number' will be used.
     ///
     /// Default:
     /// If using a TransactionDB/OptimisticTransactionDB, the default value will
@@ -446,7 +446,7 @@ impl Options {
     /// Note that write_buffer_size is enforced per column family.
     /// See db_write_buffer_size for sharing memory across column families.
     ///
-    /// Default: 67108864 (64MiB)
+    /// Default: `0x4000000` (64MiB)
     ///
     /// Dynamically changeable through SetOptions() API
     ///
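A sketch of bumping the per-memtable budget above the 64MiB default:

```rust
let mut opts = rocksdb::Options::default();
opts.set_write_buffer_size(128 << 20); // 128MiB memtable before a flush is triggered
```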
@@ -473,7 +473,7 @@ impl Options {
     /// will be 200MB, total file size for level-2 will be 2GB,
     /// and total file size for level-3 will be 20GB.
     ///
-    /// Default: 268435456 (256MiB).
+    /// Default: `0x10000000` (256MiB).
     ///
     /// Dynamically changeable through SetOptions() API
     ///
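The level-sizing arithmetic above (base times multiplier per level) in code; the multiplier setter name and integer argument are my assumptions about this revision of the crate:

```rust
let mut opts = rocksdb::Options::default();
opts.set_max_bytes_for_level_base(256 << 20); // L1 holds up to 256MiB
opts.set_max_bytes_for_level_multiplier(10);  // then L2 up to 2.5GiB, L3 up to 25GiB, ...
```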
@@ -491,7 +491,7 @@ impl Options {
         }
     }

-    /// Default: 10
+    /// Default: `10`
     ///
     /// # Example
     ///
@@ -534,7 +534,7 @@ impl Options {
     /// be 2MB, and each file on level 2 will be 20MB,
     /// and each file on level-3 will be 200MB.
     ///
-    /// Default: 67108864 (64MiB)
+    /// Default: `0x4000000` (64MiB)
     ///
     /// Dynamically changeable through SetOptions() API
     ///
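The per-file counterpart of the per-level budget, sketched:

```rust
let mut opts = rocksdb::Options::default();
opts.set_target_file_size_base(64 << 20); // aim for 64MiB SST files on level-1
```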
@@ -553,14 +553,14 @@ impl Options {
     }

     /// Sets the minimum number of write buffers that will be merged together
-    /// before writing to storage. If set to 1, then
+    /// before writing to storage. If set to `1`, then
     /// all write buffers are flushed to L0 as individual files and this increases
     /// read amplification because a get request has to check in all of these
     /// files. Also, an in-memory merge may result in writing lesser
     /// data to storage if there are duplicate records in each of these
     /// individual write buffers.
     ///
-    /// Default: 1
+    /// Default: `1`
     ///
     /// # Example
     ///
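This second, near-duplicate doc presumably sits on `set_min_write_buffer_number_to_merge` (again my inference); it is usually tuned together with the buffer count:

```rust
let mut opts = rocksdb::Options::default();
opts.set_max_write_buffer_number(4);          // keep up to four memtables in memory
opts.set_min_write_buffer_number_to_merge(2); // merge pairs before they hit L0
```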
@@ -576,10 +576,10 @@ impl Options {
         }
     }

-    /// Sets the number of files to trigger level-0 compaction. A value <0 means that
+    /// Sets the number of files to trigger level-0 compaction. A value < `0` means that
     /// level-0 compaction will not be triggered by number of files at all.
     ///
-    /// Default: 4
+    /// Default: `4`
     ///
     /// Dynamically changeable through SetOptions() API
     ///
@@ -598,10 +598,10 @@ impl Options {
     }

     /// Sets the soft limit on number of level-0 files. We start slowing down writes at this
-    /// point. A value <0 means that no writing slow down will be triggered by
+    /// point. A value < `0` means that no writing slow down will be triggered by
     /// number of files in level-0.
     ///
-    /// Default: 20
+    /// Default: `20`
     ///
     /// Dynamically changeable through SetOptions() API
     ///
@@ -621,7 +621,7 @@ impl Options {
     /// Sets the maximum number of level-0 files. We stop writes at this point.
     ///
-    /// Default: 24
+    /// Default: `24`
     ///
     /// Dynamically changeable through SetOptions() API
     ///
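These last three hunks document a ladder: by default, compaction kicks in at 4 L0 files, writes are throttled at 20, and stopped at 24. A sketch of raising the whole ladder together, assuming the crate's usual setter names:

```rust
let mut opts = rocksdb::Options::default();
opts.set_level_zero_file_num_compaction_trigger(8); // compact L0 later
opts.set_level_zero_slowdown_writes_trigger(17);    // begin throttling writes here
opts.set_level_zero_stop_writes_trigger(24);        // hard stop for writes
```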
@@ -669,7 +669,7 @@ impl Options {
     /// LOW priority thread pool. For more information, see
     /// Env::SetBackgroundThreads
     ///
-    /// Default: 1
+    /// Default: `1`
     ///
     /// # Example
     ///
@@ -700,7 +700,7 @@ impl Options {
     /// HIGH priority thread pool. For more information, see
     /// Env::SetBackgroundThreads
     ///
-    /// Default: 1
+    /// Default: `1`
     ///
     /// # Example
     ///
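The two pools from this hunk and the previous one are typically raised in tandem on multi-core machines; a sketch:

```rust
let mut opts = rocksdb::Options::default();
opts.set_max_background_compactions(4); // LOW-priority pool
opts.set_max_background_flushes(2);     // HIGH-priority pool
```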
@@ -719,7 +719,7 @@ impl Options {
     /// Disables automatic compactions. Manual compactions can still
     /// be issued on this column family
     ///
-    /// Default: false
+    /// Default: `false`
     ///
     /// Dynamically changeable through SetOptions() API
     ///
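A sketch of the bulk-load idiom this option enables:

```rust
let mut opts = rocksdb::Options::default();
opts.set_disable_auto_compactions(true); // e.g. while bulk loading; compact manually afterwards
```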
@@ -741,9 +741,9 @@ impl Options {
         }
     }

-    /// Measure IO stats in compactions and flushes, if true.
+    /// Measure IO stats in compactions and flushes, if `true`.
     ///
-    /// Default: false
+    /// Default: `false`
     ///
     /// # Example
     ///
@@ -759,7 +759,7 @@ impl Options {
         }
     }

-    /// Recovery mode to control the consistency while replaying WAL
+    /// Recovery mode to control the consistency while replaying WAL.
     ///
     /// Default: DBRecoveryMode::PointInTime
     ///
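A sketch of picking a stricter mode than the `PointInTime` default, using the `DBRecoveryMode` enum from the first file's diff:

```rust
use rocksdb::{DBRecoveryMode, Options};

let mut opts = Options::default();
opts.set_wal_recovery_mode(DBRecoveryMode::AbsoluteConsistency);
```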
@@ -797,9 +797,9 @@ impl Options {
         }
     }

-    /// If not zero, dump rocksdb.stats to LOG every stats_dump_period_sec
+    /// If not zero, dump `rocksdb.stats` to LOG every `stats_dump_period_sec`.
     ///
-    /// Default: 600 (10 min)
+    /// Default: `600` (10 mins)
     ///
     /// # Example
     ///
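Statistics collection has to be switched on for the periodic dump to say anything; a sketch:

```rust
let mut opts = rocksdb::Options::default();
opts.enable_statistics();            // collect the counters that get dumped
opts.set_stats_dump_period_sec(300); // write rocksdb.stats to LOG every 5 minutes
```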
@@ -815,7 +815,7 @@ impl Options {
         }
     }

-    /// Sets the number of levels for this database
+    /// Sets the number of levels for this database.
     pub fn set_num_levels(&mut self, n: c_int) {
         unsafe {
             ffi::rocksdb_options_set_num_levels(self.inner, n);
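Since the full signature is visible in this hunk, a usage line is safe to show:

```rust
let mut opts = rocksdb::Options::default();
opts.set_num_levels(4); // a shallower LSM tree than RocksDB's usual 7 levels
```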
