Listing of changes

- JavaDoc readability of RocksObject JavaDoc
- JavaDoc improvements BlockBasedTableConfig, GenericRateLimiterConfig, RocksDB
- JavaDoc improvements MemTableConfig
- JavaDoc improvements RocksObject
- JavaDoc improvements GenericRateLimiterConfig
- JavaDoc improvements ReadOptions
- JavaDoc improvements RateLimiterConfig
- JavaDoc improvements RestoreOptions
- JavaDoc improvements RestoreBackupableDB
- JavaDoc improvements BlockBasedTableConfig
- JavaDoc improvements Options
- JavaDoc improvements BackupableDB and BackupableDBOptions
main
fyrz 10 years ago
parent df3373fbf7
commit a5757ff3c2
  1. 23
      java/org/rocksdb/BackupableDB.java
  2. 50
      java/org/rocksdb/BackupableDBOptions.java
  3. 23
      java/org/rocksdb/BlockBasedTableConfig.java
  4. 34
      java/org/rocksdb/GenericRateLimiterConfig.java
  5. 2
      java/org/rocksdb/MemTableConfig.java
  6. 204
      java/org/rocksdb/Options.java
  7. 9
      java/org/rocksdb/RateLimiterConfig.java
  8. 5
      java/org/rocksdb/ReadOptions.java
  9. 21
      java/org/rocksdb/RestoreBackupableDB.java
  10. 14
      java/org/rocksdb/RestoreOptions.java
  11. 9
      java/org/rocksdb/RocksDB.java
  12. 77
      java/org/rocksdb/RocksObject.java

@ -8,19 +8,19 @@ package org.rocksdb;
/** /**
* A subclass of RocksDB which supports backup-related operations. * A subclass of RocksDB which supports backup-related operations.
* *
* @see BackupableDBOptions * @see org.rocksdb.BackupableDBOptions
*/ */
public class BackupableDB extends RocksDB { public class BackupableDB extends RocksDB {
/** /**
* Open a BackupableDB under the specified path. * Open a {@code BackupableDB} under the specified path.
* Note that the backup path should be set properly in the * Note that the backup path should be set properly in the
* input BackupableDBOptions. * input BackupableDBOptions.
* *
* @param opt options for db. * @param opt {@link org.rocksdb.Options} to set for the database.
* @param bopt backup related options. * @param bopt {@link org.rocksdb.BackupableDBOptions} to use.
* @param the db path for storing data. The path for storing * @param db_path Path to store data to. The path for storing the backup should be
* backup should be specified in the BackupableDBOptions. * specified in the {@link org.rocksdb.BackupableDBOptions}.
* @return reference to the opened BackupableDB. * @return BackupableDB reference to the opened database.
*/ */
public static BackupableDB open( public static BackupableDB open(
Options opt, BackupableDBOptions bopt, String db_path) Options opt, BackupableDBOptions bopt, String db_path)
@ -61,10 +61,9 @@ public class BackupableDB extends RocksDB {
/** /**
* Close the BackupableDB instance and release resource. * Close the BackupableDB instance and release resource.
* *
* Internally, BackupableDB owns the rocksdb::DB pointer to its * Internally, BackupableDB owns the {@code rocksdb::DB} pointer to its associated
* associated RocksDB. The release of that RocksDB pointer is * {@link org.rocksdb.RocksDB}. The release of that RocksDB pointer is handled in the destructor
* handled in the destructor of the c++ rocksdb::BackupableDB and * of the c++ {@code rocksdb::BackupableDB} and should be transparent to Java developers.
* should be transparent to Java developers.
*/ */
@Override public synchronized void close() { @Override public synchronized void close() {
if (isInitialized()) { if (isInitialized()) {
@ -74,7 +73,7 @@ public class BackupableDB extends RocksDB {
/** /**
* A protected construction that will be used in the static factory * A protected construction that will be used in the static factory
* method BackupableDB.open(). * method {@link #open(Options, BackupableDBOptions, String)}.
*/ */
protected BackupableDB() { protected BackupableDB() {
super(); super();

@ -7,33 +7,41 @@ package org.rocksdb;
/** /**
* BackupableDBOptions to control the behavior of a backupable database. * BackupableDBOptions to control the behavior of a backupable database.
* It will be used during the creation of a BackupableDB. * It will be used during the creation of a {@link org.rocksdb.BackupableDB}.
* *
* Note that dispose() must be called before an Options instance * Note that dispose() must be called before an Options instance
* become out-of-scope to release the allocated memory in c++. * become out-of-scope to release the allocated memory in c++.
* *
* @param path Where to keep the backup files. Has to be different than dbname. * @see org.rocksdb.BackupableDB
Best to set this to dbname_ + "/backups"
* @param shareTableFiles If share_table_files == true, backup will assume that
* table files with same name have the same contents. This enables
* incremental backups and avoids unnecessary data copies. If
* share_table_files == false, each backup will be on its own and will not
* share any data with other backups. default: true
* @param sync If sync == true, we can guarantee you'll get consistent backup
* even on a machine crash/reboot. Backup process is slower with sync
* enabled. If sync == false, we don't guarantee anything on machine reboot.
* However, chances are some of the backups are consistent. Default: true
* @param destroyOldData If true, it will delete whatever backups there are
* already. Default: false
* @param backupLogFiles If false, we won't backup log files. This option can be
* useful for backing up in-memory databases where log file are persisted,
* but table files are in memory. Default: true
* @param backupRateLimit Max bytes that can be transferred in a second during
* backup. If 0 or negative, then go as fast as you can. Default: 0
* @param restoreRateLimit Max bytes that can be transferred in a second during
* restore. If 0 or negative, then go as fast as you can. Default: 0
*/ */
public class BackupableDBOptions extends RocksObject { public class BackupableDBOptions extends RocksObject {
/**
* BackupableDBOptions constructor
*
* @param path Where to keep the backup files. Has to be different than db name.
* Best to set this to {@code db name_ + "/backups"}
* @param shareTableFiles If {@code share_table_files == true}, backup will assume
* that table files with same name have the same contents. This enables incremental
* backups and avoids unnecessary data copies. If {@code share_table_files == false},
* each backup will be on its own and will not share any data with other backups.
* Default: true
* @param sync If {@code sync == true}, we can guarantee you'll get consistent backup
* even on a machine crash/reboot. Backup process is slower with sync enabled.
* If {@code sync == false}, we don't guarantee anything on machine reboot.
* However, chances are some of the backups are consistent.
* Default: true
* @param destroyOldData If true, it will delete whatever backups there are already.
* Default: false
* @param backupLogFiles If false, we won't backup log files. This option can be
* useful for backing up in-memory databases where log files are persisted, but table
* files are in memory.
* Default: true
* @param backupRateLimit Max bytes that can be transferred in a second during backup.
* If 0 or negative, then go as fast as you can. Default: 0
* @param restoreRateLimit Max bytes that can be transferred in a second during restore.
* If 0 or negative, then go as fast as you can. Default: 0
*/
public BackupableDBOptions(String path, boolean shareTableFiles, boolean sync, public BackupableDBOptions(String path, boolean shareTableFiles, boolean sync,
boolean destroyOldData, boolean backupLogFiles, long backupRateLimit, boolean destroyOldData, boolean backupLogFiles, long backupRateLimit,
long restoreRateLimit) { long restoreRateLimit) {

@ -27,7 +27,7 @@ public class BlockBasedTableConfig extends TableFormatConfig {
/** /**
* Disable block cache. If this is set to true, * Disable block cache. If this is set to true,
* then no block cache should be used, and the block_cache should * then no block cache should be used, and the block_cache should
* point to a nullptr object. * point to a {@code nullptr} object.
* Default: false * Default: false
* *
* @param noBlockCache if use block cache * @param noBlockCache if use block cache
@ -69,7 +69,7 @@ public class BlockBasedTableConfig extends TableFormatConfig {
* Controls the number of shards for the block cache. * Controls the number of shards for the block cache.
* This is applied only if cacheSize is set to non-negative. * This is applied only if cacheSize is set to non-negative.
* *
* @param numShardBits the number of shard bits. The resulting * @param blockCacheNumShardBits the number of shard bits. The resulting
* number of shards would be 2 ^ numShardBits. Any negative * number of shards would be 2 ^ numShardBits. Any negative
* number means use default settings." * number means use default settings."
* @return the reference to the current option. * @return the reference to the current option.
@ -176,13 +176,14 @@ public class BlockBasedTableConfig extends TableFormatConfig {
/** /**
* Use the specified filter policy to reduce disk reads. * Use the specified filter policy to reduce disk reads.
* *
* Filter should not be disposed before options instances using this filter is * {@link org.rocksdb.Filter} should not be disposed before options instances
* disposed. If dispose() function is not called, then filter object will be * using this filter is disposed. If {@link Filter#dispose()} function is not
* GC'd automatically. * called, then filter object will be GC'd automatically.
* *
* Filter instance can be re-used in multiple options instances. * {@link org.rocksdb.Filter} instance can be re-used in multiple options
* instances.
* *
* @param Filter Filter Policy java instance. * @param filter {@link org.rocksdb.Filter} Filter Policy java instance.
* @return the reference to the current config. * @return the reference to the current config.
*/ */
public BlockBasedTableConfig setFilter(Filter filter) { public BlockBasedTableConfig setFilter(Filter filter) {
@ -206,7 +207,7 @@ public class BlockBasedTableConfig extends TableFormatConfig {
If not specified, each "table reader" object will pre-load index/filter If not specified, each "table reader" object will pre-load index/filter
block during table initialization. block during table initialization.
* *
* @param index and filter blocks should be put in block cache. * @param cacheIndexAndFilterBlocks and filter blocks should be put in block cache.
* @return the reference to the current config. * @return the reference to the current config.
*/ */
public BlockBasedTableConfig setCacheIndexAndFilterBlocks( public BlockBasedTableConfig setCacheIndexAndFilterBlocks(
@ -233,7 +234,7 @@ public class BlockBasedTableConfig extends TableFormatConfig {
if true, does not store prefix and allows prefix hash collision if true, does not store prefix and allows prefix hash collision
(less memory consumption) (less memory consumption)
* *
* @param if hash collisions should be allowed. * @param hashIndexAllowCollision points out if hash collisions should be allowed.
* @return the reference to the current config. * @return the reference to the current config.
*/ */
public BlockBasedTableConfig setHashIndexAllowCollision( public BlockBasedTableConfig setHashIndexAllowCollision(
@ -256,7 +257,7 @@ public class BlockBasedTableConfig extends TableFormatConfig {
* Size of compressed block cache. If 0, then block_cache_compressed is set * Size of compressed block cache. If 0, then block_cache_compressed is set
* to null. * to null.
* *
* @param size of compressed block cache. * @param blockCacheCompressedSize of compressed block cache.
* @return the reference to the current config. * @return the reference to the current config.
*/ */
public BlockBasedTableConfig setBlockCacheCompressedSize( public BlockBasedTableConfig setBlockCacheCompressedSize(
@ -281,7 +282,7 @@ public class BlockBasedTableConfig extends TableFormatConfig {
* Controls the number of shards for the block compressed cache. * Controls the number of shards for the block compressed cache.
* This is applied only if blockCompressedCacheSize is set to non-negative. * This is applied only if blockCompressedCacheSize is set to non-negative.
* *
* @param numShardBits the number of shard bits. The resulting * @param blockCacheCompressedNumShardBits the number of shard bits. The resulting
* number of shards would be 2 ^ numShardBits. Any negative * number of shards would be 2 ^ numShardBits. Any negative
* number means use default settings." * number means use default settings."
* @return the reference to the current option. * @return the reference to the current option.

@ -7,18 +7,48 @@ package org.rocksdb;
/** /**
* Config for rate limiter, which is used to control write rate of flush and * Config for rate limiter, which is used to control write rate of flush and
* compaction. * compaction.
*
* @see RateLimiterConfig
*/ */
public class GenericRateLimiterConfig extends RateLimiterConfig { public class GenericRateLimiterConfig extends RateLimiterConfig {
private static final long DEFAULT_REFILL_PERIOD_MICROS = (100 * 1000); private static final long DEFAULT_REFILL_PERIOD_MICROS = (100 * 1000);
private static final int DEFAULT_FAIRNESS = 10; private static final int DEFAULT_FAIRNESS = 10;
/**
* GenericRateLimiterConfig constructor
*
* @param rateBytesPerSecond this is the only parameter you want to set
* most of the time. It controls the total write rate of compaction
* and flush in bytes per second. Currently, RocksDB does not enforce
* rate limit for anything other than flush and compaction, e.g. write to WAL.
* @param refillPeriodMicros this controls how often tokens are refilled. For example,
* when rate_bytes_per_sec is set to 10MB/s and refill_period_us is set to
* 100ms, then 1MB is refilled every 100ms internally. Larger value can lead to
* burstier writes while smaller value introduces more CPU overhead.
* The default should work for most cases.
* @param fairness RateLimiter accepts high-pri requests and low-pri requests.
* A low-pri request is usually blocked in favor of hi-pri request. Currently,
* RocksDB assigns low-pri to request from compaction and high-pri to request
* from flush. Low-pri requests can get blocked if flush requests come in
* continuously. This fairness parameter grants low-pri requests permission by
* fairness chance even though high-pri requests exist to avoid starvation.
* You should be good by leaving it at default 10.
*/
public GenericRateLimiterConfig(long rateBytesPerSecond, public GenericRateLimiterConfig(long rateBytesPerSecond,
long refillPeriodMicros, int fairness) { long refillPeriodMicros, int fairness) {
rateBytesPerSecond_ = rateBytesPerSecond; rateBytesPerSecond_ = rateBytesPerSecond;
refillPeriodMicros_ = refillPeriodMicros; refillPeriodMicros_ = refillPeriodMicros;
fairness_ = fairness; fairness_ = fairness;
} }
/**
* GenericRateLimiterConfig constructor
*
* @param rateBytesPerSecond this is the only parameter you want to set
* most of the time. It controls the total write rate of compaction
* and flush in bytes per second. Currently, RocksDB does not enforce
* rate limit for anything other than flush and compaction, e.g. write to WAL.
*/
public GenericRateLimiterConfig(long rateBytesPerSecond) { public GenericRateLimiterConfig(long rateBytesPerSecond) {
this(rateBytesPerSecond, DEFAULT_REFILL_PERIOD_MICROS, DEFAULT_FAIRNESS); this(rateBytesPerSecond, DEFAULT_REFILL_PERIOD_MICROS, DEFAULT_FAIRNESS);
} }

@ -21,7 +21,7 @@ public abstract class MemTableConfig {
* which will create a c++ shared-pointer to the c++ MemTableRepFactory * which will create a c++ shared-pointer to the c++ MemTableRepFactory
* that associated with the Java MemTableConfig. * that associated with the Java MemTableConfig.
* *
* @see Options.setMemTableFactory() * @see Options#setMemTableConfig(MemTableConfig)
*/ */
abstract protected long newMemTableFactoryHandle(); abstract protected long newMemTableFactoryHandle();
} }

@ -7,10 +7,10 @@ package org.rocksdb;
/** /**
* Options to control the behavior of a database. It will be used * Options to control the behavior of a database. It will be used
* during the creation of a RocksDB (i.e., RocksDB.open()). * during the creation of a {@link org.rocksdb.RocksDB} (i.e., RocksDB.open()).
* *
* If dispose() function is not called, then it will be GC'd automatically and * If {@link #dispose()} function is not called, then it will be GC'd automatically
* native resources will be released as part of the process. * and native resources will be released as part of the process.
*/ */
public class Options extends RocksObject { public class Options extends RocksObject {
static { static {
@ -30,7 +30,7 @@ public class Options extends RocksObject {
* Construct options for opening a RocksDB. * Construct options for opening a RocksDB.
* *
* This constructor will create (by allocating a block of memory) * This constructor will create (by allocating a block of memory)
* an rocksdb::Options in the c++ side. * an {@code rocksdb::Options} in the c++ side.
*/ */
public Options() { public Options() {
super(); super();
@ -42,13 +42,14 @@ public class Options extends RocksObject {
/** /**
* If this value is set to true, then the database will be created * If this value is set to true, then the database will be created
* if it is missing during RocksDB.open(). * if it is missing during {@code RocksDB.open()}.
* Default: false * Default: false
* *
* @param flag a flag indicating whether to create a database the * @param flag a flag indicating whether to create a database the
* specified database in RocksDB.open() operation is missing. * specified database in {@link org.rocksdb.RocksDB#open(Options, String)} operation
* @return the instance of the current Options. * is missing.
* @see RocksDB.open() * @return the instance of the current Options
* @see org.rocksdb.RocksDB#open(Options, String)
*/ */
public Options setCreateIfMissing(boolean flag) { public Options setCreateIfMissing(boolean flag) {
assert(isInitialized()); assert(isInitialized());
@ -59,7 +60,7 @@ public class Options extends RocksObject {
/** /**
* Use the specified object to interact with the environment, * Use the specified object to interact with the environment,
* e.g. to read/write files, schedule background work, etc. * e.g. to read/write files, schedule background work, etc.
* Default: RocksEnv.getDefault() * Default: {@link RocksEnv#getDefault()}
*/ */
public Options setEnv(RocksEnv env) { public Options setEnv(RocksEnv env) {
assert(isInitialized()); assert(isInitialized());
@ -79,7 +80,7 @@ public class Options extends RocksObject {
* If true, the database will be created if it is missing. * If true, the database will be created if it is missing.
* *
* @return true if the createIfMissing option is set to true. * @return true if the createIfMissing option is set to true.
* @see setCreateIfMissing() * @see #setCreateIfMissing(boolean)
*/ */
public boolean createIfMissing() { public boolean createIfMissing() {
assert(isInitialized()); assert(isInitialized());
@ -87,12 +88,12 @@ public class Options extends RocksObject {
} }
/** /**
* Set BuiltinComparator to be used with RocksDB. * Set {@link org.rocksdb.Options.BuiltinComparator} to be used with RocksDB.
* *
* Note: Comparator can be set once upon database creation. * Note: Comparator can be set once upon database creation.
* *
* Default: BytewiseComparator. * Default: BytewiseComparator.
* @param builtinComparator a BuiltinComparator type. * @param builtinComparator a {@link org.rocksdb.Options.BuiltinComparator} type.
*/ */
public void setBuiltinComparator(BuiltinComparator builtinComparator) { public void setBuiltinComparator(BuiltinComparator builtinComparator) {
assert(isInitialized()); assert(isInitialized());
@ -106,7 +107,7 @@ public class Options extends RocksObject {
* on disk) before converting to a sorted on-disk file. * on disk) before converting to a sorted on-disk file.
* *
* Larger values increase performance, especially during bulk loads. * Larger values increase performance, especially during bulk loads.
* Up to max_write_buffer_number write buffers may be held in memory * Up to {@code max_write_buffer_number} write buffers may be held in memory
* at the same time, so you may wish to adjust this parameter * at the same time, so you may wish to adjust this parameter
* to control memory usage. * to control memory usage.
* *
@ -116,7 +117,7 @@ public class Options extends RocksObject {
* Default: 4MB * Default: 4MB
* @param writeBufferSize the size of write buffer. * @param writeBufferSize the size of write buffer.
* @return the instance of the current Options. * @return the instance of the current Options.
* @see RocksDB.open() * @see org.rocksdb.RocksDB#open(Options, String)
*/ */
public Options setWriteBufferSize(long writeBufferSize) { public Options setWriteBufferSize(long writeBufferSize) {
assert(isInitialized()); assert(isInitialized());
@ -128,7 +129,7 @@ public class Options extends RocksObject {
* Return size of write buffer size. * Return size of write buffer size.
* *
* @return size of write buffer. * @return size of write buffer.
* @see setWriteBufferSize() * @see #setWriteBufferSize(long)
*/ */
public long writeBufferSize() { public long writeBufferSize() {
assert(isInitialized()); assert(isInitialized());
@ -143,7 +144,7 @@ public class Options extends RocksObject {
* *
* @param maxWriteBufferNumber maximum number of write buffers. * @param maxWriteBufferNumber maximum number of write buffers.
* @return the instance of the current Options. * @return the instance of the current Options.
* @see RocksDB.open() * @see org.rocksdb.RocksDB#open(Options, String)
*/ */
public Options setMaxWriteBufferNumber(int maxWriteBufferNumber) { public Options setMaxWriteBufferNumber(int maxWriteBufferNumber) {
assert(isInitialized()); assert(isInitialized());
@ -155,7 +156,7 @@ public class Options extends RocksObject {
* Returns maximum number of write buffers. * Returns maximum number of write buffers.
* *
* @return maximum number of write buffers. * @return maximum number of write buffers.
* @see setMaxWriteBufferNumber() * @see #setMaxWriteBufferNumber(int)
*/ */
public int maxWriteBufferNumber() { public int maxWriteBufferNumber() {
assert(isInitialized()); assert(isInitialized());
@ -181,9 +182,9 @@ public class Options extends RocksObject {
* Default: false * Default: false
* *
* @param errorIfExists if true, an exception will be thrown * @param errorIfExists if true, an exception will be thrown
* during RocksDB.open() if the database already exists. * during {@code RocksDB.open()} if the database already exists.
* @return the reference to the current option. * @return the reference to the current option.
* @see RocksDB.open() * @see org.rocksdb.RocksDB#open(Options, String)
*/ */
public Options setErrorIfExists(boolean errorIfExists) { public Options setErrorIfExists(boolean errorIfExists) {
assert(isInitialized()); assert(isInitialized());
@ -237,8 +238,9 @@ public class Options extends RocksObject {
* Number of open files that can be used by the DB. You may need to * Number of open files that can be used by the DB. You may need to
* increase this if your database has a large working set. Value -1 means * increase this if your database has a large working set. Value -1 means
* files opened are always kept open. You can estimate number of files based * files opened are always kept open. You can estimate number of files based
* on target_file_size_base and target_file_size_multiplier for level-based * on {@code target_file_size_base} and {@code target_file_size_multiplier}
* compaction. For universal-style compaction, you can usually set it to -1. * for level-based compaction. For universal-style compaction, you can usually
* set it to -1.
* *
* @return the maximum number of open files. * @return the maximum number of open files.
*/ */
@ -252,8 +254,9 @@ public class Options extends RocksObject {
* Number of open files that can be used by the DB. You may need to * Number of open files that can be used by the DB. You may need to
* increase this if your database has a large working set. Value -1 means * increase this if your database has a large working set. Value -1 means
* files opened are always kept open. You can estimate number of files based * files opened are always kept open. You can estimate number of files based
* on target_file_size_base and target_file_size_multiplier for level-based * on {@code target_file_size_base} and {@code target_file_size_multiplier}
* compaction. For universal-style compaction, you can usually set it to -1. * for level-based compaction. For universal-style compaction, you can usually
* set it to -1.
* Default: 5000 * Default: 5000
* *
* @param maxOpenFiles the maximum number of open files. * @param maxOpenFiles the maximum number of open files.
@ -271,7 +274,7 @@ public class Options extends RocksObject {
* to stable storage. Their contents remain in the OS buffers till the * to stable storage. Their contents remain in the OS buffers till the
* OS decides to flush them. This option is good for bulk-loading * OS decides to flush them. This option is good for bulk-loading
* of data. Once the bulk-loading is complete, please issue a * of data. Once the bulk-loading is complete, please issue a
* sync to the OS to flush all dirty buffesrs to stable storage. * sync to the OS to flush all dirty buffers to stable storage.
* *
* @return if true, then data-sync is disabled. * @return if true, then data-sync is disabled.
*/ */
@ -286,7 +289,7 @@ public class Options extends RocksObject {
* to stable storage. Their contents remain in the OS buffers till the * to stable storage. Their contents remain in the OS buffers till the
* OS decides to flush them. This option is good for bulk-loading * OS decides to flush them. This option is good for bulk-loading
* of data. Once the bulk-loading is complete, please issue a * of data. Once the bulk-loading is complete, please issue a
* sync to the OS to flush all dirty buffesrs to stable storage. * sync to the OS to flush all dirty buffers to stable storage.
* Default: false * Default: false
* *
* @param disableDataSync a boolean flag to specify whether to * @param disableDataSync a boolean flag to specify whether to
@ -306,7 +309,7 @@ public class Options extends RocksObject {
* This parameter should be set to true while storing data to * This parameter should be set to true while storing data to
* filesystem like ext3 that can lose files after a reboot. * filesystem like ext3 that can lose files after a reboot.
* *
* @return true if fsync is used. * @return boolean value indicating if fsync is used.
*/ */
public boolean useFsync() { public boolean useFsync() {
assert(isInitialized()); assert(isInitialized());
@ -438,7 +441,8 @@ public class Options extends RocksObject {
* Default: 1 * Default: 1
* *
* @return the maximum number of concurrent background compaction jobs. * @return the maximum number of concurrent background compaction jobs.
* @see Env.setBackgroundThreads() * @see org.rocksdb.RocksEnv#setBackgroundThreads(int)
* @see org.rocksdb.RocksEnv#setBackgroundThreads(int, int)
*/ */
public int maxBackgroundCompactions() { public int maxBackgroundCompactions() {
assert(isInitialized()); assert(isInitialized());
@ -451,7 +455,7 @@ public class Options extends RocksObject {
it does not use any locks to prevent concurrent updates. it does not use any locks to prevent concurrent updates.
* *
* @return the instance of the current Options. * @return the instance of the current Options.
* @see RocksDB.open() * @see org.rocksdb.RocksDB#open(Options, String)
*/ */
public Options createStatistics() { public Options createStatistics() {
assert(isInitialized()); assert(isInitialized());
@ -460,11 +464,11 @@ public class Options extends RocksObject {
} }
/** /**
* Returns statistics object. Calls createStatistics() if * Returns statistics object. Calls {@link #createStatistics()} if
* C++ returns NULL pointer for statistics. * C++ returns {@code nullptr} for statistics.
* *
* @return the instance of the statistics object. * @return the instance of the statistics object.
* @see createStatistics() * @see #createStatistics()
*/ */
public Statistics statisticsPtr() { public Statistics statisticsPtr() {
assert(isInitialized()); assert(isInitialized());
@ -489,8 +493,9 @@ public class Options extends RocksObject {
* compaction jobs. * compaction jobs.
* @return the reference to the current option. * @return the reference to the current option.
* *
* @see Env.setBackgroundThreads() * @see org.rocksdb.RocksEnv#setBackgroundThreads(int)
* @see maxBackgroundFlushes() * @see org.rocksdb.RocksEnv#setBackgroundThreads(int, int)
* @see #maxBackgroundFlushes()
*/ */
public Options setMaxBackgroundCompactions(int maxBackgroundCompactions) { public Options setMaxBackgroundCompactions(int maxBackgroundCompactions) {
assert(isInitialized()); assert(isInitialized());
@ -505,7 +510,8 @@ public class Options extends RocksObject {
* Default: 1 * Default: 1
* *
* @return the maximum number of concurrent background flush jobs. * @return the maximum number of concurrent background flush jobs.
* @see Env.setBackgroundThreads() * @see org.rocksdb.RocksEnv#setBackgroundThreads(int)
* @see org.rocksdb.RocksEnv#setBackgroundThreads(int, int)
*/ */
public int maxBackgroundFlushes() { public int maxBackgroundFlushes() {
assert(isInitialized()); assert(isInitialized());
@ -519,11 +525,12 @@ public class Options extends RocksObject {
* HIGH priority thread pool. For more information, see * HIGH priority thread pool. For more information, see
* Default: 1 * Default: 1
* *
* @param maxBackgroundFlushes * @param maxBackgroundFlushes number of max concurrent flush jobs
* @return the reference to the current option. * @return the reference to the current option.
* *
* @see Env.setBackgroundThreads() * @see org.rocksdb.RocksEnv#setBackgroundThreads(int)
* @see maxBackgroundCompactions() * @see org.rocksdb.RocksEnv#setBackgroundThreads(int, int)
* @see #maxBackgroundCompactions()
*/ */
public Options setMaxBackgroundFlushes(int maxBackgroundFlushes) { public Options setMaxBackgroundFlushes(int maxBackgroundFlushes) {
assert(isInitialized()); assert(isInitialized());
@ -713,20 +720,22 @@ public class Options extends RocksObject {
/** /**
* WalTtlSeconds() and walSizeLimitMB() affect how archived logs * WalTtlSeconds() and walSizeLimitMB() affect how archived logs
* will be deleted. * will be deleted.
* 1. If both set to 0, logs will be deleted asap and will not get into * <ol>
* the archive. * <li>If both set to 0, logs will be deleted asap and will not get into
* 2. If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0, * the archive.</li>
* WAL files will be checked every 10 min and if total size is greater * <li>If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
* then WAL_size_limit_MB, they will be deleted starting with the * WAL files will be checked every 10 min and if total size is greater
* earliest until size_limit is met. All empty files will be deleted. * then WAL_size_limit_MB, they will be deleted starting with the
* 3. If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then * earliest until size_limit is met. All empty files will be deleted.</li>
* WAL files will be checked every WAL_ttl_secondsi / 2 and those that * <li>If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
* are older than WAL_ttl_seconds will be deleted. * WAL files will be checked every WAL_ttl_secondsi / 2 and those that
* 4. If both are not 0, WAL files will be checked every 10 min and both * are older than WAL_ttl_seconds will be deleted.</li>
* checks will be performed with ttl being first. * <li>If both are not 0, WAL files will be checked every 10 min and both
* checks will be performed with ttl being first.</li>
* </ol>
* *
* @return the wal-ttl seconds * @return the wal-ttl seconds
* @see walSizeLimitMB() * @see #walSizeLimitMB()
*/ */
public long walTtlSeconds() { public long walTtlSeconds() {
assert(isInitialized()); assert(isInitialized());
@ -735,23 +744,24 @@ public class Options extends RocksObject {
private native long walTtlSeconds(long handle); private native long walTtlSeconds(long handle);
/** /**
* WalTtlSeconds() and walSizeLimitMB() affect how archived logs * {@link #walTtlSeconds()} and {@link #walSizeLimitMB()} affect how archived logs
* will be deleted. * will be deleted.
* 1. If both set to 0, logs will be deleted asap and will not get into * <ol>
* the archive. * <li>If both set to 0, logs will be deleted asap and will not get into
* 2. If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0, * the archive.</li>
* <li>If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
* WAL files will be checked every 10 min and if total size is greater * WAL files will be checked every 10 min and if total size is greater
* then WAL_size_limit_MB, they will be deleted starting with the * then WAL_size_limit_MB, they will be deleted starting with the
* earliest until size_limit is met. All empty files will be deleted. * earliest until size_limit is met. All empty files will be deleted.</li>
* 3. If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then * <li>If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
* WAL files will be checked every WAL_ttl_secondsi / 2 and those that * WAL files will be checked every WAL_ttl_secondsi / 2 and those that
* are older than WAL_ttl_seconds will be deleted. * are older than WAL_ttl_seconds will be deleted.</li>
* 4. If both are not 0, WAL files will be checked every 10 min and both * <li>If both are not 0, WAL files will be checked every 10 min and both
* checks will be performed with ttl being first. * checks will be performed with ttl being first.</li>
* *
* @param walTtlSeconds the ttl seconds * @param walTtlSeconds the ttl seconds
* @return the reference to the current option. * @return the reference to the current option.
* @see setWalSizeLimitMB() * @see #setWalSizeLimitMB(long)
*/ */
public Options setWalTtlSeconds(long walTtlSeconds) { public Options setWalTtlSeconds(long walTtlSeconds) {
assert(isInitialized()); assert(isInitialized());
@ -761,22 +771,23 @@ public class Options extends RocksObject {
private native void setWalTtlSeconds(long handle, long walTtlSeconds); private native void setWalTtlSeconds(long handle, long walTtlSeconds);
/** /**
* WalTtlSeconds() and walSizeLimitMB() affect how archived logs * {@link #walTtlSeconds()} and {@link #walSizeLimitMB()} affect how archived logs
* will be deleted. * will be deleted.
* 1. If both set to 0, logs will be deleted asap and will not get into * <ol>
* the archive. * <li>If both set to 0, logs will be deleted asap and will not get into
* 2. If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0, * the archive.</li>
* <li>If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
* WAL files will be checked every 10 min and if total size is greater * WAL files will be checked every 10 min and if total size is greater
* then WAL_size_limit_MB, they will be deleted starting with the * then WAL_size_limit_MB, they will be deleted starting with the
* earliest until size_limit is met. All empty files will be deleted. * earliest until size_limit is met. All empty files will be deleted.</li>
* 3. If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then * <li>If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
* WAL files will be checked every WAL_ttl_secondsi / 2 and those that * WAL files will be checked every WAL_ttl_seconds i / 2 and those that
* are older than WAL_ttl_seconds will be deleted. * are older than WAL_ttl_seconds will be deleted.</li>
* 4. If both are not 0, WAL files will be checked every 10 min and both * <li>If both are not 0, WAL files will be checked every 10 min and both
* checks will be performed with ttl being first. * checks will be performed with ttl being first.</li>
* * </ol>
* @return size limit in mega-bytes. * @return size limit in mega-bytes.
* @see walSizeLimitMB() * @see #walSizeLimitMB()
*/ */
public long walSizeLimitMB() { public long walSizeLimitMB() {
assert(isInitialized()); assert(isInitialized());
@ -787,21 +798,22 @@ public class Options extends RocksObject {
/** /**
* WalTtlSeconds() and walSizeLimitMB() affect how archived logs * WalTtlSeconds() and walSizeLimitMB() affect how archived logs
* will be deleted. * will be deleted.
* 1. If both set to 0, logs will be deleted asap and will not get into * <ol>
* the archive. * <li>If both set to 0, logs will be deleted asap and will not get into
* 2. If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0, * the archive.</li>
* <li>If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
* WAL files will be checked every 10 min and if total size is greater * WAL files will be checked every 10 min and if total size is greater
* then WAL_size_limit_MB, they will be deleted starting with the * then WAL_size_limit_MB, they will be deleted starting with the
* earliest until size_limit is met. All empty files will be deleted. * earliest until size_limit is met. All empty files will be deleted.</li>
* 3. If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then * <li>If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
* WAL files will be checked every WAL_ttl_secondsi / 2 and those that * WAL files will be checked every WAL_ttl_secondsi / 2 and those that
* are older than WAL_ttl_seconds will be deleted. * are older than WAL_ttl_seconds will be deleted.</li>
* 4. If both are not 0, WAL files will be checked every 10 min and both * <li>If both are not 0, WAL files will be checked every 10 min and both
* checks will be performed with ttl being first. * checks will be performed with ttl being first.</li>
* *
* @param sizeLimitMB size limit in mega-bytes. * @param sizeLimitMB size limit in mega-bytes.
* @return the reference to the current option. * @return the reference to the current option.
* @see setWalSizeLimitMB() * @see #setWalSizeLimitMB(long)
*/ */
public Options setWalSizeLimitMB(long sizeLimitMB) { public Options setWalSizeLimitMB(long sizeLimitMB) {
assert(isInitialized()); assert(isInitialized());
@ -857,7 +869,7 @@ public class Options extends RocksObject {
* Data being read from file storage may be buffered in the OS * Data being read from file storage may be buffered in the OS
* Default: true * Default: true
* *
* @param allowOsBufferif true, then OS buffering is allowed. * @param allowOsBuffer if true, then OS buffering is allowed.
* @return the reference to the current option. * @return the reference to the current option.
*/ */
public Options setAllowOsBuffer(boolean allowOsBuffer) { public Options setAllowOsBuffer(boolean allowOsBuffer) {
@ -1122,7 +1134,7 @@ public class Options extends RocksObject {
* Memtable format can be set using setTableFormatConfig. * Memtable format can be set using setTableFormatConfig.
* *
* @return the name of the currently-used memtable factory. * @return the name of the currently-used memtable factory.
* @see setTableFormatConfig() * @see #setTableFormatConfig(TableFormatConfig)
*/ */
public String memTableFactoryName() { public String memTableFactoryName() {
assert(isInitialized()); assert(isInitialized());
@ -1273,7 +1285,7 @@ public class Options extends RocksObject {
long handle, int numLevels); long handle, int numLevels);
/** /**
* The number of files in leve 0 to trigger compaction from level-0 to * The number of files in level 0 to trigger compaction from level-0 to
* level-1. A value < 0 means that level-0 compaction will not be * level-1. A value < 0 means that level-0 compaction will not be
* triggered by number of files at all. * triggered by number of files at all.
* Default: 4 * Default: 4
@ -1400,7 +1412,7 @@ public class Options extends RocksObject {
* *
* @return the target size of a level-0 file. * @return the target size of a level-0 file.
* *
* @see targetFileSizeMultiplier() * @see #targetFileSizeMultiplier()
*/ */
public int targetFileSizeBase() { public int targetFileSizeBase() {
return targetFileSizeBase(nativeHandle_); return targetFileSizeBase(nativeHandle_);
@ -1421,7 +1433,7 @@ public class Options extends RocksObject {
* @param targetFileSizeBase the target size of a level-0 file. * @param targetFileSizeBase the target size of a level-0 file.
* @return the reference to the current option. * @return the reference to the current option.
* *
* @see setTargetFileSizeMultiplier() * @see #setTargetFileSizeMultiplier(int)
*/ */
public Options setTargetFileSizeBase(int targetFileSizeBase) { public Options setTargetFileSizeBase(int targetFileSizeBase) {
setTargetFileSizeBase(nativeHandle_, targetFileSizeBase); setTargetFileSizeBase(nativeHandle_, targetFileSizeBase);
@ -1471,7 +1483,7 @@ public class Options extends RocksObject {
* by default 'maxBytesForLevelBase' is 10MB. * by default 'maxBytesForLevelBase' is 10MB.
* *
* @return the upper-bound of the total size of leve-1 files in bytes. * @return the upper-bound of the total size of leve-1 files in bytes.
* @see maxBytesForLevelMultiplier() * @see #maxBytesForLevelMultiplier()
*/ */
public long maxBytesForLevelBase() { public long maxBytesForLevelBase() {
return maxBytesForLevelBase(nativeHandle_); return maxBytesForLevelBase(nativeHandle_);
@ -1491,7 +1503,7 @@ public class Options extends RocksObject {
* @return maxBytesForLevelBase the upper-bound of the total size of * @return maxBytesForLevelBase the upper-bound of the total size of
* leve-1 files in bytes. * leve-1 files in bytes.
* @return the reference to the current option. * @return the reference to the current option.
* @see setMaxBytesForLevelMultiplier() * @see #setMaxBytesForLevelMultiplier(int)
*/ */
public Options setMaxBytesForLevelBase(long maxBytesForLevelBase) { public Options setMaxBytesForLevelBase(long maxBytesForLevelBase) {
setMaxBytesForLevelBase(nativeHandle_, maxBytesForLevelBase); setMaxBytesForLevelBase(nativeHandle_, maxBytesForLevelBase);
@ -1507,7 +1519,7 @@ public class Options extends RocksObject {
* *
* @return the ratio between the total size of level-(L+1) files and * @return the ratio between the total size of level-(L+1) files and
* the total size of level-L files for all L. * the total size of level-L files for all L.
* @see maxBytesForLevelBase() * @see #maxBytesForLevelBase()
*/ */
public int maxBytesForLevelMultiplier() { public int maxBytesForLevelMultiplier() {
return maxBytesForLevelMultiplier(nativeHandle_); return maxBytesForLevelMultiplier(nativeHandle_);
@ -1522,7 +1534,7 @@ public class Options extends RocksObject {
* @param multiplier the ratio between the total size of level-(L+1) * @param multiplier the ratio between the total size of level-(L+1)
* files and the total size of level-L files for all L. * files and the total size of level-L files for all L.
* @return the reference to the current option. * @return the reference to the current option.
* @see setMaxBytesForLevelBase() * @see #setMaxBytesForLevelBase(long)
*/ */
public Options setMaxBytesForLevelMultiplier(int multiplier) { public Options setMaxBytesForLevelMultiplier(int multiplier) {
setMaxBytesForLevelMultiplier(nativeHandle_, multiplier); setMaxBytesForLevelMultiplier(nativeHandle_, multiplier);
@ -1538,7 +1550,7 @@ public class Options extends RocksObject {
* (expanded_compaction_factor * targetFileSizeLevel()) many bytes. * (expanded_compaction_factor * targetFileSizeLevel()) many bytes.
* *
* @return the maximum number of bytes in all compacted files. * @return the maximum number of bytes in all compacted files.
* @see sourceCompactionFactor() * @see #sourceCompactionFactor()
*/ */
public int expandedCompactionFactor() { public int expandedCompactionFactor() {
return expandedCompactionFactor(nativeHandle_); return expandedCompactionFactor(nativeHandle_);
@ -1554,7 +1566,7 @@ public class Options extends RocksObject {
* @param expandedCompactionFactor the maximum number of bytes in all * @param expandedCompactionFactor the maximum number of bytes in all
* compacted files. * compacted files.
* @return the reference to the current option. * @return the reference to the current option.
* @see setSourceCompactionFactor() * @see #setSourceCompactionFactor(int)
*/ */
public Options setExpandedCompactionFactor(int expandedCompactionFactor) { public Options setExpandedCompactionFactor(int expandedCompactionFactor) {
setExpandedCompactionFactor(nativeHandle_, expandedCompactionFactor); setExpandedCompactionFactor(nativeHandle_, expandedCompactionFactor);
@ -1573,7 +1585,7 @@ public class Options extends RocksObject {
* a compaction. * a compaction.
* *
* @return the maximum number of bytes in all source files to be compactedo. * @return the maximum number of bytes in all source files to be compactedo.
* @see expendedCompactionFactor() * @see #expandedCompactionFactor()
*/ */
public int sourceCompactionFactor() { public int sourceCompactionFactor() {
return sourceCompactionFactor(nativeHandle_); return sourceCompactionFactor(nativeHandle_);
@ -1592,7 +1604,7 @@ public class Options extends RocksObject {
* @param sourceCompactionFactor the maximum number of bytes in all * @param sourceCompactionFactor the maximum number of bytes in all
* source files to be compacted in a single compaction run. * source files to be compacted in a single compaction run.
* @return the reference to the current option. * @return the reference to the current option.
* @see setExpendedCompactionFactor() * @see #setExpandedCompactionFactor(int)
*/ */
public Options setSourceCompactionFactor(int sourceCompactionFactor) { public Options setSourceCompactionFactor(int sourceCompactionFactor) {
setSourceCompactionFactor(nativeHandle_, sourceCompactionFactor); setSourceCompactionFactor(nativeHandle_, sourceCompactionFactor);
@ -1979,7 +1991,7 @@ public class Options extends RocksObject {
* This value will be used only when a prefix-extractor is specified. * This value will be used only when a prefix-extractor is specified.
* *
* @return the number of bloom-bits. * @return the number of bloom-bits.
* @see useFixedLengthPrefixExtractor() * @see #useFixedLengthPrefixExtractor(int)
*/ */
public int memtablePrefixBloomBits() { public int memtablePrefixBloomBits() {
return memtablePrefixBloomBits(nativeHandle_); return memtablePrefixBloomBits(nativeHandle_);
@ -2037,7 +2049,7 @@ public class Options extends RocksObject {
* Default: 0 * Default: 0
* *
* @return the level of locality of bloom-filter probes. * @return the level of locality of bloom-filter probes.
* @see setMemTablePrefixBloomProbes * @see #setMemtablePrefixBloomProbes(int)
*/ */
public int bloomLocality() { public int bloomLocality() {
return bloomLocality(nativeHandle_); return bloomLocality(nativeHandle_);
@ -2149,7 +2161,7 @@ public class Options extends RocksObject {
* *
* Default: 2 * Default: 2
* *
* @return * @return min partial merge operands
*/ */
public int minPartialMergeOperands() { public int minPartialMergeOperands() {
return minPartialMergeOperands(nativeHandle_); return minPartialMergeOperands(nativeHandle_);

@ -10,11 +10,12 @@ package org.rocksdb;
*/ */
public abstract class RateLimiterConfig { public abstract class RateLimiterConfig {
/** /**
* This function should only be called by Options.setRateLimiter(), * This function should only be called by
* which will create a c++ shared-pointer to the c++ RateLimiter * {@link org.rocksdb.Options#setRateLimiter(long, long)}, which will
* that is associated with the Java RateLimtierConifg. * create a c++ shared-pointer to the c++ {@code RateLimiter} that is associated
* with a Java RateLimiterConfig.
* *
* @see Options.setRateLimiter() * @see org.rocksdb.Options#setRateLimiter(long, long)
*/ */
abstract protected long newRateLimiterHandle(); abstract protected long newRateLimiterHandle();
} }

@ -64,7 +64,7 @@ public class ReadOptions extends RocksObject {
private native boolean fillCache(long handle); private native boolean fillCache(long handle);
/** /**
* Fill the cache when loading the block-based sst formated db. * Fill the cache when loading the block-based sst formatted db.
* Callers may wish to set this field to false for bulk scans. * Callers may wish to set this field to false for bulk scans.
* Default: true * Default: true
* *
@ -86,7 +86,8 @@ public class ReadOptions extends RocksObject {
* added data) and is optimized for sequential reads. It will return records * added data) and is optimized for sequential reads. It will return records
* that were inserted into the database after the creation of the iterator. * that were inserted into the database after the creation of the iterator.
* Default: false * Default: false
* Not supported in ROCKSDB_LITE mode! *
* Not supported in {@code ROCKSDB_LITE} mode!
* *
* @return true if tailing iterator is enabled. * @return true if tailing iterator is enabled.
*/ */

@ -11,9 +11,13 @@ package org.rocksdb;
* Note that dispose() must be called before this instance become out-of-scope * Note that dispose() must be called before this instance become out-of-scope
* to release the allocated memory in c++. * to release the allocated memory in c++.
* *
* @param options Instance of BackupableDBOptions.
*/ */
public class RestoreBackupableDB extends RocksObject { public class RestoreBackupableDB extends RocksObject {
/**
* Constructor
*
* @param options {@link org.rocksdb.BackupableDBOptions} instance
*/
public RestoreBackupableDB(BackupableDBOptions options) { public RestoreBackupableDB(BackupableDBOptions options) {
super(); super();
nativeHandle_ = newRestoreBackupableDB(options.nativeHandle_); nativeHandle_ = newRestoreBackupableDB(options.nativeHandle_);
@ -30,6 +34,12 @@ public class RestoreBackupableDB extends RocksObject {
* database will diverge from backups 4 and 5 and the new backup will fail. * database will diverge from backups 4 and 5 and the new backup will fail.
* If you want to create new backup, you will first have to delete backups 4 * If you want to create new backup, you will first have to delete backups 4
* and 5. * and 5.
*
* @param backupId id pointing to backup
* @param dbDir database directory to restore to
* @param walDir directory where wal files are located
* @param restoreOptions {@link org.rocksdb.RestoreOptions} instance
* @throws RocksDBException
*/ */
public void restoreDBFromBackup(long backupId, String dbDir, String walDir, public void restoreDBFromBackup(long backupId, String dbDir, String walDir,
RestoreOptions restoreOptions) throws RocksDBException { RestoreOptions restoreOptions) throws RocksDBException {
@ -39,6 +49,11 @@ public class RestoreBackupableDB extends RocksObject {
/** /**
* Restore from the latest backup. * Restore from the latest backup.
*
* @param dbDir database directory to restore to
* @param walDir directory where wal files are located
* @param restoreOptions {@link org.rocksdb.RestoreOptions} instance
* @throws RocksDBException
*/ */
public void restoreDBFromLatestBackup(String dbDir, String walDir, public void restoreDBFromLatestBackup(String dbDir, String walDir,
RestoreOptions restoreOptions) throws RocksDBException { RestoreOptions restoreOptions) throws RocksDBException {
@ -49,7 +64,7 @@ public class RestoreBackupableDB extends RocksObject {
/** /**
* Deletes old backups, keeping latest numBackupsToKeep alive. * Deletes old backups, keeping latest numBackupsToKeep alive.
* *
* @param Number of latest backups to keep * @param numBackupsToKeep of latest backups to keep
*/ */
public void purgeOldBackups(int numBackupsToKeep) throws RocksDBException { public void purgeOldBackups(int numBackupsToKeep) throws RocksDBException {
purgeOldBackups0(nativeHandle_, numBackupsToKeep); purgeOldBackups0(nativeHandle_, numBackupsToKeep);
@ -58,7 +73,7 @@ public class RestoreBackupableDB extends RocksObject {
/** /**
* Deletes a specific backup. * Deletes a specific backup.
* *
* @param ID of backup to delete. * @param backupId of backup to delete.
*/ */
public void deleteBackup(long backupId) throws RocksDBException { public void deleteBackup(long backupId) throws RocksDBException {
deleteBackup0(nativeHandle_, backupId); deleteBackup0(nativeHandle_, backupId);

@ -11,13 +11,17 @@ package org.rocksdb;
* Note that dispose() must be called before this instance become out-of-scope * Note that dispose() must be called before this instance become out-of-scope
* to release the allocated memory in c++. * to release the allocated memory in c++.
* *
* @param If true, restore won't overwrite the existing log files in wal_dir. It
* will also move all log files from archive directory to wal_dir. Use this
* option in combination with BackupableDBOptions::backup_log_files = false
* for persisting in-memory databases.
* Default: false
*/ */
public class RestoreOptions extends RocksObject { public class RestoreOptions extends RocksObject {
/**
* Constructor
*
* @param keepLogFiles If true, restore won't overwrite the existing log files in wal_dir. It
* will also move all log files from archive directory to wal_dir. Use this
* option in combination with BackupableDBOptions::backup_log_files = false
* for persisting in-memory databases.
* Default: false
*/
public RestoreOptions(boolean keepLogFiles) { public RestoreOptions(boolean keepLogFiles) {
super(); super();
nativeHandle_ = newRestoreOptions(keepLogFiles); nativeHandle_ = newRestoreOptions(keepLogFiles);

@ -17,7 +17,7 @@ import org.rocksdb.NativeLibraryLoader;
* A RocksDB is a persistent ordered map from keys to values. It is safe for * A RocksDB is a persistent ordered map from keys to values. It is safe for
* concurrent access from multiple threads without any external synchronization. * concurrent access from multiple threads without any external synchronization.
* All methods of this class could potentially throw RocksDBException, which * All methods of this class could potentially throw RocksDBException, which
* indicates sth wrong at the rocksdb library side and the call failed. * indicates sth wrong at the RocksDB library side and the call failed.
*/ */
public class RocksDB extends RocksObject { public class RocksDB extends RocksObject {
public static final int NOT_FOUND = -1; public static final int NOT_FOUND = -1;
@ -95,12 +95,11 @@ public class RocksDB extends RocksObject {
* set to true. * set to true.
* *
* @param path the path to the rocksdb. * @param path the path to the rocksdb.
* @param status an out value indicating the status of the Open().
* @return a rocksdb instance on success, null if the specified rocksdb can * @return a rocksdb instance on success, null if the specified rocksdb can
* not be opened. * not be opened.
* *
* @see Options.setCreateIfMissing() * @see Options#setCreateIfMissing(boolean)
* @see Options.createIfMissing() * @see org.rocksdb.Options#createIfMissing()
*/ */
public static RocksDB open(String path) throws RocksDBException { public static RocksDB open(String path) throws RocksDBException {
RocksDB db = new RocksDB(); RocksDB db = new RocksDB();
@ -280,8 +279,8 @@ public class RocksDB extends RocksObject {
/** /**
* Returns a map of keys for which values were found in DB. * Returns a map of keys for which values were found in DB.
* *
* @param List of keys for which values need to be retrieved.
* @param opt Read options. * @param opt Read options.
* @param keys of keys for which values need to be retrieved.
* @return Map where key of map is the key passed by user and value for map * @return Map where key of map is the key passed by user and value for map
* entry is the corresponding value in DB. * entry is the corresponding value in DB.
* *

@ -7,16 +7,22 @@ package org.rocksdb;
/** /**
* RocksObject is the base-class of all RocksDB classes that has a pointer to * RocksObject is the base-class of all RocksDB classes that has a pointer to
* some c++ rocksdb object. * some c++ {@code rocksdb} object.
* *
* RocksObject has dispose() function, which releases its associated c++ resource. * <p>
* RocksObject has {@code dispose()} function, which releases its associated c++
* resource.
* </p>
* </p>
* This function can be either called manually, or being called automatically * This function can be either called manually, or being called automatically
* during the regular Java GC process. However, since Java may wrongly assume a * during the regular Java GC process. However, since Java may wrongly assume a
* RocksObject only contains a long member variable and think it is small in size, * RocksObject only contains a long member variable and think it is small in size,
* Java may give RocksObject low priority in the GC process. For this, it is * </p>
* suggested to call dispose() manually. However, it is safe to let RocksObject go * <p>Java may give {@code RocksObject} low priority in the GC process. For this, it is
* out-of-scope without manually calling dispose() as dispose() will be called * suggested to call {@code dispose()} manually. However, it is safe to let
* in the finalizer during the regular GC process. * {@code RocksObject} go out-of-scope without manually calling {@code dispose()}
* as {@code dispose()} will be called in the finalizer during the
* regular GC process.</p>
*/ */
public abstract class RocksObject { public abstract class RocksObject {
protected RocksObject() { protected RocksObject() {
@ -26,16 +32,18 @@ public abstract class RocksObject {
/** /**
* Release the c++ object manually pointed by the native handle. * Release the c++ object manually pointed by the native handle.
* * <p>
* Note that dispose() will also be called during the GC process * Note that {@code dispose()} will also be called during the GC process
* if it was not called before its RocksObject went out-of-scope. * if it was not called before its {@code RocksObject} went out-of-scope.
* However, since Java may wrongly wrongly assume those objects are * However, since Java may wrongly wrongly assume those objects are
* small in that they seems to only hold a long variable. As a result, * small in that they seems to only hold a long variable. As a result,
* they might have low priority in the GC process. To prevent this, * they might have low priority in the GC process. To prevent this,
* it is suggested to call dispose() manually. * it is suggested to call {@code dispose()} manually.
* * <p>
* Note that once an instance of RocksObject has been disposed, * <p>
* Note that once an instance of {@code RocksObject} has been disposed,
* calling its function will lead undefined behavior. * calling its function will lead undefined behavior.
* </p>
*/ */
public final synchronized void dispose() { public final synchronized void dispose() {
if (isOwningNativeHandle() && isInitialized()) { if (isOwningNativeHandle() && isInitialized()) {
@ -46,40 +54,41 @@ public abstract class RocksObject {
} }
/** /**
* The helper function of dispose() which all subclasses of RocksObject * The helper function of {@code dispose()} which all subclasses of
* must implement to release their associated C++ resource. * {@code RocksObject} must implement to release their associated
* C++ resource.
*/ */
protected abstract void disposeInternal(); protected abstract void disposeInternal();
/** /**
* Revoke ownership of the native object. * Revoke ownership of the native object.
* * <p>
* This will prevent the object from attempting to delete the underlying * This will prevent the object from attempting to delete the underlying
* native object in its finalizer. This must be used when another object * native object in its finalizer. This must be used when another object
* takes over ownership of the native object or both will attempt to delete * takes over ownership of the native object or both will attempt to delete
* the underlying object when garbage collected. * the underlying object when garbage collected.
* * <p>
* When disOwnNativeHandle() is called, dispose() will simply set nativeHandle_ * When {@code disOwnNativeHandle()} is called, {@code dispose()} will simply set
* to 0 without releasing its associated C++ resource. As a result, * {@code nativeHandle_} to 0 without releasing its associated C++ resource.
* incorrectly use this function may cause memory leak, and this function call * As a result, incorrectly use this function may cause memory leak, and this
* will not affect the return value of isInitialized(). * function call will not affect the return value of {@code isInitialized()}.
* * </p>
* @see dispose() * @see #dispose()
* @see isInitialized() * @see #isInitialized()
*/ */
protected void disOwnNativeHandle() { protected void disOwnNativeHandle() {
owningHandle_ = false; owningHandle_ = false;
} }
/** /**
* Returns true if the current RocksObject is responsable to release its * Returns true if the current {@code RocksObject} is responsible to release
* native handle. * its native handle.
* *
* @return true if the current RocksObject is responsible to release its * @return true if the current {@code RocksObject} is responsible to release
* native handle. * its native handle.
* *
* @see disOwnNativeHandle() * @see #disOwnNativeHandle()
* @see dispose() * @see #dispose()
*/ */
protected boolean isOwningNativeHandle() { protected boolean isOwningNativeHandle() {
return owningHandle_; return owningHandle_;
@ -90,14 +99,14 @@ public abstract class RocksObject {
* *
* @return true if the associated native handle has been initialized. * @return true if the associated native handle has been initialized.
* *
* @see dispose() * @see #dispose()
*/ */
protected boolean isInitialized() { protected boolean isInitialized() {
return (nativeHandle_ != 0); return (nativeHandle_ != 0);
} }
/** /**
* Simply calls dispose() and release its c++ resource if it has not * Simply calls {@code dispose()} and release its c++ resource if it has not
* yet released. * yet released.
*/ */
@Override protected void finalize() { @Override protected void finalize() {
@ -110,8 +119,8 @@ public abstract class RocksObject {
protected long nativeHandle_; protected long nativeHandle_;
/** /**
* A flag indicating whether the current RocksObject is responsible to * A flag indicating whether the current {@code RocksObject} is responsible to
* release the c++ object stored in its nativeHandle_. * release the c++ object stored in its {@code nativeHandle_}.
*/ */
private boolean owningHandle_; private boolean owningHandle_;
} }

Loading…
Cancel
Save