Fix formatting identified by `arc lint`

Branch: main
Author: Adam Retter (9 years ago)
Parent: 0f2d2fcff6
Commit: 188bb2e7ad
17 changed files:

 1. java/rocksjni/options.cc (3 lines changed)
 2. java/rocksjni/portal.h (9 lines changed)
 3. java/rocksjni/rocksjni.cc (4 lines changed)
 4. java/src/main/java/org/rocksdb/AbstractWriteBatch.java (15 lines changed)
 5. java/src/main/java/org/rocksdb/BackupableDB.java (10 lines changed)
 6. java/src/main/java/org/rocksdb/BackupableDBOptions.java (66 lines changed)
 7. java/src/main/java/org/rocksdb/ColumnFamilyHandle.java (6 lines changed)
 8. java/src/main/java/org/rocksdb/ColumnFamilyOptions.java (31 lines changed)
 9. java/src/main/java/org/rocksdb/DBOptions.java (4 lines changed)
10. java/src/main/java/org/rocksdb/DirectComparator.java (3 lines changed)
11. java/src/main/java/org/rocksdb/Options.java (16 lines changed)
12. java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java (3 lines changed)
13. java/src/main/java/org/rocksdb/RestoreBackupableDB.java (3 lines changed)
14. java/src/main/java/org/rocksdb/RestoreOptions.java (11 lines changed)
15. java/src/main/java/org/rocksdb/TtlDB.java (7 lines changed)
16. java/src/main/java/org/rocksdb/WriteBatch.java (6 lines changed)
17. java/src/main/java/org/rocksdb/WriteBatchWithIndex.java (49 lines changed)

java/rocksjni/options.cc:

@@ -1085,7 +1085,8 @@ std::vector<rocksdb::CompressionType> rocksdb_compression_vector_helper(
   std::vector<rocksdb::CompressionType> compressionLevels;
   jsize len = env->GetArrayLength(jcompressionLevels);
-  jbyte* jcompressionLevel = env->GetByteArrayElements(jcompressionLevels, NULL);
+  jbyte* jcompressionLevel = env->GetByteArrayElements(jcompressionLevels,
+      NULL);
   for(int i = 0; i < len; i++) {
     jbyte jcl;
     jcl = jcompressionLevel[i];

java/rocksjni/portal.h:

@@ -52,7 +52,8 @@ template<class PTR, class DERIVED> class RocksDBNativeClass {
 };
 // Native class template for sub-classes of RocksMutableObject
-template<class PTR, class DERIVED> class NativeRocksMutableObject : public RocksDBNativeClass<PTR, DERIVED> {
+template<class PTR, class DERIVED> class NativeRocksMutableObject
+    : public RocksDBNativeClass<PTR, DERIVED> {
  public:
   static jmethodID getSetNativeHandleMethod(JNIEnv* env) {
@@ -63,8 +64,10 @@ template<class PTR, class DERIVED> class NativeRocksMutableObject : public Rocks
   }
   // Pass the pointer to the java side.
-  static void setHandle(JNIEnv* env, jobject jobj, PTR ptr, jboolean java_owns_handle) {
-    env->CallVoidMethod(jobj, getSetNativeHandleMethod(env), reinterpret_cast<jlong>(ptr), java_owns_handle);
+  static void setHandle(JNIEnv* env, jobject jobj, PTR ptr,
+      jboolean java_owns_handle) {
+    env->CallVoidMethod(jobj, getSetNativeHandleMethod(env),
+        reinterpret_cast<jlong>(ptr), java_owns_handle);
   }
 };

java/rocksjni/rocksjni.cc:

@@ -156,8 +156,8 @@ jlongArray Java_org_rocksdb_RocksDB_openROnly__JLjava_lang_String_2_3_3B_3J(
 jlongArray Java_org_rocksdb_RocksDB_open__JLjava_lang_String_2_3_3B_3J(
     JNIEnv* env, jclass jcls, jlong jopt_handle, jstring jdb_path,
     jobjectArray jcolumn_names, jlongArray jcolumn_options) {
-  return rocksdb_open_helper(env, jopt_handle, jdb_path, jcolumn_names, jcolumn_options,
-      (rocksdb::Status(*)
+  return rocksdb_open_helper(env, jopt_handle, jdb_path, jcolumn_names,
+      jcolumn_options, (rocksdb::Status(*)
       (const rocksdb::DBOptions&, const std::string&,
       const std::vector<rocksdb::ColumnFamilyDescriptor>&,
       std::vector<rocksdb::ColumnFamilyHandle*>*, rocksdb::DB**)
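
Usage note (illustrative, not part of this commit): the JNI entry point above backs the column-family-aware RocksDB.open() on the Java side. A minimal sketch, assuming a made-up database path and a hypothetical "users" column family; resource-cleanup calls are omitted and vary by RocksDB version.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;
    import org.rocksdb.*;

    public class OpenWithColumnFamilies {
      public static void main(final String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        // One descriptor per column family to open; "users" is hypothetical.
        final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
            new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
            new ColumnFamilyDescriptor("users".getBytes()));
        // Populated by open() with one handle per descriptor.
        final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
        final DBOptions dbOptions = new DBOptions()
            .setCreateIfMissing(true)
            .setCreateMissingColumnFamilies(true);
        final RocksDB db = RocksDB.open(dbOptions, "/tmp/rocksdb-example",
            cfDescriptors, cfHandles);
        db.put(cfHandles.get(1), "alice".getBytes(), "1".getBytes());
      }
    }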

java/src/main/java/org/rocksdb/AbstractWriteBatch.java:

@@ -5,7 +5,8 @@
 package org.rocksdb;
-public abstract class AbstractWriteBatch extends RocksObject implements WriteBatchInterface {
+public abstract class AbstractWriteBatch extends RocksObject
+    implements WriteBatchInterface {
   protected AbstractWriteBatch(final long nativeHandle) {
     super(nativeHandle);
@@ -24,9 +25,11 @@ public abstract class AbstractWriteBatch extends RocksObject implements WriteBat
   }
   @Override
-  public void put(ColumnFamilyHandle columnFamilyHandle, byte[] key, byte[] value) {
+  public void put(ColumnFamilyHandle columnFamilyHandle, byte[] key,
+      byte[] value) {
     assert (isOwningHandle());
-    put(nativeHandle_, key, key.length, value, value.length, columnFamilyHandle.nativeHandle_);
+    put(nativeHandle_, key, key.length, value, value.length,
+        columnFamilyHandle.nativeHandle_);
   }
   @Override
@@ -36,9 +39,11 @@ public abstract class AbstractWriteBatch extends RocksObject implements WriteBat
   }
   @Override
-  public void merge(ColumnFamilyHandle columnFamilyHandle, byte[] key, byte[] value) {
+  public void merge(ColumnFamilyHandle columnFamilyHandle, byte[] key,
+      byte[] value) {
     assert (isOwningHandle());
-    merge(nativeHandle_, key, key.length, value, value.length, columnFamilyHandle.nativeHandle_);
+    merge(nativeHandle_, key, key.length, value, value.length,
+        columnFamilyHandle.nativeHandle_);
   }
   @Override
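
Usage note (illustrative, not part of this commit): the put/merge overloads rewrapped above are the column-family-aware batch operations. A minimal fragment reusing db and cfHandles from the open() sketch earlier, inside a method that may throw RocksDBException; merge() additionally requires a merge operator configured on the column family's options.

    final WriteBatch batch = new WriteBatch();
    // Stage writes against a specific column family, then commit atomically.
    batch.put(cfHandles.get(1), "alice".getBytes(), "1".getBytes());
    batch.merge(cfHandles.get(1), "logins".getBytes(), "1".getBytes());
    db.write(new WriteOptions(), batch);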

java/src/main/java/org/rocksdb/BackupableDB.java:

@@ -21,8 +21,8 @@ public class BackupableDB extends RocksDB {
    *
    * @param opt {@link org.rocksdb.Options} to set for the database.
    * @param bopt {@link org.rocksdb.BackupableDBOptions} to use.
-   * @param db_path Path to store data to. The path for storing the backup should be
-   *     specified in the {@link org.rocksdb.BackupableDBOptions}.
+   * @param db_path Path to store data to. The path for storing the backup
+   *     should be specified in the {@link org.rocksdb.BackupableDBOptions}.
    *
    * @return {@link BackupableDB} reference to the opened database.
    *
@@ -34,7 +34,8 @@ public class BackupableDB extends RocksDB {
       throws RocksDBException {
     final RocksDB db = RocksDB.open(opt, db_path);
-    final BackupableDB bdb = new BackupableDB(open(db.nativeHandle_, bopt.nativeHandle_));
+    final BackupableDB bdb = new BackupableDB(open(db.nativeHandle_,
+        bopt.nativeHandle_));
     // Prevent the RocksDB object from attempting to delete
     // the underly C++ DB object.
@@ -151,7 +152,8 @@ public class BackupableDB extends RocksDB {
     super.finalize();
   }
-  protected native static long open(final long rocksDBHandle, final long backupDBOptionsHandle);
+  protected native static long open(final long rocksDBHandle,
+      final long backupDBOptionsHandle);
   protected native void createNewBackup(long handle, boolean flag)
       throws RocksDBException;
   protected native void purgeOldBackups(long handle, int numBackupsToKeep)
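
Usage note (illustrative, not part of this commit): the open() wrapper rewrapped above is the public entry point for backup-enabled databases. A minimal sketch with made-up paths:

    final Options opt = new Options().setCreateIfMissing(true);
    final BackupableDBOptions bopt =
        new BackupableDBOptions("/tmp/rocksdb-backups");
    final BackupableDB bdb =
        BackupableDB.open(opt, bopt, "/tmp/rocksdb-example");
    bdb.put("key".getBytes(), "value".getBytes());
    // true: flush the memtable before the backup is taken.
    bdb.createNewBackup(true);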

java/src/main/java/org/rocksdb/BackupableDBOptions.java:

@@ -21,8 +21,8 @@ public class BackupableDBOptions extends RocksObject {
   /**
    * <p>BackupableDBOptions constructor.</p>
    *
-   * @param path Where to keep the backup files. Has to be different than db name.
-   *     Best to set this to {@code db name_ + "/backups"}
+   * @param path Where to keep the backup files. Has to be different than db
+   *     name. Best to set this to {@code db name_ + "/backups"}
    * @throws java.lang.IllegalArgumentException if illegal path is used.
    */
   public BackupableDBOptions(final String path) {
@@ -31,7 +31,8 @@ public class BackupableDBOptions extends RocksObject {
   private static String ensureWritableFile(final String path) {
     final File backupPath = path == null ? null : new File(path);
-    if (backupPath == null || !backupPath.isDirectory() || !backupPath.canWrite()) {
+    if (backupPath == null || !backupPath.isDirectory() ||
+          !backupPath.canWrite()) {
       throw new IllegalArgumentException("Illegal path provided.");
     } else {
       return path;
@@ -51,10 +52,11 @@ public class BackupableDBOptions extends RocksObject {
   /**
    * <p>Share table files between backups.</p>
    *
-   * @param shareTableFiles If {@code share_table_files == true}, backup will assume
-   *     that table files with same name have the same contents. This enables incremental
-   *     backups and avoids unnecessary data copies. If {@code share_table_files == false},
-   *     each backup will be on its own and will not share any data with other backups.
+   * @param shareTableFiles If {@code share_table_files == true}, backup will
+   *     assume that table files with same name have the same contents. This
+   *     enables incremental backups and avoids unnecessary data copies. If
+   *     {@code share_table_files == false}, each backup will be on its own and
+   *     will not share any data with other backups.
    *
    * <p>Default: true</p>
    *
@@ -80,10 +82,10 @@ public class BackupableDBOptions extends RocksObject {
   /**
    * <p>Set synchronous backups.</p>
    *
-   * @param sync If {@code sync == true}, we can guarantee you'll get consistent backup
-   *     even on a machine crash/reboot. Backup process is slower with sync enabled.
-   *     If {@code sync == false}, we don't guarantee anything on machine reboot.
-   *     However,chances are some of the backups are consistent.
+   * @param sync If {@code sync == true}, we can guarantee you'll get consistent
+   *     backup even on a machine crash/reboot. Backup process is slower with sync
+   *     enabled. If {@code sync == false}, we don't guarantee anything on machine
+   *     reboot. However, chances are some of the backups are consistent.
    *
    * <p>Default: true</p>
    *
@@ -108,7 +110,8 @@ public class BackupableDBOptions extends RocksObject {
   /**
    * <p>Set if old data will be destroyed.</p>
    *
-   * @param destroyOldData If true, it will delete whatever backups there are already.
+   * @param destroyOldData If true, it will delete whatever backups there are
+   *     already.
    *
    * <p>Default: false</p>
    *
@@ -133,9 +136,9 @@ public class BackupableDBOptions extends RocksObject {
   /**
    * <p>Set if log files shall be persisted.</p>
    *
-   * @param backupLogFiles If false, we won't backup log files. This option can be
-   *     useful for backing up in-memory databases where log file are persisted,but table
-   *     files are in memory.
+   * @param backupLogFiles If false, we won't backup log files. This option can
+   *     be useful for backing up in-memory databases where log file are
+   *     persisted, but table files are in memory.
    *
    * <p>Default: true</p>
    *
@@ -160,8 +163,8 @@ public class BackupableDBOptions extends RocksObject {
   /**
    * <p>Set backup rate limit.</p>
    *
-   * @param backupRateLimit Max bytes that can be transferred in a second during backup.
-   *     If 0 or negative, then go as fast as you can.
+   * @param backupRateLimit Max bytes that can be transferred in a second during
+   *     backup. If 0 or negative, then go as fast as you can.
    *
    * <p>Default: 0</p>
    *
@@ -175,10 +178,11 @@ public class BackupableDBOptions extends RocksObject {
   }
   /**
-   * <p>Return backup rate limit which described the max bytes that can be transferred in a
-   * second during backup.</p>
+   * <p>Return backup rate limit which described the max bytes that can be
+   * transferred in a second during backup.</p>
    *
-   * @return numerical value describing the backup transfer limit in bytes per second.
+   * @return numerical value describing the backup transfer limit in bytes per
+   *     second.
    */
   public long backupRateLimit() {
     assert(isOwningHandle());
@@ -188,8 +192,8 @@ public class BackupableDBOptions extends RocksObject {
   /**
    * <p>Set restore rate limit.</p>
    *
-   * @param restoreRateLimit Max bytes that can be transferred in a second during restore.
-   *     If 0 or negative, then go as fast as you can.
+   * @param restoreRateLimit Max bytes that can be transferred in a second
+   *     during restore. If 0 or negative, then go as fast as you can.
    *
    * <p>Default: 0</p>
    *
@@ -203,10 +207,11 @@ public class BackupableDBOptions extends RocksObject {
   }
   /**
-   * <p>Return restore rate limit which described the max bytes that can be transferred in a
-   * second during restore.</p>
+   * <p>Return restore rate limit which described the max bytes that can be
+   * transferred in a second during restore.</p>
    *
-   * @return numerical value describing the restore transfer limit in bytes per second.
+   * @return numerical value describing the restore transfer limit in bytes per
+   *     second.
    */
   public long restoreRateLimit() {
     assert(isOwningHandle());
@@ -214,12 +219,13 @@ public class BackupableDBOptions extends RocksObject {
   }
   /**
-   * <p>Only used if share_table_files is set to true. If true, will consider that
-   * backups can come from different databases, hence a sst is not uniquely
-   * identified by its name, but by the triple (file name, crc32, file length)</p>
+   * <p>Only used if share_table_files is set to true. If true, will consider
+   * that backups can come from different databases, hence a sst is not uniquely
+   * identified by its name, but by the triple (file name, crc32, file length)
+   * </p>
    *
-   * @param shareFilesWithChecksum boolean value indicating if SST files are stored
-   *     using the triple (file name, crc32, file length) and not its name.
+   * @param shareFilesWithChecksum boolean value indicating if SST files are
+   *     stored using the triple (file name, crc32, file length) and not its name.
    *
    * <p>Note: this is an experimental option, and you'll need to set it manually
    * turn it on only if you know what you're doing*</p>
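
Usage note (illustrative, not part of this commit): the javadoc rewrapped above documents these options. A minimal configuration sketch with a made-up backup path; the setter names mirror the documented parameters.

    final BackupableDBOptions bopt =
        new BackupableDBOptions("/tmp/rocksdb-backups");
    bopt.setShareTableFiles(true);       // enable incremental backups
    bopt.setSync(true);                  // consistent across crash/reboot
    bopt.setDestroyOldData(false);       // keep existing backups
    bopt.setBackupLogFiles(true);        // also copy WAL files
    bopt.setBackupRateLimit(4 * 1024 * 1024);  // at most 4 MB/s during backup
    bopt.setShareFilesWithChecksum(true);  // key SSTs by (name, crc32, length)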

java/src/main/java/org/rocksdb/ColumnFamilyHandle.java:

@@ -25,9 +25,9 @@ public class ColumnFamilyHandle extends RocksObject {
    * <p>Deletes underlying C++ iterator pointer.</p>
    *
    * <p>Note: the underlying handle can only be safely deleted if the RocksDB
-   * instance related to a certain ColumnFamilyHandle is still valid and initialized.
-   * Therefore {@code disposeInternal()} checks if the RocksDB is initialized
-   * before freeing the native handle.</p>
+   * instance related to a certain ColumnFamilyHandle is still valid and
+   * initialized. Therefore {@code disposeInternal()} checks if the RocksDB is
+   * initialized before freeing the native handle.</p>
    */
   @Override
   protected void disposeInternal() {

java/src/main/java/org/rocksdb/ColumnFamilyOptions.java:

@@ -13,8 +13,8 @@ import java.util.Properties;
  * ColumnFamilyOptions to control the behavior of a database. It will be used
  * during the creation of a {@link org.rocksdb.RocksDB} (i.e., RocksDB.open()).
  *
- * If {@link #dispose()} function is not called, then it will be GC'd automatically
- * and native resources will be released as part of the process.
+ * If {@link #dispose()} function is not called, then it will be GC'd
+ * automatically and native resources will be released as part of the process.
  */
 public class ColumnFamilyOptions extends RocksObject
     implements ColumnFamilyOptionsInterface {
@@ -112,7 +112,8 @@ public class ColumnFamilyOptions extends RocksObject
   }
   @Override
-  public ColumnFamilyOptions setComparator(final BuiltinComparator builtinComparator) {
+  public ColumnFamilyOptions setComparator(
+      final BuiltinComparator builtinComparator) {
     assert(isOwningHandle());
     setComparatorHandle(nativeHandle_, builtinComparator.ordinal());
     return this;
@@ -139,13 +140,15 @@ public class ColumnFamilyOptions extends RocksObject
   }
   @Override
-  public ColumnFamilyOptions setMergeOperator(final MergeOperator mergeOperator) {
+  public ColumnFamilyOptions setMergeOperator(
+      final MergeOperator mergeOperator) {
     setMergeOperator(nativeHandle_, mergeOperator.newMergeOperatorHandle());
     return this;
   }
   public ColumnFamilyOptions setCompactionFilter(
-      final AbstractCompactionFilter<? extends AbstractSlice<?>> compactionFilter) {
+      final AbstractCompactionFilter<? extends AbstractSlice<?>>
+          compactionFilter) {
     setCompactionFilterHandle(nativeHandle_, compactionFilter.nativeHandle_);
     compactionFilter_ = compactionFilter;
     return this;
@@ -205,7 +208,8 @@ public class ColumnFamilyOptions extends RocksObject
   }
   @Override
-  public ColumnFamilyOptions setCompressionType(final CompressionType compressionType) {
+  public ColumnFamilyOptions setCompressionType(
+      final CompressionType compressionType) {
     setCompressionType(nativeHandle_, compressionType.getValue());
     return this;
   }
@@ -522,7 +526,8 @@ public class ColumnFamilyOptions extends RocksObject
   @Override
   public ColumnFamilyOptions setMaxSequentialSkipInIterations(
       final long maxSequentialSkipInIterations) {
-    setMaxSequentialSkipInIterations(nativeHandle_, maxSequentialSkipInIterations);
+    setMaxSequentialSkipInIterations(nativeHandle_,
+        maxSequentialSkipInIterations);
     return this;
   }
@@ -677,12 +682,12 @@ public class ColumnFamilyOptions extends RocksObject
   private native void optimizeUniversalStyleCompaction(long handle,
       long memtableMemoryBudget);
   private native void setComparatorHandle(long handle, int builtinComparator);
-  private native void setComparatorHandle(long optHandle, long comparatorHandle);
-  private native void setMergeOperatorName(
-      long handle, String name);
-  private native void setMergeOperator(
-      long handle, long mergeOperatorHandle);
-  private native void setCompactionFilterHandle(long handle, long compactionFilterHandle);
+  private native void setComparatorHandle(long optHandle,
+      long comparatorHandle);
+  private native void setMergeOperatorName(long handle, String name);
+  private native void setMergeOperator(long handle, long mergeOperatorHandle);
+  private native void setCompactionFilterHandle(long handle,
+      long compactionFilterHandle);
   private native void setWriteBufferSize(long handle, long writeBufferSize)
       throws IllegalArgumentException;
   private native long writeBufferSize(long handle);
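
Usage note (illustrative, not part of this commit): the setters rewrapped above each return this, so they chain. A minimal sketch:

    final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
        .setComparator(BuiltinComparator.BYTEWISE_COMPARATOR)
        .setCompressionType(CompressionType.SNAPPY_COMPRESSION)
        .setMaxSequentialSkipInIterations(8);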

java/src/main/java/org/rocksdb/DBOptions.java:

@@ -11,8 +11,8 @@ import java.util.Properties;
  * DBOptions to control the behavior of a database. It will be used
  * during the creation of a {@link org.rocksdb.RocksDB} (i.e., RocksDB.open()).
  *
- * If {@link #dispose()} function is not called, then it will be GC'd automatically
- * and native resources will be released as part of the process.
+ * If {@link #dispose()} function is not called, then it will be GC'd
+ * automatically and native resources will be released as part of the process.
  */
 public class DBOptions extends RocksObject implements DBOptionsInterface {
   static {

java/src/main/java/org/rocksdb/DirectComparator.java:

@@ -28,5 +28,6 @@ public abstract class DirectComparator extends AbstractComparator<DirectSlice> {
     return nativeHandle_;
   }
-  private native long createNewDirectComparator0(final long comparatorOptionsHandle);
+  private native long createNewDirectComparator0(
+      final long comparatorOptionsHandle);
 }
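
Usage note (illustrative, not part of this commit): DirectComparator is the DirectSlice-based variant of AbstractComparator. A hedged sketch of a custom subclass; the comparator name and the reverse ordering are made up, and comparing via toString() is only for brevity.

    final ComparatorOptions copt = new ComparatorOptions();
    final DirectComparator reverseComparator = new DirectComparator(copt) {
      @Override
      public String name() {
        return "example.ReverseBytewiseComparator";  // hypothetical name
      }

      @Override
      public int compare(final DirectSlice a, final DirectSlice b) {
        // Reverse of the natural ordering of the slices' string form.
        return -a.toString().compareTo(b.toString());
      }
    };
    // Attach it so the native side calls back through the JNI handle:
    // new ColumnFamilyOptions().setComparator(reverseComparator);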

java/src/main/java/org/rocksdb/Options.java:

@@ -12,8 +12,8 @@ import java.util.List;
  * Options to control the behavior of a database. It will be used
  * during the creation of a {@link org.rocksdb.RocksDB} (i.e., RocksDB.open()).
  *
- * If {@link #dispose()} function is not called, then it will be GC'd automatically
- * and native resources will be released as part of the process.
+ * If {@link #dispose()} function is not called, then it will be GC'd
+ * automaticallyand native resources will be released as part of the process.
  */
 public class Options extends RocksObject
     implements DBOptionsInterface, ColumnFamilyOptionsInterface {
@@ -41,7 +41,8 @@ public class Options extends RocksObject
    */
   public Options(final DBOptions dbOptions,
       final ColumnFamilyOptions columnFamilyOptions) {
-    super(newOptions(dbOptions.nativeHandle_, columnFamilyOptions.nativeHandle_));
+    super(newOptions(dbOptions.nativeHandle_,
+        columnFamilyOptions.nativeHandle_));
     env_ = Env.getDefault();
   }
@@ -678,7 +679,8 @@ public class Options extends RocksObject
   }
   @Override
-  public Options setCompressionPerLevel(final List<CompressionType> compressionLevels) {
+  public Options setCompressionPerLevel(
+      final List<CompressionType> compressionLevels) {
     final byte[] byteCompressionTypes = new byte[
         compressionLevels.size()];
     for (int i = 0; i < compressionLevels.size(); i++) {
@@ -973,7 +975,8 @@ public class Options extends RocksObject
   @Override
   public Options setMaxSequentialSkipInIterations(
       final long maxSequentialSkipInIterations) {
-    setMaxSequentialSkipInIterations(nativeHandle_, maxSequentialSkipInIterations);
+    setMaxSequentialSkipInIterations(nativeHandle_,
+        maxSequentialSkipInIterations);
     return this;
   }
@@ -1189,7 +1192,8 @@ public class Options extends RocksObject
   private native void optimizeUniversalStyleCompaction(long handle,
       long memtableMemoryBudget);
   private native void setComparatorHandle(long handle, int builtinComparator);
-  private native void setComparatorHandle(long optHandle, long comparatorHandle);
+  private native void setComparatorHandle(long optHandle,
+      long comparatorHandle);
   private native void setMergeOperatorName(
       long handle, String name);
   private native void setMergeOperator(
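
Usage note (illustrative, not part of this commit): setCompressionPerLevel, rewrapped above, takes one CompressionType per LSM level. A minimal sketch for a 7-level setup, assuming the java.util imports from the open() sketch earlier:

    final List<CompressionType> levels = Arrays.asList(
        CompressionType.NO_COMPRESSION,      // L0
        CompressionType.NO_COMPRESSION,      // L1
        CompressionType.SNAPPY_COMPRESSION,  // L2
        CompressionType.SNAPPY_COMPRESSION,  // L3
        CompressionType.SNAPPY_COMPRESSION,  // L4
        CompressionType.ZLIB_COMPRESSION,    // L5
        CompressionType.ZLIB_COMPRESSION);   // L6
    final Options options = new Options()
        .setNumLevels(7)
        .setCompressionPerLevel(levels);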

java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java:

@@ -8,7 +8,8 @@ package org.rocksdb;
 /**
  * Just a Java wrapper around EmptyValueCompactionFilter implemented in C++
  */
-public class RemoveEmptyValueCompactionFilter extends AbstractCompactionFilter<Slice> {
+public class RemoveEmptyValueCompactionFilter
+    extends AbstractCompactionFilter<Slice> {
   public RemoveEmptyValueCompactionFilter() {
     super(createNewRemoveEmptyValueCompactionFilter0());
   }
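
Usage note (illustrative, not part of this commit): the wrapper class rewrapped above plugs into setCompactionFilter() from the ColumnFamilyOptions diff earlier, so empty-valued keys are dropped during compaction:

    final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
        .setCompactionFilter(new RemoveEmptyValueCompactionFilter());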

java/src/main/java/org/rocksdb/RestoreBackupableDB.java:

@@ -153,5 +153,6 @@ public class RestoreBackupableDB extends RocksObject {
   private native int[] getCorruptedBackups(long handle);
   private native void garbageCollect(long handle)
       throws RocksDBException;
-  @Override protected final native void disposeInternal(final long nativeHandle);
+  @Override protected final native void disposeInternal(
+      final long nativeHandle);
 }

java/src/main/java/org/rocksdb/RestoreOptions.java:

@@ -16,11 +16,12 @@ public class RestoreOptions extends RocksObject {
   /**
    * Constructor
    *
-   * @param keepLogFiles If true, restore won't overwrite the existing log files in wal_dir. It
-   *     will also move all log files from archive directory to wal_dir. Use this
-   *     option in combination with BackupableDBOptions::backup_log_files = false
-   *     for persisting in-memory databases.
-   *     Default: false
+   * @param keepLogFiles If true, restore won't overwrite the existing log files
+   *     in wal_dir. It will also move all log files from archive directory to
+   *     wal_dir. Use this option in combination with
+   *     BackupableDBOptions::backup_log_files = false for persisting in-memory
+   *     databases.
+   *     Default: false
    */
   public RestoreOptions(final boolean keepLogFiles) {
     super(newRestoreOptions(keepLogFiles));
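
Usage note (illustrative, not part of this commit): RestoreOptions feeds the restore path in RestoreBackupableDB. A minimal sketch with made-up paths; restoreDBFromLatestBackup is the restore method in this era's API, but treat the exact signature as an assumption.

    final BackupableDBOptions bopt =
        new BackupableDBOptions("/tmp/rocksdb-backups");
    // keepLogFiles == false is fine unless WAL files must survive the restore.
    final RestoreOptions ropt = new RestoreOptions(false);
    final RestoreBackupableDB rdb = new RestoreBackupableDB(bopt);
    rdb.restoreDBFromLatestBackup("/tmp/rocksdb-example",
        "/tmp/rocksdb-example", ropt);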

java/src/main/java/org/rocksdb/TtlDB.java:

@@ -112,14 +112,15 @@ public class TtlDB extends RocksDB {
       final List<Integer> ttlValues, final boolean readOnly)
       throws RocksDBException {
     if (columnFamilyDescriptors.size() != ttlValues.size()) {
-      throw new IllegalArgumentException("There must be a ttl value per column" +
-          "family handle.");
+      throw new IllegalArgumentException("There must be a ttl value per column"
+          + "family handle.");
     }
     final byte[][] cfNames = new byte[columnFamilyDescriptors.size()][];
     final long[] cfOptionHandles = new long[columnFamilyDescriptors.size()];
     for (int i = 0; i < columnFamilyDescriptors.size(); i++) {
-      final ColumnFamilyDescriptor cfDescriptor = columnFamilyDescriptors.get(i);
+      final ColumnFamilyDescriptor cfDescriptor =
+          columnFamilyDescriptors.get(i);
       cfNames[i] = cfDescriptor.columnFamilyName();
       cfOptionHandles[i] = cfDescriptor.columnFamilyOptions().nativeHandle_;
     }
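
Usage note (illustrative, not part of this commit): the guard reformatted above enforces one TTL per column family descriptor. A minimal sketch reusing the descriptor/handle pattern from the open() sketch; the "sessions" family is hypothetical, and a TTL of 0 means no expiry.

    final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
        new ColumnFamilyDescriptor("sessions".getBytes()));
    final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
    final List<Integer> ttlValues = Arrays.asList(0, 3600);  // one per family
    final TtlDB ttlDb = TtlDB.open(
        new DBOptions()
            .setCreateIfMissing(true)
            .setCreateMissingColumnFamilies(true),
        "/tmp/rocksdb-ttl", cfDescriptors, cfHandles, ttlValues,
        false /* readOnly */);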

java/src/main/java/org/rocksdb/WriteBatch.java:

@@ -84,13 +84,15 @@ public class WriteBatch extends AbstractWriteBatch {
   @Override final native void clear0(final long handle);
   private native static long newWriteBatch(final int reserved_bytes);
-  private native void iterate(final long handle, final long handlerHandle) throws RocksDBException;
+  private native void iterate(final long handle, final long handlerHandle)
+      throws RocksDBException;
   /**
    * Handler callback for iterating over the contents of a batch.
    */
-  public static abstract class Handler extends AbstractImmutableNativeReference {
+  public static abstract class Handler
+      extends AbstractImmutableNativeReference {
     private final long nativeHandle_;
     public Handler() {
       super(true);
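
Usage note (illustrative, not part of this commit): Handler, rewrapped above, is the callback type for WriteBatch.iterate(). A minimal sketch, assuming the four byte[]-based callbacks of this era's API and a batch from the earlier fragment:

    final WriteBatch.Handler handler = new WriteBatch.Handler() {
      @Override
      public void put(final byte[] key, final byte[] value) {
        System.out.println("put " + new String(key));
      }
      @Override
      public void merge(final byte[] key, final byte[] value) {}
      @Override
      public void delete(final byte[] key) {}
      @Override
      public void logData(final byte[] blob) {}
    };
    batch.iterate(handler);  // replays the batch through the callbacks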

java/src/main/java/org/rocksdb/WriteBatchWithIndex.java:

@@ -12,10 +12,10 @@ package org.rocksdb;
  * Calling put, merge, remove or putLogData calls the same function
  * as with {@link org.rocksdb.WriteBatch} whilst also building an index.
  *
- * A user can call {@link org.rocksdb.WriteBatchWithIndex#newIterator() }to create an iterator
- * over the write batch or
- * {@link org.rocksdb.WriteBatchWithIndex#newIteratorWithBase(org.rocksdb.RocksIterator)} to
- * get an iterator for the database with Read-Your-Own-Writes like capability
+ * A user can call {@link org.rocksdb.WriteBatchWithIndex#newIterator()} to
+ * create an iterator over the write batch or
+ * {@link org.rocksdb.WriteBatchWithIndex#newIteratorWithBase(org.rocksdb.RocksIterator)}
+ * to get an iterator for the database with Read-Your-Own-Writes like capability
  */
 public class WriteBatchWithIndex extends AbstractWriteBatch {
   /**
@@ -56,9 +56,12 @@ public class WriteBatchWithIndex extends AbstractWriteBatch {
    * inserting a duplicate key, in this way an iterator will never
    * show two entries with the same key.
    */
-  public WriteBatchWithIndex(final AbstractComparator<? extends AbstractSlice<?>>
-      fallbackIndexComparator, final int reservedBytes, final boolean overwriteKey) {
-    super(newWriteBatchWithIndex(fallbackIndexComparator.getNativeHandle(), reservedBytes, overwriteKey));
+  public WriteBatchWithIndex(
+      final AbstractComparator<? extends AbstractSlice<?>>
+          fallbackIndexComparator, final int reservedBytes,
+      final boolean overwriteKey) {
+    super(newWriteBatchWithIndex(fallbackIndexComparator.getNativeHandle(),
+        reservedBytes, overwriteKey));
   }
   /**
@@ -70,9 +73,11 @@ public class WriteBatchWithIndex extends AbstractWriteBatch {
    * time.
    *
    * @param columnFamilyHandle The column family to iterate over
-   * @return An iterator for the Write Batch contents, restricted to the column family
+   * @return An iterator for the Write Batch contents, restricted to the column
+   *     family
    */
-  public WBWIRocksIterator newIterator(final ColumnFamilyHandle columnFamilyHandle) {
+  public WBWIRocksIterator newIterator(
+      final ColumnFamilyHandle columnFamilyHandle) {
     return new WBWIRocksIterator(this, iterator1(nativeHandle_,
         columnFamilyHandle.nativeHandle_));
   }
@@ -97,11 +102,13 @@ public class WriteBatchWithIndex extends AbstractWriteBatch {
    * as a delta and baseIterator as a base
    *
    * @param columnFamilyHandle The column family to iterate over
-   * @param baseIterator The base iterator, e.g. {@link org.rocksdb.RocksDB#newIterator()}
-   * @return An iterator which shows a view comprised of both the database point-in-time
-   *     from baseIterator and modifications made in this write batch.
+   * @param baseIterator The base iterator,
+   *     e.g. {@link org.rocksdb.RocksDB#newIterator()}
+   * @return An iterator which shows a view comprised of both the database
+   *     point-in-time from baseIterator and modifications made in this write batch.
    */
-  public RocksIterator newIteratorWithBase(final ColumnFamilyHandle columnFamilyHandle,
+  public RocksIterator newIteratorWithBase(
+      final ColumnFamilyHandle columnFamilyHandle,
       final RocksIterator baseIterator) {
     RocksIterator iterator = new RocksIterator(
         baseIterator.parent_,
@@ -116,14 +123,17 @@ public class WriteBatchWithIndex extends AbstractWriteBatch {
   /**
    * Provides Read-Your-Own-Writes like functionality by
    * creating a new Iterator that will use {@link org.rocksdb.WBWIRocksIterator}
-   * as a delta and baseIterator as a base. Operates on the default column family.
+   * as a delta and baseIterator as a base. Operates on the default column
+   * family.
    *
-   * @param baseIterator The base iterator, e.g. {@link org.rocksdb.RocksDB#newIterator()}
-   * @return An iterator which shows a view comprised of both the database point-in-time
-   *     from baseIterator and modifications made in this write batch.
+   * @param baseIterator The base iterator,
+   *     e.g. {@link org.rocksdb.RocksDB#newIterator()}
+   * @return An iterator which shows a view comprised of both the database
+   *     point-in-timefrom baseIterator and modifications made in this write batch.
    */
   public RocksIterator newIteratorWithBase(final RocksIterator baseIterator) {
-    return newIteratorWithBase(baseIterator.parent_.getDefaultColumnFamily(), baseIterator);
+    return newIteratorWithBase(baseIterator.parent_.getDefaultColumnFamily(),
+        baseIterator);
   }
   @Override protected final native void disposeInternal(final long handle);
@@ -153,5 +163,6 @@ public class WriteBatchWithIndex extends AbstractWriteBatch {
       final boolean overwriteKey);
   private native long iterator0(final long handle);
   private native long iterator1(final long handle, final long cfHandle);
-  private native long iteratorWithBase(final long handle, final long baseIteratorHandle, final long cfHandle);
+  private native long iteratorWithBase(final long handle,
+      final long baseIteratorHandle, final long cfHandle);
 }
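
Usage note (illustrative, not part of this commit): the newIteratorWithBase() overloads rewrapped above provide Read-Your-Own-Writes reads. A minimal sketch reusing db from the open() sketch; the boolean overwriteKey constructor is an assumption about this era's API.

    final WriteBatchWithIndex wbwi =
        new WriteBatchWithIndex(true /* overwriteKey */);
    wbwi.put("k1".getBytes(), "uncommitted".getBytes());
    // Merged view: committed data from the base iterator, overlaid with the
    // still-unwritten contents of this batch.
    final RocksIterator it = wbwi.newIteratorWithBase(db.newIterator());
    for (it.seekToFirst(); it.isValid(); it.next()) {
      System.out.println(new String(it.key()) + " = " + new String(it.value()));
    }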
