Minimal RocksJava compliance with Java 8 language level (EB 1046) (#10951)

Summary:
Apply a small (and automatic) set of IntelliJ Java inspections/repairs to the Java interface to RocksDB and its tests.
Partly enabled by the fact that we now (from RocksDB 7) require Java 8.

Explicit <p> tags added on empty lines in javadoc comments, as in the sketch below.

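For illustration, the recurring javadoc pattern from the diff below, compressed into a minimal sketch (the class and wording here are illustrative, not from the diff):

```java
public abstract class JavadocExample {
  /**
   * Deletes the underlying C++ pointer.
   *
   * Before: the bare empty line above separates paragraphs only visually.
   */
  abstract void before();

  /**
   * Deletes the underlying C++ pointer.
   * <p>
   * After: the explicit tag makes the paragraph break part of the rendered
   * HTML and satisfies the IntelliJ inspection for empty javadoc lines.
   */
  abstract void after();
}
```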
Parameters and variables made final where possible.
Anonymous subclasses converted to lambdas; both repairs are sketched below.
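A hedged sketch of both repairs together (the names are illustrative, not taken from this diff):

```java
import java.util.Comparator;

public class InspectionExample {
  // Before: non-final parameter, anonymous subclass of a functional interface.
  static Comparator<byte[]> before(int sign) {
    return new Comparator<byte[]>() {
      @Override
      public int compare(byte[] a, byte[] b) {
        return sign * Integer.compare(a.length, b.length);
      }
    };
  }

  // After: the parameter is final, and the single-method anonymous class
  // collapses to a lambda, which the Java 8 language level now permits.
  static Comparator<byte[]> after(final int sign) {
    return (a, b) -> sign * Integer.compare(a.length, b.length);
  }
}
```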

Some tests which previously used other assertion models were converted to AssertJ, e.g. assertThat(actual).isEqualTo(expected); see the sketch below.

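A minimal sketch of such a conversion, assuming a JUnit 4 test as used elsewhere in the RocksJava suite (the values and test name are illustrative):

```java
import static org.assertj.core.api.Assertions.assertThat;

import org.junit.Test;

public class AssertionStyleTest {
  @Test
  public void putThenGet() {
    final String expected = "value";
    final String actual = "value"; // stands in for e.g. a db.get(...) result
    // Before: org.junit.Assert.assertEquals(expected, actual);
    // After (AssertJ): actual value first, expectation chained.
    assertThat(actual).isEqualTo(expected);
  }
}
```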
In a very few cases tests were found to be inoperative or broken, and were repaired. No problems with actual RocksDB behaviour were observed.

This PR is intended to replace https://github.com/facebook/rocksdb/pull/9618 - that PR was not merged, and attempts to rebase it have yielded a questionable-looking diff, so we choose to go back to square one here and implement a conservative set of changes.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/10951

Reviewed By: anand1976

Differential Revision: D45057849

Pulled By: ajkr

fbshipit-source-id: e4ea46bfc80518ae86f37702b03ca9352bc11c3d
commit e110d713e0 (parent 586d78b31e)
Author: Alan Paxton, committed by Facebook GitHub Bot
 java/src/main/java/org/rocksdb/AbstractCompactionFilter.java | 8
 java/src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java | 4
 java/src/main/java/org/rocksdb/AbstractComparator.java | 6
 java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java | 22
 java/src/main/java/org/rocksdb/AbstractEventListener.java | 8
 java/src/main/java/org/rocksdb/AbstractMutableOptions.java | 24
 java/src/main/java/org/rocksdb/AbstractNativeReference.java | 3
 java/src/main/java/org/rocksdb/AbstractSlice.java | 8
 java/src/main/java/org/rocksdb/AbstractTraceWriter.java | 2
 java/src/main/java/org/rocksdb/AbstractTransactionNotifier.java | 4
 java/src/main/java/org/rocksdb/AbstractWalFilter.java | 2
 java/src/main/java/org/rocksdb/AbstractWriteBatch.java | 33
 java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java | 23
 java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java | 136
 java/src/main/java/org/rocksdb/BackupEngine.java | 16
 java/src/main/java/org/rocksdb/BackupEngineOptions.java | 32
 java/src/main/java/org/rocksdb/BackupInfo.java | 10
 java/src/main/java/org/rocksdb/BlockBasedTableConfig.java | 54
 java/src/main/java/org/rocksdb/BloomFilter.java | 2
 java/src/main/java/org/rocksdb/ByteBufferGetStatus.java | 2
 java/src/main/java/org/rocksdb/Cache.java | 4
 java/src/main/java/org/rocksdb/CassandraCompactionFilter.java | 5
 java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java | 6
 java/src/main/java/org/rocksdb/Checkpoint.java | 6
 java/src/main/java/org/rocksdb/ClockCache.java | 4
 java/src/main/java/org/rocksdb/ColumnFamilyHandle.java | 12
 java/src/main/java/org/rocksdb/ColumnFamilyOptions.java | 2
 java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java | 32
 java/src/main/java/org/rocksdb/CompactRangeOptions.java | 11
 java/src/main/java/org/rocksdb/CompactionJobInfo.java | 2
 java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java | 14
 java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java | 38
 java/src/main/java/org/rocksdb/CompactionStyle.java | 5
 java/src/main/java/org/rocksdb/ComparatorOptions.java | 16
 java/src/main/java/org/rocksdb/CompressionOptions.java | 15
 java/src/main/java/org/rocksdb/CompressionType.java | 6
 java/src/main/java/org/rocksdb/ConfigOptions.java | 12
 java/src/main/java/org/rocksdb/DBOptions.java | 8
 java/src/main/java/org/rocksdb/DirectSlice.java | 11
 java/src/main/java/org/rocksdb/EncodingType.java | 2
 java/src/main/java/org/rocksdb/Env.java | 2
 java/src/main/java/org/rocksdb/EnvOptions.java | 20
 java/src/main/java/org/rocksdb/EventListener.java | 38
 java/src/main/java/org/rocksdb/ExternalFileIngestionInfo.java | 4
 java/src/main/java/org/rocksdb/FileOperationInfo.java | 2
 java/src/main/java/org/rocksdb/FlushJobInfo.java | 8
 java/src/main/java/org/rocksdb/FlushOptions.java | 8
 java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java | 2
 java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java | 2
 java/src/main/java/org/rocksdb/HistogramType.java | 2
 java/src/main/java/org/rocksdb/IndexType.java | 2
 java/src/main/java/org/rocksdb/InfoLogLevel.java | 2
 java/src/main/java/org/rocksdb/IngestExternalFileOptions.java | 28
 java/src/main/java/org/rocksdb/KeyMayExist.java | 1
 java/src/main/java/org/rocksdb/LRUCache.java | 2
 java/src/main/java/org/rocksdb/Logger.java | 7
 java/src/main/java/org/rocksdb/MemTableConfig.java | 4
 java/src/main/java/org/rocksdb/MemTableInfo.java | 4
 java/src/main/java/org/rocksdb/MemoryUsageType.java | 2
 java/src/main/java/org/rocksdb/MemoryUtil.java | 22
 java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java | 22
 java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java | 18
 java/src/main/java/org/rocksdb/MutableDBOptions.java | 14
 java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java | 48
 java/src/main/java/org/rocksdb/MutableOptionValue.java | 3
 java/src/main/java/org/rocksdb/NativeComparatorWrapper.java | 2
 java/src/main/java/org/rocksdb/OperationType.java | 2
 java/src/main/java/org/rocksdb/OptimisticTransactionDB.java | 12
 java/src/main/java/org/rocksdb/OptimisticTransactionOptions.java | 2
 java/src/main/java/org/rocksdb/OptionString.java | 14
 java/src/main/java/org/rocksdb/Options.java | 84
 java/src/main/java/org/rocksdb/OptionsUtil.java | 25
 java/src/main/java/org/rocksdb/PersistentCache.java | 6
 java/src/main/java/org/rocksdb/PlainTableConfig.java | 16
 java/src/main/java/org/rocksdb/ReadOptions.java | 65
 java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java | 2
 java/src/main/java/org/rocksdb/RestoreOptions.java | 4
 java/src/main/java/org/rocksdb/RocksCallbackObject.java | 4
 java/src/main/java/org/rocksdb/RocksDB.java | 212
 java/src/main/java/org/rocksdb/RocksEnv.java | 3
 java/src/main/java/org/rocksdb/RocksMutableObject.java | 2
 java/src/main/java/org/rocksdb/Slice.java | 5
 java/src/main/java/org/rocksdb/Snapshot.java | 2
 java/src/main/java/org/rocksdb/SstFileManager.java | 22
 java/src/main/java/org/rocksdb/SstFileReader.java | 8
 java/src/main/java/org/rocksdb/SstFileWriter.java | 7
 java/src/main/java/org/rocksdb/SstPartitionerFixedPrefixFactory.java | 4
 java/src/main/java/org/rocksdb/StateType.java | 2
 java/src/main/java/org/rocksdb/Statistics.java | 11
 java/src/main/java/org/rocksdb/StatisticsCollector.java | 33
 java/src/main/java/org/rocksdb/StatisticsCollectorCallback.java | 2
 java/src/main/java/org/rocksdb/StatsLevel.java | 2
 java/src/main/java/org/rocksdb/Status.java | 6
 java/src/main/java/org/rocksdb/StringAppendOperator.java | 8
 java/src/main/java/org/rocksdb/TableFileCreationBriefInfo.java | 4
 java/src/main/java/org/rocksdb/TableFileCreationInfo.java | 4
 java/src/main/java/org/rocksdb/TableFileDeletionInfo.java | 4
 java/src/main/java/org/rocksdb/TableFormatConfig.java | 2
 java/src/main/java/org/rocksdb/TableProperties.java | 4
 java/src/main/java/org/rocksdb/ThreadStatus.java | 4
(Some files are not shown because too many files changed in this diff.)

--- a/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java
+++ b/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java
@@ -7,8 +7,8 @@ package org.rocksdb;
 /**
  * A CompactionFilter allows an application to modify/delete a key-value at
  * the time of compaction.
- *
- * At present we just permit an overriding Java class to wrap a C++
+ * <p>
+ * At present, we just permit an overriding Java class to wrap a C++
  * implementation
  */
 public abstract class AbstractCompactionFilter<T extends AbstractSlice<?>>
@@ -49,10 +49,10 @@ public abstract class AbstractCompactionFilter<T extends AbstractSlice<?>>
 /**
  * Deletes underlying C++ compaction pointer.
- *
+ * <p>
  * Note that this function should be called only after all
  * RocksDB instances referencing the compaction filter are closed.
- * Otherwise an undefined behavior will occur.
+ * Otherwise, an undefined behavior will occur.
  */
 @Override
 protected final native void disposeInternal(final long handle);

--- a/java/src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java
+++ b/java/src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java
@@ -15,7 +15,7 @@ public abstract class AbstractCompactionFilterFactory<T extends AbstractCompacti
 extends RocksCallbackObject {
 public AbstractCompactionFilterFactory() {
-  super(null);
+  super(0L);
 }
 @Override
@@ -55,7 +55,7 @@ public abstract class AbstractCompactionFilterFactory<T extends AbstractCompacti
 /**
  * A name which identifies this compaction filter
- *
+ * <p>
  * The name will be printed to the LOG file on start up for diagnosis
  *
  * @return name which identifies this compaction filter.
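A note on the super(null) repair above: assuming RocksCallbackObject's constructor takes its native handles as a long... varargs parameter (our reading of the class, worth verifying), a literal null binds to the array itself rather than to a single handle, so 0L expresses the intent directly. A standalone sketch of that trap:

```java
public class VarargsNullDemo {
  // Hypothetical stand-in for a constructor taking native handles as varargs.
  static void takesHandles(final long... handles) {
    // A literal null argument arrives as a null array, not an empty one.
    System.out.println(handles == null ? "null array" : handles.length + " handle(s)");
  }

  public static void main(String[] args) {
    takesHandles(null); // prints "null array"
    takesHandles(0L);   // prints "1 handle(s)" - the intent of the fix
  }
}
```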

--- a/java/src/main/java/org/rocksdb/AbstractComparator.java
+++ b/java/src/main/java/org/rocksdb/AbstractComparator.java
@@ -31,7 +31,7 @@ public abstract class AbstractComparator
 /**
  * Get the type of this comparator.
- *
+ * <p>
  * Used for determining the correct C++ cast in native code.
  *
  * @return The type of the comparator.
@@ -44,11 +44,11 @@ public abstract class AbstractComparator
  * The name of the comparator. Used to check for comparator
  * mismatches (i.e., a DB created with one comparator is
  * accessed using a different comparator).
- *
+ * <p>
  * A new name should be used whenever
  * the comparator implementation changes in a way that will cause
  * the relative ordering of any two keys to change.
- *
+ * <p>
  * Names starting with "rocksdb." are reserved and should not be used.
  *
  * @return The name of this comparator implementation

--- a/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java
+++ b/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java
@@ -18,10 +18,9 @@ import java.nio.ByteBuffer;
  * {@link org.rocksdb.AbstractComparator} clean.
  */
 class AbstractComparatorJniBridge {
 /**
  * Only called from JNI.
- *
+ * <p>
  * Simply a bridge to calling
  * {@link AbstractComparator#compare(ByteBuffer, ByteBuffer)},
  * which ensures that the byte buffer lengths are correct
@@ -38,10 +37,8 @@ class AbstractComparatorJniBridge {
  *
  * @return the result of the comparison
  */
-private static int compareInternal(
-    final AbstractComparator comparator,
-    final ByteBuffer a, final int aLen,
-    final ByteBuffer b, final int bLen) {
+private static int compareInternal(final AbstractComparator comparator, final ByteBuffer a,
+    final int aLen, final ByteBuffer b, final int bLen) {
 if (aLen != -1) {
 a.mark();
 a.limit(aLen);
@@ -65,7 +62,7 @@ class AbstractComparatorJniBridge {
 /**
  * Only called from JNI.
- *
+ * <p>
  * Simply a bridge to calling
  * {@link AbstractComparator#findShortestSeparator(ByteBuffer, ByteBuffer)},
  * which ensures that the byte buffer lengths are correct
@@ -83,10 +80,8 @@ class AbstractComparatorJniBridge {
  * @return either {@code startLen} if the start key is unchanged, otherwise
  *     the new length of the start key
  */
-private static int findShortestSeparatorInternal(
-    final AbstractComparator comparator,
-    final ByteBuffer start, final int startLen,
-    final ByteBuffer limit, final int limitLen) {
+private static int findShortestSeparatorInternal(final AbstractComparator comparator,
+    final ByteBuffer start, final int startLen, final ByteBuffer limit, final int limitLen) {
 if (startLen != -1) {
 start.limit(startLen);
 }
@@ -99,7 +94,7 @@ class AbstractComparatorJniBridge {
 /**
  * Only called from JNI.
- *
+ * <p>
  * Simply a bridge to calling
  * {@link AbstractComparator#findShortestSeparator(ByteBuffer, ByteBuffer)},
  * which ensures that the byte buffer length is correct
@@ -114,8 +109,7 @@ class AbstractComparatorJniBridge {
  * @return either keyLen if the key is unchanged, otherwise the new length of the key
  */
 private static int findShortSuccessorInternal(
-    final AbstractComparator comparator,
-    final ByteBuffer key, final int keyLen) {
+    final AbstractComparator comparator, final ByteBuffer key, final int keyLen) {
 if (keyLen != -1) {
 key.limit(keyLen);
 }

--- a/java/src/main/java/org/rocksdb/AbstractEventListener.java
+++ b/java/src/main/java/org/rocksdb/AbstractEventListener.java
@@ -71,8 +71,8 @@ public abstract class AbstractEventListener extends RocksCallbackObject implemen
 /**
  * Creates an Event Listener that will
- * received all callbacks from C++.
- *
+ * receive all callbacks from C++.
+ * <p>
  * If you don't need all callbacks, it is much more efficient to
  * just register for the ones you need by calling
  * {@link #AbstractEventListener(EnabledEventCallback...)} instead.
@@ -106,8 +106,8 @@ public abstract class AbstractEventListener extends RocksCallbackObject implemen
  */
 private static long packToLong(final EnabledEventCallback... enabledEventCallbacks) {
 long l = 0;
-for (int i = 0; i < enabledEventCallbacks.length; i++) {
-  l |= 1 << enabledEventCallbacks[i].getValue();
+for (final EnabledEventCallback enabledEventCallback : enabledEventCallbacks) {
+  l |= 1L << enabledEventCallback.getValue();
 }
 return l;
 }
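One of the genuine repairs is visible in packToLong above: shifting the int literal 1 takes the shift distance modulo 32 and sign-extends on widening, so any callback ordinal of 31 or more corrupts the mask; shifting 1L uses the full 64-bit range. A standalone sketch of the difference:

```java
public class ShiftWidthDemo {
  public static void main(String[] args) {
    final int value = 33; // a hypothetical callback ordinal >= 32
    final long broken = 1 << value; // int shift: distance is 33 % 32, yields 2
    final long fixed = 1L << value; // long shift: bit 33 set as intended
    System.out.println(broken + " vs " + fixed); // 2 vs 8589934592
  }
}
```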

--- a/java/src/main/java/org/rocksdb/AbstractMutableOptions.java
+++ b/java/src/main/java/org/rocksdb/AbstractMutableOptions.java
@@ -53,25 +53,23 @@ public abstract class AbstractMutableOptions {
 return buffer.toString();
 }
-public static abstract class AbstractMutableOptionsBuilder<
-    T extends AbstractMutableOptions,
-    U extends AbstractMutableOptionsBuilder<T, U, K>,
-    K extends MutableOptionKey> {
+public abstract static class AbstractMutableOptionsBuilder<
+    T extends AbstractMutableOptions, U extends AbstractMutableOptionsBuilder<T, U, K>, K
+    extends MutableOptionKey> {
 private final Map<K, MutableOptionValue<?>> options = new LinkedHashMap<>();
 private final List<OptionString.Entry> unknown = new ArrayList<>();
 protected abstract U self();
 /**
- * Get all of the possible keys
+ * Get all the possible keys
  *
  * @return A map of all keys, indexed by name.
  */
 protected abstract Map<String, K> allKeys();
 /**
- * Construct a sub-class instance of {@link AbstractMutableOptions}.
+ * Construct a subclass instance of {@link AbstractMutableOptions}.
  *
  * @param keys the keys
  * @param values the values
@@ -224,7 +222,7 @@ public abstract class AbstractMutableOptions {
 private long parseAsLong(final String value) {
 try {
 return Long.parseLong(value);
-} catch (NumberFormatException nfe) {
+} catch (final NumberFormatException nfe) {
 final double doubleValue = Double.parseDouble(value);
 if (doubleValue != Math.round(doubleValue))
   throw new IllegalArgumentException("Unable to parse or round " + value + " to long");
@@ -242,7 +240,7 @@ public abstract class AbstractMutableOptions {
 private int parseAsInt(final String value) {
 try {
 return Integer.parseInt(value);
-} catch (NumberFormatException nfe) {
+} catch (final NumberFormatException nfe) {
 final double doubleValue = Double.parseDouble(value);
 if (doubleValue != Math.round(doubleValue))
   throw new IllegalArgumentException("Unable to parse or round " + value + " to int");
@@ -271,7 +269,7 @@ public abstract class AbstractMutableOptions {
 throw new IllegalArgumentException("options string is invalid: " + option);
 }
 fromOptionString(option, ignoreUnknown);
-} catch (NumberFormatException nfe) {
+} catch (final NumberFormatException nfe) {
 throw new IllegalArgumentException(
     "" + option.key + "=" + option.value + " - not a valid value for its type", nfe);
 }
@@ -287,7 +285,7 @@ public abstract class AbstractMutableOptions {
  * @param ignoreUnknown if this is not set, throw an exception when a key is not in the known
  *     set
  * @return the same object, after adding options
- * @throws IllegalArgumentException if the key is unkown, or a value has the wrong type/form
+ * @throws IllegalArgumentException if the key is unknown, or a value has the wrong type/form
  */
 private U fromOptionString(final OptionString.Entry option, final boolean ignoreUnknown)
     throws IllegalArgumentException {
@@ -299,7 +297,7 @@ public abstract class AbstractMutableOptions {
 unknown.add(option);
 return self();
 } else if (key == null) {
-throw new IllegalArgumentException("Key: " + key + " is not a known option key");
+throw new IllegalArgumentException("Key: " + null + " is not a known option key");
 }
 if (!option.value.isList()) {
@@ -341,7 +339,7 @@ public abstract class AbstractMutableOptions {
 return setIntArray(key, value);
 case ENUM:
-String optionName = key.name();
+final String optionName = key.name();
 if (optionName.equals("prepopulate_blob_cache")) {
 final PrepopulateBlobCache prepopulateBlobCache =
     PrepopulateBlobCache.getFromInternal(valueStr);

--- a/java/src/main/java/org/rocksdb/AbstractNativeReference.java
+++ b/java/src/main/java/org/rocksdb/AbstractNativeReference.java
@@ -16,8 +16,9 @@ package org.rocksdb;
  * <a
  * href="https://docs.oracle.com/javase/tutorial/essential/exceptions/tryResourceClose.html">try-with-resources</a>
  * statement, when you are finished with the object. It is no longer
- * called automatically during the regular Java GC process via
+ * called automatically during the regular Java GC process via finalization
  * {@link AbstractNativeReference#finalize()}.</p>
+ * which is deprecated from Java 9.
  * <p>
  * Explanatory note - When or if the Garbage Collector calls {@link Object#finalize()}
  * depends on the JVM implementation and system conditions, which the programmer

--- a/java/src/main/java/org/rocksdb/AbstractSlice.java
+++ b/java/src/main/java/org/rocksdb/AbstractSlice.java
@@ -8,7 +8,7 @@ package org.rocksdb;
 /**
  * Slices are used by RocksDB to provide
  * efficient access to keys and values.
- *
+ * <p>
  * This class is package private, implementers
  * should extend either of the public abstract classes:
  * @see org.rocksdb.Slice
@@ -147,7 +147,7 @@ public abstract class AbstractSlice<T> extends RocksMutableObject {
  */
 @Override
 public boolean equals(final Object other) {
-if (other != null && other instanceof AbstractSlice) {
+if (other instanceof AbstractSlice) {
 return compare((AbstractSlice<?>)other) == 0;
 } else {
 return false;
@@ -172,7 +172,7 @@ public abstract class AbstractSlice<T> extends RocksMutableObject {
 }
 }
-protected native static long createNewSliceFromString(final String str);
+protected static native long createNewSliceFromString(final String str);
 private native int size0(long handle);
 private native boolean empty0(long handle);
 private native String toString0(long handle, boolean hex);
@@ -183,7 +183,7 @@ public abstract class AbstractSlice<T> extends RocksMutableObject {
  * Deletes underlying C++ slice pointer.
  * Note that this function should be called only after all
  * RocksDB instances referencing the slice are closed.
- * Otherwise an undefined behavior will occur.
+ * Otherwise, an undefined behavior will occur.
  */
 @Override
 protected final native void disposeInternal(final long handle);

--- a/java/src/main/java/org/rocksdb/AbstractTraceWriter.java
+++ b/java/src/main/java/org/rocksdb/AbstractTraceWriter.java
@@ -62,7 +62,7 @@ public abstract class AbstractTraceWriter
 private static short statusToShort(final Status.Code code,
     final Status.SubCode subCode) {
-short result = (short)(code.getValue() << 8);
+final short result = (short) (code.getValue() << 8);
 return (short)(result | subCode.getValue());
 }

--- a/java/src/main/java/org/rocksdb/AbstractTransactionNotifier.java
+++ b/java/src/main/java/org/rocksdb/AbstractTransactionNotifier.java
@@ -41,10 +41,10 @@ public abstract class AbstractTransactionNotifier
 /**
  * Deletes underlying C++ TransactionNotifier pointer.
- *
+ * <p>
  * Note that this function should be called only after all
  * Transactions referencing the comparator are closed.
- * Otherwise an undefined behavior will occur.
+ * Otherwise, an undefined behavior will occur.
  */
 @Override
 protected void disposeInternal() {

--- a/java/src/main/java/org/rocksdb/AbstractWalFilter.java
+++ b/java/src/main/java/org/rocksdb/AbstractWalFilter.java
@@ -41,7 +41,7 @@ public abstract class AbstractWalFilter
 private static short logRecordFoundResultToShort(
     final LogRecordFoundResult logRecordFoundResult) {
-short result = (short)(logRecordFoundResult.walProcessingOption.getValue() << 8);
+final short result = (short) (logRecordFoundResult.walProcessingOption.getValue() << 8);
 return (short)(result | (logRecordFoundResult.batchChanged ? 1 : 0));
 }

--- a/java/src/main/java/org/rocksdb/AbstractWriteBatch.java
+++ b/java/src/main/java/org/rocksdb/AbstractWriteBatch.java
@@ -20,25 +20,25 @@ public abstract class AbstractWriteBatch extends RocksObject
 }
 @Override
-public void put(byte[] key, byte[] value) throws RocksDBException {
+public void put(final byte[] key, final byte[] value) throws RocksDBException {
 put(nativeHandle_, key, key.length, value, value.length);
 }
 @Override
-public void put(ColumnFamilyHandle columnFamilyHandle, byte[] key,
-    byte[] value) throws RocksDBException {
+public void put(final ColumnFamilyHandle columnFamilyHandle, final byte[] key, final byte[] value)
+    throws RocksDBException {
 put(nativeHandle_, key, key.length, value, value.length,
     columnFamilyHandle.nativeHandle_);
 }
 @Override
-public void merge(byte[] key, byte[] value) throws RocksDBException {
+public void merge(final byte[] key, final byte[] value) throws RocksDBException {
 merge(nativeHandle_, key, key.length, value, value.length);
 }
 @Override
-public void merge(ColumnFamilyHandle columnFamilyHandle, byte[] key,
-    byte[] value) throws RocksDBException {
+public void merge(final ColumnFamilyHandle columnFamilyHandle, final byte[] key,
+    final byte[] value) throws RocksDBException {
 merge(nativeHandle_, key, key.length, value, value.length,
     columnFamilyHandle.nativeHandle_);
 }
@@ -53,7 +53,7 @@ public abstract class AbstractWriteBatch extends RocksObject
 }
 @Override
-public void put(ColumnFamilyHandle columnFamilyHandle, final ByteBuffer key,
+public void put(final ColumnFamilyHandle columnFamilyHandle, final ByteBuffer key,
     final ByteBuffer value) throws RocksDBException {
 assert key.isDirect() && value.isDirect();
 putDirect(nativeHandle_, key, key.position(), key.remaining(), value, value.position(),
@@ -63,12 +63,12 @@ public abstract class AbstractWriteBatch extends RocksObject
 }
 @Override
-public void delete(byte[] key) throws RocksDBException {
+public void delete(final byte[] key) throws RocksDBException {
 delete(nativeHandle_, key, key.length);
 }
 @Override
-public void delete(ColumnFamilyHandle columnFamilyHandle, byte[] key)
+public void delete(final ColumnFamilyHandle columnFamilyHandle, final byte[] key)
     throws RocksDBException {
 delete(nativeHandle_, key, key.length, columnFamilyHandle.nativeHandle_);
 }
@@ -80,7 +80,7 @@ public abstract class AbstractWriteBatch extends RocksObject
 }
 @Override
-public void delete(ColumnFamilyHandle columnFamilyHandle, final ByteBuffer key)
+public void delete(final ColumnFamilyHandle columnFamilyHandle, final ByteBuffer key)
     throws RocksDBException {
 deleteDirect(
     nativeHandle_, key, key.position(), key.remaining(), columnFamilyHandle.nativeHandle_);
@@ -88,31 +88,30 @@ public abstract class AbstractWriteBatch extends RocksObject
 }
 @Override
-public void singleDelete(byte[] key) throws RocksDBException {
+public void singleDelete(final byte[] key) throws RocksDBException {
 singleDelete(nativeHandle_, key, key.length);
 }
 @Override
-public void singleDelete(ColumnFamilyHandle columnFamilyHandle, byte[] key)
+public void singleDelete(final ColumnFamilyHandle columnFamilyHandle, final byte[] key)
     throws RocksDBException {
 singleDelete(nativeHandle_, key, key.length, columnFamilyHandle.nativeHandle_);
 }
 @Override
-public void deleteRange(byte[] beginKey, byte[] endKey)
-    throws RocksDBException {
+public void deleteRange(final byte[] beginKey, final byte[] endKey) throws RocksDBException {
 deleteRange(nativeHandle_, beginKey, beginKey.length, endKey, endKey.length);
 }
 @Override
-public void deleteRange(ColumnFamilyHandle columnFamilyHandle,
-    byte[] beginKey, byte[] endKey) throws RocksDBException {
+public void deleteRange(final ColumnFamilyHandle columnFamilyHandle, final byte[] beginKey,
+    final byte[] endKey) throws RocksDBException {
 deleteRange(nativeHandle_, beginKey, beginKey.length, endKey, endKey.length,
     columnFamilyHandle.nativeHandle_);
 }
 @Override
-public void putLogData(byte[] blob) throws RocksDBException {
+public void putLogData(final byte[] blob) throws RocksDBException {
 putLogData(nativeHandle_, blob, blob.length);
 }

--- a/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java
+++ b/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java
@@ -9,12 +9,12 @@ import java.util.List;
 /**
  * Advanced Column Family Options which are not
- * mutable (i.e. present in {@link AdvancedMutableColumnFamilyOptionsInterface}
- *
+ * mutable (i.e. present in {@link AdvancedMutableColumnFamilyOptionsInterface})
+ * <p>
  * Taken from include/rocksdb/advanced_options.h
  */
 public interface AdvancedColumnFamilyOptionsInterface<
-    T extends AdvancedColumnFamilyOptionsInterface<T>> {
+    T extends AdvancedColumnFamilyOptionsInterface<T> & ColumnFamilyOptionsInterface<T>> {
 /**
  * The minimum number of write buffers that will be merged together
  * before writing to storage. If set to 1, then
@@ -51,23 +51,23 @@ public interface AdvancedColumnFamilyOptionsInterface<
  * this parameter does not affect flushing.
  * This controls the minimum amount of write history that will be available
  * in memory for conflict checking when Transactions are used.
- *
+ * <p>
  * When using an OptimisticTransactionDB:
  * If this value is too low, some transactions may fail at commit time due
  * to not being able to determine whether there were any write conflicts.
- *
+ * <p>
  * When using a TransactionDB:
  * If Transaction::SetSnapshot is used, TransactionDB will read either
  * in-memory write buffers or SST files to do write-conflict checking.
  * Increasing this value can reduce the number of reads to SST files
  * done for conflict detection.
- *
+ * <p>
  * Setting this value to 0 will cause write buffers to be freed immediately
  * after they are flushed.
  * If this value is set to -1,
  * {@link AdvancedMutableColumnFamilyOptionsInterface#maxWriteBufferNumber()}
  * will be used.
- *
+ * <p>
  * Default:
  * If using a TransactionDB/OptimisticTransactionDB, the default value will
  * be set to the value of
@@ -336,14 +336,13 @@ public interface AdvancedColumnFamilyOptionsInterface<
 /**
  * Set compaction style for DB.
- *
+ * <p>
  * Default: LEVEL.
  *
  * @param compactionStyle Compaction style.
  * @return the reference to the current options.
  */
-ColumnFamilyOptionsInterface setCompactionStyle(
-    CompactionStyle compactionStyle);
+ColumnFamilyOptionsInterface<T> setCompactionStyle(CompactionStyle compactionStyle);
 /**
  * Compaction style for DB.
@@ -355,7 +354,7 @@ public interface AdvancedColumnFamilyOptionsInterface<
 /**
  * If level {@link #compactionStyle()} == {@link CompactionStyle#LEVEL},
  * for each level, which files are prioritized to be picked to compact.
- *
+ * <p>
  * Default: {@link CompactionPriority#ByCompensatedSize}
  *
  * @param compactionPriority The compaction priority
@@ -444,7 +443,7 @@ public interface AdvancedColumnFamilyOptionsInterface<
  * By default, RocksDB runs consistency checks on the LSM every time the LSM
  * changes (Flush, Compaction, AddFile). Use this option if you need to
  * disable them.
- *
+ * <p>
  * Default: true
  *
  * @param forceConsistencyChecks false to disable consistency checks

@ -7,7 +7,7 @@ package org.rocksdb;
/** /**
* Advanced Column Family Options which are mutable * Advanced Column Family Options which are mutable
* * <p>
* Taken from include/rocksdb/advanced_options.h * Taken from include/rocksdb/advanced_options.h
* and MutableCFOptions in util/cf_options.h * and MutableCFOptions in util/cf_options.h
*/ */
@ -58,8 +58,8 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* if prefix_extractor is set and memtable_prefix_bloom_size_ratio is not 0, * if prefix_extractor is set and memtable_prefix_bloom_size_ratio is not 0,
* create prefix bloom for memtable with the size of * create prefix bloom for memtable with the size of
* write_buffer_size * memtable_prefix_bloom_size_ratio. * write_buffer_size * memtable_prefix_bloom_size_ratio.
* If it is larger than 0.25, it is santinized to 0.25. * If it is larger than 0.25, it is sanitized to 0.25.
* * <p>
* Default: 0 (disabled) * Default: 0 (disabled)
* *
* @param memtablePrefixBloomSizeRatio the ratio of memtable used by the * @param memtablePrefixBloomSizeRatio the ratio of memtable used by the
@ -73,8 +73,8 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* if prefix_extractor is set and memtable_prefix_bloom_size_ratio is not 0, * if prefix_extractor is set and memtable_prefix_bloom_size_ratio is not 0,
* create prefix bloom for memtable with the size of * create prefix bloom for memtable with the size of
* write_buffer_size * memtable_prefix_bloom_size_ratio. * write_buffer_size * memtable_prefix_bloom_size_ratio.
* If it is larger than 0.25, it is santinized to 0.25. * If it is larger than 0.25, it is sanitized to 0.25.
* * <p>
* Default: 0 (disabled) * Default: 0 (disabled)
* *
* @return the ratio of memtable used by the bloom filter * @return the ratio of memtable used by the bloom filter
@ -85,7 +85,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* Threshold used in the MemPurge (memtable garbage collection) * Threshold used in the MemPurge (memtable garbage collection)
* feature. A value of 0.0 corresponds to no MemPurge, * feature. A value of 0.0 corresponds to no MemPurge,
* a value of 1.0 will trigger a MemPurge as often as possible. * a value of 1.0 will trigger a MemPurge as often as possible.
* * <p>
* Default: 0.0 (disabled) * Default: 0.0 (disabled)
* *
* @param experimentalMempurgeThreshold the threshold used by * @param experimentalMempurgeThreshold the threshold used by
@ -98,7 +98,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* Threshold used in the MemPurge (memtable garbage collection) * Threshold used in the MemPurge (memtable garbage collection)
* feature. A value of 0.0 corresponds to no MemPurge, * feature. A value of 0.0 corresponds to no MemPurge,
* a value of 1.0 will trigger a MemPurge as often as possible. * a value of 1.0 will trigger a MemPurge as often as possible.
* * <p>
* Default: 0 (disabled) * Default: 0 (disabled)
* *
* @return the threshold used by the MemPurge decider * @return the threshold used by the MemPurge decider
@ -109,7 +109,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* Enable whole key bloom filter in memtable. Note this will only take effect * Enable whole key bloom filter in memtable. Note this will only take effect
* if memtable_prefix_bloom_size_ratio is not 0. Enabling whole key filtering * if memtable_prefix_bloom_size_ratio is not 0. Enabling whole key filtering
* can potentially reduce CPU usage for point-look-ups. * can potentially reduce CPU usage for point-look-ups.
* * <p>
* Default: false (disabled) * Default: false (disabled)
* *
* @param memtableWholeKeyFiltering true if whole key bloom filter is enabled * @param memtableWholeKeyFiltering true if whole key bloom filter is enabled
@ -154,12 +154,12 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* The size of one block in arena memory allocation. * The size of one block in arena memory allocation.
* If &le; 0, a proper value is automatically calculated (usually 1/10 of * If &le; 0, a proper value is automatically calculated (usually 1/10 of
* writer_buffer_size). * writer_buffer_size).
* * <p>
* There are two additional restriction of the specified size: * There are two additional restriction of the specified size:
* (1) size should be in the range of [4096, 2 &lt;&lt; 30] and * (1) size should be in the range of [4096, 2 &lt;&lt; 30] and
* (2) be the multiple of the CPU word (which helps with the memory * (2) be the multiple of the CPU word (which helps with the memory
* alignment). * alignment).
* * <p>
* We'll automatically check and adjust the size number to make sure it * We'll automatically check and adjust the size number to make sure it
* conforms to the restrictions. * conforms to the restrictions.
* Default: 0 * Default: 0
@ -175,12 +175,12 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* The size of one block in arena memory allocation. * The size of one block in arena memory allocation.
* If &le; 0, a proper value is automatically calculated (usually 1/10 of * If &le; 0, a proper value is automatically calculated (usually 1/10 of
* writer_buffer_size). * writer_buffer_size).
* * <p>
* There are two additional restriction of the specified size: * There are two additional restriction of the specified size:
* (1) size should be in the range of [4096, 2 &lt;&lt; 30] and * (1) size should be in the range of [4096, 2 &lt;&lt; 30] and
* (2) be the multiple of the CPU word (which helps with the memory * (2) be the multiple of the CPU word (which helps with the memory
* alignment). * alignment).
* * <p>
* We'll automatically check and adjust the size number to make sure it * We'll automatically check and adjust the size number to make sure it
* conforms to the restrictions. * conforms to the restrictions.
* Default: 0 * Default: 0
@ -294,7 +294,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* @param multiplier the ratio between the total size of level-(L+1) * @param multiplier the ratio between the total size of level-(L+1)
* files and the total size of level-L files for all L. * files and the total size of level-L files for all L.
* @return the reference to the current options. * @return the reference to the current options.
* * <p>
* See {@link MutableColumnFamilyOptionsInterface#setMaxBytesForLevelBase(long)} * See {@link MutableColumnFamilyOptionsInterface#setMaxBytesForLevelBase(long)}
*/ */
T setMaxBytesForLevelMultiplier(double multiplier); T setMaxBytesForLevelMultiplier(double multiplier);
@ -306,7 +306,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* *
* @return the ratio between the total size of level-(L+1) files and * @return the ratio between the total size of level-(L+1) files and
* the total size of level-L files for all L. * the total size of level-L files for all L.
* * <p>
* See {@link MutableColumnFamilyOptionsInterface#maxBytesForLevelBase()} * See {@link MutableColumnFamilyOptionsInterface#maxBytesForLevelBase()}
*/ */
double maxBytesForLevelMultiplier(); double maxBytesForLevelMultiplier();
@ -315,7 +315,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* Different max-size multipliers for different levels. * Different max-size multipliers for different levels.
* These are multiplied by max_bytes_for_level_multiplier to arrive * These are multiplied by max_bytes_for_level_multiplier to arrive
* at the max-size of each level. * at the max-size of each level.
* * <p>
* Default: 1 * Default: 1
* *
* @param maxBytesForLevelMultiplierAdditional The max-size multipliers * @param maxBytesForLevelMultiplierAdditional The max-size multipliers
@ -329,7 +329,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* Different max-size multipliers for different levels. * Different max-size multipliers for different levels.
* These are multiplied by max_bytes_for_level_multiplier to arrive * These are multiplied by max_bytes_for_level_multiplier to arrive
* at the max-size of each level. * at the max-size of each level.
* * <p>
* Default: 1 * Default: 1
* *
* @return The max-size multipliers for each level * @return The max-size multipliers for each level
@ -339,7 +339,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
/** /**
* All writes will be slowed down to at least delayed_write_rate if estimated * All writes will be slowed down to at least delayed_write_rate if estimated
* bytes needed to be compaction exceed this threshold. * bytes needed to be compaction exceed this threshold.
* * <p>
* Default: 64GB * Default: 64GB
* *
* @param softPendingCompactionBytesLimit The soft limit to impose on * @param softPendingCompactionBytesLimit The soft limit to impose on
@ -352,7 +352,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
/** /**
* All writes will be slowed down to at least delayed_write_rate if estimated * All writes will be slowed down to at least delayed_write_rate if estimated
* bytes needed to be compaction exceed this threshold. * bytes needed to be compaction exceed this threshold.
* * <p>
* Default: 64GB * Default: 64GB
* *
* @return The soft limit to impose on compaction * @return The soft limit to impose on compaction
@ -362,7 +362,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
/** /**
* All writes are stopped if estimated bytes needed to be compaction exceed * All writes are stopped if estimated bytes needed to be compaction exceed
* this threshold. * this threshold.
* * <p>
* Default: 256GB * Default: 256GB
* *
* @param hardPendingCompactionBytesLimit The hard limit to impose on * @param hardPendingCompactionBytesLimit The hard limit to impose on
@ -375,7 +375,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
/** /**
* All writes are stopped if estimated bytes needed to be compaction exceed * All writes are stopped if estimated bytes needed to be compaction exceed
* this threshold. * this threshold.
* * <p>
* Default: 256GB * Default: 256GB
* *
* @return The hard limit to impose on compaction * @return The hard limit to impose on compaction
@ -390,7 +390,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* Default: 8 * Default: 8
* *
* @param maxSequentialSkipInIterations the number of keys could * @param maxSequentialSkipInIterations the number of keys could
* be skipped in a iteration. * be skipped in an iteration.
* @return the reference to the current options. * @return the reference to the current options.
*/ */
T setMaxSequentialSkipInIterations( T setMaxSequentialSkipInIterations(
@ -403,19 +403,19 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* skipped before a reseek is issued. * skipped before a reseek is issued.
* Default: 8 * Default: 8
* *
* @return the number of keys could be skipped in a iteration. * @return the number of keys could be skipped in an iteration.
*/ */
long maxSequentialSkipInIterations(); long maxSequentialSkipInIterations();
/** /**
* Maximum number of successive merge operations on a key in the memtable. * Maximum number of successive merge operations on a key in the memtable.
* * <p>
* When a merge operation is added to the memtable and the maximum number of * When a merge operation is added to the memtable and the maximum number of
* successive merges is reached, the value of the key will be calculated and * successive merges is reached, the value of the key will be calculated and
* inserted into the memtable instead of the merge operation. This will * inserted into the memtable instead of the merge operation. This will
* ensure that there are never more than max_successive_merges merge * ensure that there are never more than max_successive_merges merge
* operations in the memtable. * operations in the memtable.
* * <p>
* Default: 0 (disabled) * Default: 0 (disabled)
* *
* @param maxSuccessiveMerges the maximum number of successive merges. * @param maxSuccessiveMerges the maximum number of successive merges.
@ -428,13 +428,13 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
/** /**
* Maximum number of successive merge operations on a key in the memtable. * Maximum number of successive merge operations on a key in the memtable.
* * <p>
* When a merge operation is added to the memtable and the maximum number of * When a merge operation is added to the memtable and the maximum number of
* successive merges is reached, the value of the key will be calculated and * successive merges is reached, the value of the key will be calculated and
* inserted into the memtable instead of the merge operation. This will * inserted into the memtable instead of the merge operation. This will
* ensure that there are never more than max_successive_merges merge * ensure that there are never more than max_successive_merges merge
* operations in the memtable. * operations in the memtable.
* * <p>
* Default: 0 (disabled) * Default: 0 (disabled)
* *
* @return the maximum number of successive merges. * @return the maximum number of successive merges.
@ -443,7 +443,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
/** /**
* After writing every SST file, reopen it and read all the keys. * After writing every SST file, reopen it and read all the keys.
* * <p>
* Default: false * Default: false
* *
* @param paranoidFileChecks true to enable paranoid file checks * @param paranoidFileChecks true to enable paranoid file checks
@ -454,7 +454,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
/** /**
* After writing every SST file, reopen it and read all the keys. * After writing every SST file, reopen it and read all the keys.
* * <p>
* Default: false * Default: false
* *
* @return true if paranoid file checks are enabled * @return true if paranoid file checks are enabled
@ -463,7 +463,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
/** /**
* Measure IO stats in compactions and flushes, if true. * Measure IO stats in compactions and flushes, if true.
* * <p>
* Default: false * Default: false
* *
* @param reportBgIoStats true to enable reporting * @param reportBgIoStats true to enable reporting
@ -483,11 +483,11 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* Non-bottom-level files older than TTL will go through the compaction * Non-bottom-level files older than TTL will go through the compaction
* process. This needs {@link MutableDBOptionsInterface#maxOpenFiles()} to be * process. This needs {@link MutableDBOptionsInterface#maxOpenFiles()} to be
* set to -1. * set to -1.
* * <p>
* Enabled only for level compaction for now. * Enabled only for level compaction for now.
* * <p>
* Default: 0 (disabled) * Default: 0 (disabled)
* * <p>
* Dynamically changeable through * Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
* *
@ -500,7 +500,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
/** /**
* Get the TTL for Non-bottom-level files that will go through the compaction * Get the TTL for Non-bottom-level files that will go through the compaction
* process. * process.
* * <p>
* See {@link #setTtl(long)}. * See {@link #setTtl(long)}.
* *
* @return the time-to-live. * @return the time-to-live.
@ -513,18 +513,18 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* One main use of the feature is to make sure a file goes through compaction * One main use of the feature is to make sure a file goes through compaction
* filters periodically. Users can also use the feature to clear up SST * filters periodically. Users can also use the feature to clear up SST
* files using old format. * files using old format.
* * <p>
* A file's age is computed by looking at file_creation_time or creation_time * A file's age is computed by looking at file_creation_time or creation_time
* table properties in order, if they have valid non-zero values; if not, the * table properties in order, if they have valid non-zero values; if not, the
* age is based on the file's last modified time (given by the underlying * age is based on the file's last modified time (given by the underlying
* Env). * Env).
* * <p>
* Supported in Level and FIFO compaction. * Supported in Level and FIFO compaction.
* In FIFO compaction, this option has the same meaning as TTL and whichever * In FIFO compaction, this option has the same meaning as TTL and whichever
* stricter will be used. * stricter will be used.
* Pre-req: max_open_file == -1. * Pre-req: max_open_file == -1.
* unit: seconds. Ex: 7 days = 7 * 24 * 60 * 60 * unit: seconds. Ex: 7 days = 7 * 24 * 60 * 60
* * <p>
* Values: * Values:
* 0: Turn off Periodic compactions. * 0: Turn off Periodic compactions.
* UINT64_MAX - 1 (i.e 0xfffffffffffffffe): Let RocksDB control this feature * UINT64_MAX - 1 (i.e 0xfffffffffffffffe): Let RocksDB control this feature
@ -534,9 +534,9 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* In FIFO compaction, since the option has the same meaning as ttl, * In FIFO compaction, since the option has the same meaning as ttl,
* when this value is left default, and ttl is left to 0, 30 days will be * when this value is left default, and ttl is left to 0, 30 days will be
* used. Otherwise, min(ttl, periodic_compaction_seconds) will be used. * used. Otherwise, min(ttl, periodic_compaction_seconds) will be used.
* * <p>
* Default: 0xfffffffffffffffe (allow RocksDB to auto-tune) * Default: 0xfffffffffffffffe (allow RocksDB to auto-tune)
* * <p>
* Dynamically changeable through * Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
* *
@ -548,7 +548,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
/** /**
* Get the periodicCompactionSeconds. * Get the periodicCompactionSeconds.
* * <p>
* See {@link #setPeriodicCompactionSeconds(long)}. * See {@link #setPeriodicCompactionSeconds(long)}.
* *
* @return the periodic compaction in seconds. * @return the periodic compaction in seconds.
@ -566,9 +566,9 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* for reads. See also the options min_blob_size, blob_file_size, * for reads. See also the options min_blob_size, blob_file_size,
* blob_compression_type, enable_blob_garbage_collection, and * blob_compression_type, enable_blob_garbage_collection, and
* blob_garbage_collection_age_cutoff below. * blob_garbage_collection_age_cutoff below.
* * <p>
* Default: false * Default: false
* * <p>
* Dynamically changeable through * Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
* *
@ -585,9 +585,9 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* for reads. See also the options min_blob_size, blob_file_size, * for reads. See also the options min_blob_size, blob_file_size,
* blob_compression_type, enable_blob_garbage_collection, and * blob_compression_type, enable_blob_garbage_collection, and
* blob_garbage_collection_age_cutoff below. * blob_garbage_collection_age_cutoff below.
* * <p>
* Default: false * Default: false
* * <p>
* Dynamically changeable through * Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
* *
@ -601,9 +601,9 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* alongside the keys in SST files in the usual fashion. A value of zero for * alongside the keys in SST files in the usual fashion. A value of zero for
* this option means that all values are stored in blob files. Note that * this option means that all values are stored in blob files. Note that
* enable_blob_files has to be set in order for this option to have any effect. * enable_blob_files has to be set in order for this option to have any effect.
* * <p>
* Default: 0 * Default: 0
* * <p>
* Dynamically changeable through * Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
* *
@ -618,9 +618,9 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* alongside the keys in SST files in the usual fashion. A value of zero for * alongside the keys in SST files in the usual fashion. A value of zero for
* this option means that all values are stored in blob files. Note that * this option means that all values are stored in blob files. Note that
* enable_blob_files has to be set in order for this option to have any effect. * enable_blob_files has to be set in order for this option to have any effect.
* * <p>
* Default: 0 * Default: 0
* * <p>
* Dynamically changeable through * Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
* *
@@ -632,9 +632,9 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* Set the size limit for blob files. When writing blob files, a new file is opened * Set the size limit for blob files. When writing blob files, a new file is opened
* once this limit is reached. Note that enable_blob_files has to be set in * once this limit is reached. Note that enable_blob_files has to be set in
* order for this option to have any effect. * order for this option to have any effect.
* * <p>
* Default: 256 MB * Default: 256 MB
* * <p>
* Dynamically changeable through * Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
* *
@@ -656,9 +656,9 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* Set the compression algorithm to use for large values stored in blob files. Note * Set the compression algorithm to use for large values stored in blob files. Note
* that enable_blob_files has to be set in order for this option to have any * that enable_blob_files has to be set in order for this option to have any
* effect. * effect.
* * <p>
* Default: no compression * Default: no compression
* * <p>
* Dynamically changeable through * Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
* *
@@ -683,7 +683,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* relocated to new files as they are encountered during compaction, which makes * relocated to new files as they are encountered during compaction, which makes
* it possible to clean up blob files once they contain nothing but * it possible to clean up blob files once they contain nothing but
* obsolete/garbage blobs. See also blob_garbage_collection_age_cutoff below. * obsolete/garbage blobs. See also blob_garbage_collection_age_cutoff below.
* * <p>
* Default: false * Default: false
* *
* @param enableBlobGarbageCollection the new enabled/disabled state of blob garbage collection * @param enableBlobGarbageCollection the new enabled/disabled state of blob garbage collection
@@ -698,7 +698,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* relocated to new files as they are encountered during compaction, which makes * relocated to new files as they are encountered during compaction, which makes
* it possible to clean up blob files once they contain nothing but * it possible to clean up blob files once they contain nothing but
* obsolete/garbage blobs. See also blob_garbage_collection_age_cutoff below. * obsolete/garbage blobs. See also blob_garbage_collection_age_cutoff below.
* * <p>
* Default: false * Default: false
* *
* @return true if blob garbage collection is currently enabled. * @return true if blob garbage collection is currently enabled.
@@ -711,7 +711,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* where N = garbage_collection_cutoff * number_of_blob_files. Note that * where N = garbage_collection_cutoff * number_of_blob_files. Note that
* enable_blob_garbage_collection has to be set in order for this option to have * enable_blob_garbage_collection has to be set in order for this option to have
* any effect. * any effect.
* * <p>
* Default: 0.25 * Default: 0.25
* *
* @param blobGarbageCollectionAgeCutoff the new age cutoff * @param blobGarbageCollectionAgeCutoff the new age cutoff
@@ -725,7 +725,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* where N = garbage_collection_cutoff * number_of_blob_files. Note that * where N = garbage_collection_cutoff * number_of_blob_files. Note that
* enable_blob_garbage_collection has to be set in order for this option to have * enable_blob_garbage_collection has to be set in order for this option to have
* any effect. * any effect.
* * <p>
* Default: 0.25 * Default: 0.25
* *
* @return the current age cutoff for garbage collection * @return the current age cutoff for garbage collection
@@ -738,12 +738,12 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* the blob files in question, assuming they are all eligible based on the * the blob files in question, assuming they are all eligible based on the
* value of {@link #blobGarbageCollectionAgeCutoff} above. This option is * value of {@link #blobGarbageCollectionAgeCutoff} above. This option is
* currently only supported with leveled compactions. * currently only supported with leveled compactions.
* * <p>
* Note that {@link #enableBlobGarbageCollection} has to be set in order for this * Note that {@link #enableBlobGarbageCollection} has to be set in order for this
* option to have any effect. * option to have any effect.
* * <p>
* Default: 1.0 * Default: 1.0
* * <p>
* Dynamically changeable through the SetOptions() API * Dynamically changeable through the SetOptions() API
* *
* @param blobGarbageCollectionForceThreshold new value for the threshold * @param blobGarbageCollectionForceThreshold new value for the threshold
@@ -752,16 +752,16 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
T setBlobGarbageCollectionForceThreshold(double blobGarbageCollectionForceThreshold); T setBlobGarbageCollectionForceThreshold(double blobGarbageCollectionForceThreshold);
/** /**
* Get the current value for the {@link #blobGarbageCollectionForceThreshold} * Get the current value for the {@code #blobGarbageCollectionForceThreshold}
* @return the current threshold at which garbage collection of blobs is forced * @return the current threshold at which garbage collection of blobs is forced
*/ */
double blobGarbageCollectionForceThreshold(); double blobGarbageCollectionForceThreshold();
/** /**
* Set compaction readahead for blob files. * Set compaction readahead for blob files.
* * <p>
* Default: 0 * Default: 0
* * <p>
* Dynamically changeable through * Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
* *
@@ -780,9 +780,9 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
/** /**
* Set a certain LSM tree level to enable blob files. * Set a certain LSM tree level to enable blob files.
* * <p>
* Default: 0 * Default: 0
* * <p>
* Dynamically changeable through * Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
* *
@@ -794,7 +794,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
/** /**
* Get the starting LSM tree level to enable blob files. * Get the starting LSM tree level to enable blob files.
* * <p>
* Default: 0 * Default: 0
* *
* @return the current LSM tree level to enable blob files. * @return the current LSM tree level to enable blob files.
@@ -803,13 +803,13 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
/** /**
* Set a certain prepopulate blob cache option. * Set a certain prepopulate blob cache option.
* * <p>
* Default: 0 * Default: 0
* * <p>
* Dynamically changeable through * Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
* *
* @param prepopulateBlobCache the prepopulate blob cache option * @param prepopulateBlobCache prepopulate the blob cache option
* *
* @return the reference to the current options. * @return the reference to the current options.
*/ */
@@ -817,7 +817,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
/** /**
* Get the prepopulate blob cache option. * Get the prepopulate blob cache option.
* * <p>
* Default: 0 * Default: 0
* *
* @return the current prepopulate blob cache option. * @return the current prepopulate blob cache option.
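All of the blob options in the hunks above are mutable, so they can be changed on a live column family. A minimal usage sketch, assuming the RocksDB 7 Java API documented here (the path and size values are illustrative only, not recommendations):

    import org.rocksdb.MutableColumnFamilyOptions;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;

    public class BlobOptionsSketch {
      public static void main(final String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (final RocksDB db = RocksDB.open("/tmp/blob-example")) {
          // Flip the mutable blob options documented above at runtime.
          db.setOptions(db.getDefaultColumnFamily(),
              MutableColumnFamilyOptions.builder()
                  .setEnableBlobFiles(true) // store large values in blob files
                  .setMinBlobSize(1024L) // only values of at least 1 KiB
                  .setBlobFileSize(256L * 1024 * 1024) // roll blob files at 256 MB
                  .setEnableBlobGarbageCollection(true)
                  .setBlobGarbageCollectionAgeCutoff(0.25) // GC the oldest 25% of files
                  .build());
        }
      }
    }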
java/src/main/java/org/rocksdb/BackupEngine.java
@@ -9,7 +9,7 @@ import java.util.List;
/** /**
* BackupEngine allows you to backup * BackupEngine allows you to backup
* and restore the database * and restore the database
* * <p>
* Be aware, that `new BackupEngine` takes time proportional to the amount * Be aware, that `new BackupEngine` takes time proportional to the amount
* of backups. So if you have a slow filesystem to backup * of backups. So if you have a slow filesystem to backup
* and you have a lot of backups then restoring can take some time. * and you have a lot of backups then restoring can take some time.
@@ -39,12 +39,12 @@ public class BackupEngine extends RocksObject implements AutoCloseable {
/** /**
* Captures the state of the database in the latest backup * Captures the state of the database in the latest backup
* * <p>
* Just a convenience for {@link #createNewBackup(RocksDB, boolean)} with * Just a convenience for {@link #createNewBackup(RocksDB, boolean)} with
* the flushBeforeBackup parameter set to false * the flushBeforeBackup parameter set to false
* *
* @param db The database to backup * @param db The database to backup
* * <p>
* Note - This method is not thread safe * Note - This method is not thread safe
* *
* @throws RocksDBException thrown if a new backup could not be created * @throws RocksDBException thrown if a new backup could not be created
@@ -72,7 +72,7 @@ public class BackupEngine extends RocksObject implements AutoCloseable {
* always be consistent with the current state of the * always be consistent with the current state of the
* database regardless of the flushBeforeBackup * database regardless of the flushBeforeBackup
* parameter. * parameter.
* * <p>
* Note - This method is not thread safe * Note - This method is not thread safe
* *
* @throws RocksDBException thrown if a new backup could not be created * @throws RocksDBException thrown if a new backup could not be created
@@ -105,7 +105,7 @@ public class BackupEngine extends RocksObject implements AutoCloseable {
* always be consistent with the current state of the * always be consistent with the current state of the
* database regardless of the flushBeforeBackup * database regardless of the flushBeforeBackup
* parameter. * parameter.
* * <p>
* Note - This method is not thread safe * Note - This method is not thread safe
* *
* @throws RocksDBException thrown if a new backup could not be created * @throws RocksDBException thrown if a new backup could not be created
@@ -179,11 +179,11 @@ public class BackupEngine extends RocksObject implements AutoCloseable {
/** /**
* Restore the database from a backup * Restore the database from a backup
* * <p>
* IMPORTANT: if options.share_table_files == true and you restore the DB * IMPORTANT: if options.share_table_files == true and you restore the DB
* from some backup that is not the latest, and you start creating new * from some backup that is not the latest, and you start creating new
* backups from the new DB, they will probably fail! * backups from the new DB, they will probably fail!
* * <p>
* Example: Let's say you have backups 1, 2, 3, 4, 5 and you restore 3. * Example: Let's say you have backups 1, 2, 3, 4, 5 and you restore 3.
* If you add new data to the DB and try creating a new backup now, the * If you add new data to the DB and try creating a new backup now, the
* database will diverge from backups 4 and 5 and the new backup will fail. * database will diverge from backups 4 and 5 and the new backup will fail.
@@ -226,7 +226,7 @@ public class BackupEngine extends RocksObject implements AutoCloseable {
restoreOptions.nativeHandle_); restoreOptions.nativeHandle_);
} }
private native static long open(final long env, final long backupEngineOptions) private static native long open(final long env, final long backupEngineOptions)
throws RocksDBException; throws RocksDBException;
private native void createNewBackup(final long handle, final long dbHandle, private native void createNewBackup(final long handle, final long dbHandle,
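A usage sketch of the backup/restore cycle described in the hunks above (directory paths are illustrative; error handling elided):

    import org.rocksdb.*;

    public class BackupSketch {
      public static void main(final String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (final BackupEngineOptions backupOptions = new BackupEngineOptions("/tmp/backups");
             final BackupEngine backupEngine = BackupEngine.open(Env.getDefault(), backupOptions);
             final RocksDB db = RocksDB.open("/tmp/db")) {
          backupEngine.createNewBackup(db, true); // flush memtables before the backup
          backupEngine.purgeOldBackups(3); // keep only the three newest backups
          try (final RestoreOptions restoreOptions = new RestoreOptions(false)) {
            backupEngine.restoreDbFromLatestBackup(
                "/tmp/db-restored", "/tmp/db-restored", restoreOptions);
          }
        }
      }
    }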
java/src/main/java/org/rocksdb/BackupEngineOptions.java
@@ -25,7 +25,7 @@ public class BackupEngineOptions extends RocksObject {
/** /**
* <p>BackupEngineOptions constructor.</p> * <p>BackupEngineOptions constructor.</p>
* *
* @param path Where to keep the backup files. Has to be different than db * @param path Where to keep the backup files. Has to be different from db
* name. Best to set this to {@code db name_ + "/backups"} * name. Best to set this to {@code db name_ + "/backups"}
* @throws java.lang.IllegalArgumentException if illegal path is used. * @throws java.lang.IllegalArgumentException if illegal path is used.
*/ */
@@ -55,9 +55,9 @@ public class BackupEngineOptions extends RocksObject {
/** /**
* Backup Env object. It will be used for backup file I/O. If it's * Backup Env object. It will be used for backup file I/O. If it's
* null, backups will be written out using DBs Env. Otherwise * null, backups will be written out using DBs Env. Otherwise,
* backup's I/O will be performed using this object. * backup's I/O will be performed using this object.
* * <p>
* Default: null * Default: null
* *
* @param env The environment to use * @param env The environment to use
@@ -72,9 +72,9 @@ public class BackupEngineOptions extends RocksObject {
/** /**
* Backup Env object. It will be used for backup file I/O. If it's * Backup Env object. It will be used for backup file I/O. If it's
* null, backups will be written out using DBs Env. Otherwise * null, backups will be written out using DBs Env. Otherwise,
* backup's I/O will be performed using this object. * backup's I/O will be performed using this object.
* * <p>
* Default: null * Default: null
* *
* @return The environment in use * @return The environment in use
@@ -128,7 +128,7 @@ public class BackupEngineOptions extends RocksObject {
/** /**
* Set the logger to use for Backup info and error messages * Set the logger to use for Backup info and error messages
* * <p>
* Default: null * Default: null
* *
* @return The logger in use for the backup * @return The logger in use for the backup
@@ -143,7 +143,7 @@ public class BackupEngineOptions extends RocksObject {
* @param sync If {@code sync == true}, we can guarantee you'll get consistent * @param sync If {@code sync == true}, we can guarantee you'll get consistent
* backup even on a machine crash/reboot. Backup process is slower with sync * backup even on a machine crash/reboot. Backup process is slower with sync
* enabled. If {@code sync == false}, we don't guarantee anything on machine * enabled. If {@code sync == false}, we don't guarantee anything on machine
* reboot. However, chances are some of the backups are consistent. * reboot. However, chances are some backups are consistent.
* *
* <p>Default: true</p> * <p>Default: true</p>
* *
@@ -194,7 +194,7 @@ public class BackupEngineOptions extends RocksObject {
/** /**
* <p>Set if log files shall be persisted.</p> * <p>Set if log files shall be persisted.</p>
* *
* @param backupLogFiles If false, we won't backup log files. This option can * @param backupLogFiles If false, we won't back up log files. This option can
* be useful for backing up in-memory databases where log file are * be useful for backing up in-memory databases where log file are
* persisted, but table files are in memory. * persisted, but table files are in memory.
* *
@@ -250,7 +250,7 @@ public class BackupEngineOptions extends RocksObject {
/** /**
* Backup rate limiter. Used to control transfer speed for backup. If this is * Backup rate limiter. Used to control transfer speed for backup. If this is
* not null, {@link #backupRateLimit()} is ignored. * not null, {@link #backupRateLimit()} is ignored.
* * <p>
* Default: null * Default: null
* *
* @param backupRateLimiter The rate limiter to use for the backup * @param backupRateLimiter The rate limiter to use for the backup
@@ -266,7 +266,7 @@ public class BackupEngineOptions extends RocksObject {
/** /**
* Backup rate limiter. Used to control transfer speed for backup. If this is * Backup rate limiter. Used to control transfer speed for backup. If this is
* not null, {@link #backupRateLimit()} is ignored. * not null, {@link #backupRateLimit()} is ignored.
* * <p>
* Default: null * Default: null
* *
* @return The rate limiter in use for the backup * @return The rate limiter in use for the backup
@@ -308,7 +308,7 @@ public class BackupEngineOptions extends RocksObject {
/** /**
* Restore rate limiter. Used to control transfer speed during restore. If * Restore rate limiter. Used to control transfer speed during restore. If
* this is not null, {@link #restoreRateLimit()} is ignored. * this is not null, {@link #restoreRateLimit()} is ignored.
* * <p>
* Default: null * Default: null
* *
* @param restoreRateLimiter The rate limiter to use during restore * @param restoreRateLimiter The rate limiter to use during restore
@@ -324,7 +324,7 @@ public class BackupEngineOptions extends RocksObject {
/** /**
* Restore rate limiter. Used to control transfer speed during restore. If * Restore rate limiter. Used to control transfer speed during restore. If
* this is not null, {@link #restoreRateLimit()} is ignored. * this is not null, {@link #restoreRateLimit()} is ignored.
* * <p>
* Default: null * Default: null
* *
* @return The rate limiter in use during restore * @return The rate limiter in use during restore
@@ -400,7 +400,7 @@ public class BackupEngineOptions extends RocksObject {
/** /**
* During backup user can get callback every time next * During backup user can get callback every time next
* {@link #callbackTriggerIntervalSize()} bytes being copied. * {@link #callbackTriggerIntervalSize()} bytes being copied.
* * <p>
* Default: 4194304 * Default: 4194304
* *
* @param callbackTriggerIntervalSize The interval size for the * @param callbackTriggerIntervalSize The interval size for the
@@ -416,8 +416,8 @@ public class BackupEngineOptions extends RocksObject {
/** /**
* During backup user can get callback every time next * During backup user can get callback every time next
* {@link #callbackTriggerIntervalSize()} bytes being copied. * {@code #callbackTriggerIntervalSize()} bytes being copied.
* * <p>
* Default: 4194304 * Default: 4194304
* *
* @return The interval size for the callback trigger * @return The interval size for the callback trigger
@@ -427,7 +427,7 @@ public class BackupEngineOptions extends RocksObject {
return callbackTriggerIntervalSize(nativeHandle_); return callbackTriggerIntervalSize(nativeHandle_);
} }
private native static long newBackupEngineOptions(final String path); private static native long newBackupEngineOptions(final String path);
private native String backupDir(long handle); private native String backupDir(long handle);
private native void setBackupEnv(final long handle, final long envHandle); private native void setBackupEnv(final long handle, final long envHandle);
private native void setShareTableFiles(long handle, boolean flag); private native void setShareTableFiles(long handle, boolean flag);
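Tying the options in this file together, a hedged configuration sketch (the directory, rate, and interval values are illustrative, not recommendations):

    try (final RateLimiter rateLimiter = new RateLimiter(4L * 1024 * 1024); // ~4 MB/s
         final BackupEngineOptions backupOptions =
             new BackupEngineOptions("/tmp/backups") // must differ from the db dir
                 .setSync(true) // survive machine crash/reboot, at some speed cost
                 .setBackupLogFiles(true)
                 .setBackupRateLimiter(rateLimiter)
                 .setCallbackTriggerIntervalSize(4L * 1024 * 1024)) {
      // pass backupOptions to BackupEngine.open(...)
    }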
java/src/main/java/org/rocksdb/BackupInfo.java
@@ -68,9 +68,9 @@ public class BackupInfo {
return app_metadata_; return app_metadata_;
} }
private int backupId_; private final int backupId_;
private long timestamp_; private final long timestamp_;
private long size_; private final long size_;
private int numberFiles_; private final int numberFiles_;
private String app_metadata_; private final String app_metadata_;
} }
java/src/main/java/org/rocksdb/BlockBasedTableConfig.java
@@ -6,10 +6,10 @@ package org.rocksdb;
/** /**
* The config for plain table sst format. * The config for plain table sst format.
* * <p>
* BlockBasedTable is a RocksDB's default SST file format. * BlockBasedTable is a RocksDB's default SST file format.
*/ */
//TODO(AR) should be renamed BlockBasedTableOptions // TODO(AR) should be renamed BlockBasedTableOptions
public class BlockBasedTableConfig extends TableFormatConfig { public class BlockBasedTableConfig extends TableFormatConfig {
public BlockBasedTableConfig() { public BlockBasedTableConfig() {
@@ -243,7 +243,7 @@ public class BlockBasedTableConfig extends TableFormatConfig {
* Disable block cache. If this is set to true, * Disable block cache. If this is set to true,
* then no block cache should be used, and the {@link #setBlockCache(Cache)} * then no block cache should be used, and the {@link #setBlockCache(Cache)}
* should point to a {@code null} object. * should point to a {@code null} object.
* * <p>
* Default: false * Default: false
* *
* @param noBlockCache if use block cache * @param noBlockCache if use block cache
@@ -257,10 +257,10 @@ public class BlockBasedTableConfig extends TableFormatConfig {
/** /**
* Use the specified cache for blocks. * Use the specified cache for blocks.
* When not null this take precedence even if the user sets a block cache size. * When not null this take precedence even if the user sets a block cache size.
* * <p>
* {@link org.rocksdb.Cache} should not be disposed before options instances * {@link org.rocksdb.Cache} should not be disposed before options instances
* using this cache is disposed. * using this cache is disposed.
* * <p>
* {@link org.rocksdb.Cache} instance can be re-used in multiple options * {@link org.rocksdb.Cache} instance can be re-used in multiple options
* instances. * instances.
* *
@@ -276,7 +276,7 @@ public class BlockBasedTableConfig extends TableFormatConfig {
/** /**
* Use the specified persistent cache. * Use the specified persistent cache.
* * <p>
* If {@code !null} use the specified cache for pages read from device, * If {@code !null} use the specified cache for pages read from device,
* otherwise no page cache is used. * otherwise no page cache is used.
* *
@@ -327,7 +327,7 @@ public class BlockBasedTableConfig extends TableFormatConfig {
* is less than this specified number and adding a new record to the block * is less than this specified number and adding a new record to the block
* will exceed the configured block size, then this block will be closed and * will exceed the configured block size, then this block will be closed and
* the new record will be written to the next block. * the new record will be written to the next block.
* * <p>
* Default is 10. * Default is 10.
* *
* @param blockSizeDeviation the deviation to block size allowed * @param blockSizeDeviation the deviation to block size allowed
@@ -414,7 +414,7 @@ public class BlockBasedTableConfig extends TableFormatConfig {
/** /**
* Use partitioned full filters for each SST file. This option is incompatible * Use partitioned full filters for each SST file. This option is incompatible
* with block-based filters. * with block-based filters.
* * <p>
* Defaults to false. * Defaults to false.
* *
* @param partitionFilters use partition filters. * @param partitionFilters use partition filters.
@@ -428,7 +428,7 @@ public class BlockBasedTableConfig extends TableFormatConfig {
/*** /***
* Option to generate Bloom filters that minimize memory * Option to generate Bloom filters that minimize memory
* internal fragmentation. * internal fragmentation.
* * <p>
* See {@link #setOptimizeFiltersForMemory(boolean)}. * See {@link #setOptimizeFiltersForMemory(boolean)}.
* *
* @return true if bloom filters are used to minimize memory internal * @return true if bloom filters are used to minimize memory internal
@@ -442,7 +442,7 @@ public class BlockBasedTableConfig extends TableFormatConfig {
/** /**
* Option to generate Bloom filters that minimize memory * Option to generate Bloom filters that minimize memory
* internal fragmentation. * internal fragmentation.
* * <p>
* When false, malloc_usable_size is not available, or format_version &lt; 5, * When false, malloc_usable_size is not available, or format_version &lt; 5,
* filters are generated without regard to internal fragmentation when * filters are generated without regard to internal fragmentation when
* loaded into memory (historical behavior). When true (and * loaded into memory (historical behavior). When true (and
@@ -452,21 +452,21 @@ public class BlockBasedTableConfig extends TableFormatConfig {
* the reading DB has the same memory allocation characteristics as the * the reading DB has the same memory allocation characteristics as the
* generating DB. This option does not break forward or backward * generating DB. This option does not break forward or backward
* compatibility. * compatibility.
* * <p>
* While individual filters will vary in bits/key and false positive rate * While individual filters will vary in bits/key and false positive rate
* when setting is true, the implementation attempts to maintain a weighted * when setting is true, the implementation attempts to maintain a weighted
* average FP rate for filters consistent with this option set to false. * average FP rate for filters consistent with this option set to false.
* * <p>
* With Jemalloc for example, this setting is expected to save about 10% of * With Jemalloc for example, this setting is expected to save about 10% of
* the memory footprint and block cache charge of filters, while increasing * the memory footprint and block cache charge of filters, while increasing
* disk usage of filters by about 1-2% due to encoding efficiency losses * disk usage of filters by about 1-2% due to encoding efficiency losses
* with variance in bits/key. * with variance in bits/key.
* * <p>
* NOTE: Because some memory counted by block cache might be unmapped pages * NOTE: Because some memory counted by block cache might be unmapped pages
* within internal fragmentation, this option can increase observed RSS * within internal fragmentation, this option can increase observed RSS
* memory usage. With {@link #cacheIndexAndFilterBlocks()} == true, * memory usage. With {@link #cacheIndexAndFilterBlocks()} == true,
* this option makes the block cache better at using space it is allowed. * this option makes the block cache better at using space it is allowed.
* * <p>
* NOTE: Do not set to true if you do not trust malloc_usable_size. With * NOTE: Do not set to true if you do not trust malloc_usable_size. With
* this option, RocksDB might access an allocated memory object beyond its * this option, RocksDB might access an allocated memory object beyond its
* original size if malloc_usable_size says it is safe to do so. While this * original size if malloc_usable_size says it is safe to do so. While this
@@ -495,9 +495,9 @@ public class BlockBasedTableConfig extends TableFormatConfig {
/** /**
* Use delta encoding to compress keys in blocks. * Use delta encoding to compress keys in blocks.
* * <p>
* NOTE: {@link ReadOptions#pinData()} requires this option to be disabled. * NOTE: {@link ReadOptions#pinData()} requires this option to be disabled.
* * <p>
* Default: true * Default: true
* *
* @param useDeltaEncoding true to enable delta encoding * @param useDeltaEncoding true to enable delta encoding
@@ -521,10 +521,10 @@ public class BlockBasedTableConfig extends TableFormatConfig {
/** /**
* Use the specified filter policy to reduce disk reads. * Use the specified filter policy to reduce disk reads.
* * <p>
* {@link org.rocksdb.Filter} should not be closed before options instances * {@link org.rocksdb.Filter} should not be closed before options instances
* using this filter are closed. * using this filter are closed.
* * <p>
* {@link org.rocksdb.Filter} instance can be re-used in multiple options * {@link org.rocksdb.Filter} instance can be re-used in multiple options
* instances. * instances.
* *
@@ -576,7 +576,7 @@ public class BlockBasedTableConfig extends TableFormatConfig {
/** /**
* Returns true when compression verification is enabled. * Returns true when compression verification is enabled.
* * <p>
* See {@link #setVerifyCompression(boolean)}. * See {@link #setVerifyCompression(boolean)}.
* *
* @return true if compression verification is enabled. * @return true if compression verification is enabled.
@@ -602,7 +602,7 @@ public class BlockBasedTableConfig extends TableFormatConfig {
/** /**
* Get the Read amplification bytes per-bit. * Get the Read amplification bytes per-bit.
* * <p>
* See {@link #setReadAmpBytesPerBit(int)}. * See {@link #setReadAmpBytesPerBit(int)}.
* *
* @return the bytes per-bit. * @return the bytes per-bit.
@@ -613,27 +613,27 @@ public class BlockBasedTableConfig extends TableFormatConfig {
/** /**
* Set the Read amplification bytes per-bit. * Set the Read amplification bytes per-bit.
* * <p>
* If used, For every data block we load into memory, we will create a bitmap * If used, For every data block we load into memory, we will create a bitmap
* of size ((block_size / `read_amp_bytes_per_bit`) / 8) bytes. This bitmap * of size ((block_size / `read_amp_bytes_per_bit`) / 8) bytes. This bitmap
* will be used to figure out the percentage we actually read of the blocks. * will be used to figure out the percentage we actually read of the blocks.
* * <p>
* When this feature is used Tickers::READ_AMP_ESTIMATE_USEFUL_BYTES and * When this feature is used Tickers::READ_AMP_ESTIMATE_USEFUL_BYTES and
* Tickers::READ_AMP_TOTAL_READ_BYTES can be used to calculate the * Tickers::READ_AMP_TOTAL_READ_BYTES can be used to calculate the
* read amplification using this formula * read amplification using this formula
* (READ_AMP_TOTAL_READ_BYTES / READ_AMP_ESTIMATE_USEFUL_BYTES) * (READ_AMP_TOTAL_READ_BYTES / READ_AMP_ESTIMATE_USEFUL_BYTES)
* * <p>
* value =&gt; memory usage (percentage of loaded blocks memory) * value =&gt; memory usage (percentage of loaded blocks memory)
* 1 =&gt; 12.50 % * 1 =&gt; 12.50 %
* 2 =&gt; 06.25 % * 2 =&gt; 06.25 %
* 4 =&gt; 03.12 % * 4 =&gt; 03.12 %
* 8 =&gt; 01.56 % * 8 =&gt; 01.56 %
* 16 =&gt; 00.78 % * 16 =&gt; 00.78 %
* * <p>
* Note: This number must be a power of 2, if not it will be sanitized * Note: This number must be a power of 2, if not it will be sanitized
* to be the next lowest power of 2, for example a value of 7 will be * to be the next lowest power of 2, for example a value of 7 will be
* treated as 4, a value of 19 will be treated as 16. * treated as 4, a value of 19 will be treated as 16.
* * <p>
* Default: 0 (disabled) * Default: 0 (disabled)
* *
* @param readAmpBytesPerBit the bytes per-bit * @param readAmpBytesPerBit the bytes per-bit
@@ -699,7 +699,7 @@ public class BlockBasedTableConfig extends TableFormatConfig {
/** /**
* Determine if index compression is enabled. * Determine if index compression is enabled.
* * <p>
* See {@link #setEnableIndexCompression(boolean)}. * See {@link #setEnableIndexCompression(boolean)}.
* *
* @return true if index compression is enabled, false otherwise * @return true if index compression is enabled, false otherwise
@@ -710,7 +710,7 @@ public class BlockBasedTableConfig extends TableFormatConfig {
/** /**
* Store index blocks on disk in compressed format. * Store index blocks on disk in compressed format.
* * <p>
* Changing this option to false will avoid the overhead of decompression * Changing this option to false will avoid the overhead of decompression
* if index blocks are evicted and read back. * if index blocks are evicted and read back.
* *
java/src/main/java/org/rocksdb/BloomFilter.java
@@ -69,5 +69,5 @@ public class BloomFilter extends Filter {
this(bitsPerKey); this(bitsPerKey);
} }
private native static long createNewBloomFilter(final double bitsKeyKey); private static native long createNewBloomFilter(final double bitsKeyKey);
} }
java/src/main/java/org/rocksdb/ByteBufferGetStatus.java
@@ -12,7 +12,7 @@ import java.util.List;
/** /**
* A ByteBuffer containing fetched data, together with a result for the fetch * A ByteBuffer containing fetched data, together with a result for the fetch
* and the total size of the object fetched. * and the total size of the object fetched.
* * <p>
* Used for the individual results of * Used for the individual results of
* {@link RocksDB#multiGetByteBuffers(List, List)} * {@link RocksDB#multiGetByteBuffers(List, List)}
* {@link RocksDB#multiGetByteBuffers(List, List, List)} * {@link RocksDB#multiGetByteBuffers(List, List, List)}
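A consumption sketch, assuming the direct-ByteBuffer contract of multiGetByteBuffers and an already-open db (key and buffer sizes are illustrative):

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;
    import java.util.ArrayList;
    import java.util.List;

    final List<ByteBuffer> keys = new ArrayList<>();
    final ByteBuffer key = ByteBuffer.allocateDirect(16);
    key.put("key1".getBytes(StandardCharsets.UTF_8)).flip();
    keys.add(key);
    final List<ByteBuffer> values = new ArrayList<>();
    values.add(ByteBuffer.allocateDirect(1024)); // one output buffer per key
    for (final ByteBufferGetStatus status : db.multiGetByteBuffers(keys, values)) {
      if (status.status.getCode() == Status.Code.Ok) {
        // status.value holds the fetched bytes; status.requiredSize reports the
        // full value length, in case the supplied buffer was too small
      }
    }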
java/src/main/java/org/rocksdb/Cache.java
@@ -35,6 +35,6 @@ public abstract class Cache extends RocksObject {
return getPinnedUsage(this.nativeHandle_); return getPinnedUsage(this.nativeHandle_);
} }
private native static long getUsage(final long handle); private static native long getUsage(final long handle);
private native static long getPinnedUsage(final long handle); private static native long getPinnedUsage(final long handle);
} }
java/src/main/java/org/rocksdb/CassandraCompactionFilter.java
@@ -10,10 +10,11 @@ package org.rocksdb;
*/ */
public class CassandraCompactionFilter public class CassandraCompactionFilter
extends AbstractCompactionFilter<Slice> { extends AbstractCompactionFilter<Slice> {
public CassandraCompactionFilter(boolean purgeTtlOnExpiration, int gcGracePeriodInSeconds) { public CassandraCompactionFilter(
final boolean purgeTtlOnExpiration, final int gcGracePeriodInSeconds) {
super(createNewCassandraCompactionFilter0(purgeTtlOnExpiration, gcGracePeriodInSeconds)); super(createNewCassandraCompactionFilter0(purgeTtlOnExpiration, gcGracePeriodInSeconds));
} }
private native static long createNewCassandraCompactionFilter0( private static native long createNewCassandraCompactionFilter0(
boolean purgeTtlOnExpiration, int gcGracePeriodInSeconds); boolean purgeTtlOnExpiration, int gcGracePeriodInSeconds);
} }
java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java
@@ -10,15 +10,15 @@ package org.rocksdb;
* values. * values.
*/ */
public class CassandraValueMergeOperator extends MergeOperator { public class CassandraValueMergeOperator extends MergeOperator {
public CassandraValueMergeOperator(int gcGracePeriodInSeconds) { public CassandraValueMergeOperator(final int gcGracePeriodInSeconds) {
super(newSharedCassandraValueMergeOperator(gcGracePeriodInSeconds, 0)); super(newSharedCassandraValueMergeOperator(gcGracePeriodInSeconds, 0));
} }
public CassandraValueMergeOperator(int gcGracePeriodInSeconds, int operandsLimit) { public CassandraValueMergeOperator(final int gcGracePeriodInSeconds, final int operandsLimit) {
super(newSharedCassandraValueMergeOperator(gcGracePeriodInSeconds, operandsLimit)); super(newSharedCassandraValueMergeOperator(gcGracePeriodInSeconds, operandsLimit));
} }
private native static long newSharedCassandraValueMergeOperator( private static native long newSharedCassandraValueMergeOperator(
int gcGracePeriodInSeconds, int limit); int gcGracePeriodInSeconds, int limit);
@Override protected final native void disposeInternal(final long handle); @Override protected final native void disposeInternal(final long handle);
java/src/main/java/org/rocksdb/Checkpoint.java
@@ -31,8 +31,7 @@ public class Checkpoint extends RocksObject {
throw new IllegalStateException( throw new IllegalStateException(
"RocksDB instance must be initialized."); "RocksDB instance must be initialized.");
} }
Checkpoint checkpoint = new Checkpoint(db); return new Checkpoint(db);
return checkpoint;
} }
/** /**
@@ -53,11 +52,8 @@ public class Checkpoint extends RocksObject {
private Checkpoint(final RocksDB db) { private Checkpoint(final RocksDB db) {
super(newCheckpoint(db.nativeHandle_)); super(newCheckpoint(db.nativeHandle_));
this.db_ = db;
} }
private final RocksDB db_;
private static native long newCheckpoint(long dbHandle); private static native long newCheckpoint(long dbHandle);
@Override protected final native void disposeInternal(final long handle); @Override protected final native void disposeInternal(final long handle);
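Usage of the simplified class is unchanged; a minimal sketch (the checkpoint directory must not exist yet, and paths are illustrative):

    try (final RocksDB db = RocksDB.open("/tmp/db");
         final Checkpoint checkpoint = Checkpoint.create(db)) {
      // SST files are hard-linked where possible, so this is cheap
      // on the same filesystem
      checkpoint.createCheckpoint("/tmp/db-checkpoint-1");
    }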
java/src/main/java/org/rocksdb/ClockCache.java
@@ -53,7 +53,7 @@ public class ClockCache extends Cache {
super(newClockCache(capacity, numShardBits, strictCapacityLimit)); super(newClockCache(capacity, numShardBits, strictCapacityLimit));
} }
private native static long newClockCache(final long capacity, private static native long newClockCache(
final int numShardBits, final boolean strictCapacityLimit); final long capacity, final int numShardBits, final boolean strictCapacityLimit);
@Override protected final native void disposeInternal(final long handle); @Override protected final native void disposeInternal(final long handle);
} }
java/src/main/java/org/rocksdb/ColumnFamilyHandle.java
@@ -32,17 +32,17 @@ public class ColumnFamilyHandle extends RocksObject {
/** /**
* Constructor called only from JNI. * Constructor called only from JNI.
* * <p>
* NOTE: we are producing an additional Java Object here to represent the underlying native C++ * NOTE: we are producing an additional Java Object here to represent the underlying native C++
* ColumnFamilyHandle object. The underlying object is not owned by ourselves. The Java API user * ColumnFamilyHandle object. The underlying object is not owned by ourselves. The Java API user
* likely already had a ColumnFamilyHandle Java object which owns the underlying C++ object, as * likely already had a ColumnFamilyHandle Java object which owns the underlying C++ object, as
* they will have been presented it when they opened the database or added a Column Family. * they will have been presented it when they opened the database or added a Column Family.
* * <p>
* *
* TODO(AR) - Potentially a better design would be to cache the active Java Column Family Objects * TODO(AR) - Potentially a better design would be to cache the active Java Column Family Objects
* in RocksDB, and return the same Java Object instead of instantiating a new one here. This could * in RocksDB, and return the same Java Object instead of instantiating a new one here. This could
* also help us to improve the Java API semantics for Java users. See for example * also help us to improve the Java API semantics for Java users. See for example
* https://github.com/facebook/rocksdb/issues/2687. * <a href="https://github.com/facebook/rocksdb/issues/2687">...</a>.
* *
* @param nativeHandle native handle to the column family. * @param nativeHandle native handle to the column family.
*/ */
@@ -80,7 +80,7 @@ public class ColumnFamilyHandle extends RocksObject {
* information, this call might internally lock and release DB mutex to * information, this call might internally lock and release DB mutex to
* access the up-to-date CF options. In addition, all the pointer-typed * access the up-to-date CF options. In addition, all the pointer-typed
* options cannot be referenced any longer than the original options exist. * options cannot be referenced any longer than the original options exist.
* * <p>
* Note that this function is not supported in RocksDBLite. * Note that this function is not supported in RocksDBLite.
* *
* @return the up-to-date descriptor. * @return the up-to-date descriptor.
@@ -107,7 +107,7 @@ public class ColumnFamilyHandle extends RocksObject {
return rocksDB_.nativeHandle_ == that.rocksDB_.nativeHandle_ && return rocksDB_.nativeHandle_ == that.rocksDB_.nativeHandle_ &&
getID() == that.getID() && getID() == that.getID() &&
Arrays.equals(getName(), that.getName()); Arrays.equals(getName(), that.getName());
} catch (RocksDBException e) { } catch (final RocksDBException e) {
throw new RuntimeException("Cannot compare column family handles", e); throw new RuntimeException("Cannot compare column family handles", e);
} }
} }
@@ -118,7 +118,7 @@ public class ColumnFamilyHandle extends RocksObject {
int result = Objects.hash(getID(), rocksDB_.nativeHandle_); int result = Objects.hash(getID(), rocksDB_.nativeHandle_);
result = 31 * result + Arrays.hashCode(getName()); result = 31 * result + Arrays.hashCode(getName());
return result; return result;
} catch (RocksDBException e) { } catch (final RocksDBException e) {
throw new RuntimeException("Cannot calculate hash code of column family handle", e); throw new RuntimeException("Cannot calculate hash code of column family handle", e);
} }
} }
java/src/main/java/org/rocksdb/ColumnFamilyOptions.java
@@ -1291,7 +1291,7 @@ public class ColumnFamilyOptions extends RocksObject
* Dynamically changeable through * Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
* *
* @param prepopulateBlobCache the prepopulate blob cache option * @param prepopulateBlobCache prepopulate the blob cache option
* *
* @return the reference to the current options. * @return the reference to the current options.
*/ */
java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java
@@ -121,9 +121,9 @@ public interface ColumnFamilyOptionsInterface<T extends ColumnFamilyOptionsInter
/** /**
* Set {@link BuiltinComparator} to be used with RocksDB. * Set {@link BuiltinComparator} to be used with RocksDB.
* * <p>
* Note: Comparator can be set once upon database creation. * Note: Comparator can be set once upon database creation.
* * <p>
* Default: BytewiseComparator. * Default: BytewiseComparator.
* @param builtinComparator a {@link BuiltinComparator} type. * @param builtinComparator a {@link BuiltinComparator} type.
* @return the instance of the current object. * @return the instance of the current object.
@@ -133,11 +133,11 @@ public interface ColumnFamilyOptionsInterface<T extends ColumnFamilyOptionsInter
/** /**
* Use the specified comparator for key ordering. * Use the specified comparator for key ordering.
* * <p>
* Comparator should not be disposed before options instances using this comparator is * Comparator should not be disposed before options instances using this comparator is
* disposed. If dispose() function is not called, then comparator object will be * disposed. If dispose() function is not called, then comparator object will be
* GC'd automatically. * GC'd automatically.
* * <p>
* Comparator instance can be re-used in multiple options instances. * Comparator instance can be re-used in multiple options instances.
* *
* @param comparator java instance. * @param comparator java instance.
@@ -176,17 +176,17 @@ public interface ColumnFamilyOptionsInterface<T extends ColumnFamilyOptionsInter
* A single CompactionFilter instance to call into during compaction. * A single CompactionFilter instance to call into during compaction.
* Allows an application to modify/delete a key-value during background * Allows an application to modify/delete a key-value during background
* compaction. * compaction.
* * <p>
* If the client requires a new compaction filter to be used for different * If the client requires a new compaction filter to be used for different
* compaction runs, it can specify call * compaction runs, it can specify call
* {@link #setCompactionFilterFactory(AbstractCompactionFilterFactory)} * {@link #setCompactionFilterFactory(AbstractCompactionFilterFactory)}
* instead. * instead.
* * <p>
* The client should specify only set one of the two. * The client should specify only set one of the two.
* {@link #setCompactionFilter(AbstractCompactionFilter)} takes precedence * {#setCompactionFilter(AbstractCompactionFilter)} takes precedence
* over {@link #setCompactionFilterFactory(AbstractCompactionFilterFactory)} * over {@link #setCompactionFilterFactory(AbstractCompactionFilterFactory)}
* if the client specifies both. * if the client specifies both.
* * <p>
* If multithreaded compaction is being used, the supplied CompactionFilter * If multithreaded compaction is being used, the supplied CompactionFilter
* instance may be used from different threads concurrently and so should be thread-safe. * instance may be used from different threads concurrently and so should be thread-safe.
* *
@@ -207,7 +207,7 @@ public interface ColumnFamilyOptionsInterface<T extends ColumnFamilyOptionsInter
* This is a factory that provides {@link AbstractCompactionFilter} objects * This is a factory that provides {@link AbstractCompactionFilter} objects
* which allow an application to modify/delete a key-value during background * which allow an application to modify/delete a key-value during background
* compaction. * compaction.
* * <p>
* A new filter will be created on each compaction run. If multithreaded * A new filter will be created on each compaction run. If multithreaded
* compaction is being used, each created CompactionFilter will only be used * compaction is being used, each created CompactionFilter will only be used
* from a single thread and so does not need to be thread-safe. * from a single thread and so does not need to be thread-safe.
@@ -228,7 +228,7 @@ public interface ColumnFamilyOptionsInterface<T extends ColumnFamilyOptionsInter
/** /**
* This prefix-extractor uses the first n bytes of a key as its prefix. * This prefix-extractor uses the first n bytes of a key as its prefix.
* * <p>
* In some hash-based memtable representation such as HashLinkedList * In some hash-based memtable representation such as HashLinkedList
* and HashSkipList, prefixes are used to partition the keys into * and HashSkipList, prefixes are used to partition the keys into
* several buckets. Prefix extractor is used to specify how to * several buckets. Prefix extractor is used to specify how to
@@ -404,7 +404,7 @@ public interface ColumnFamilyOptionsInterface<T extends ColumnFamilyOptionsInter
* families, it would have files and total size from all * families, it would have files and total size from all
* the column families combined. User should provision for the * the column families combined. User should provision for the
* total size(from all the column families) in such cases. * total size(from all the column families) in such cases.
* * <p>
* If left empty, db_paths will be used. * If left empty, db_paths will be used.
* Default: empty * Default: empty
* *
@@ -422,7 +422,7 @@ public interface ColumnFamilyOptionsInterface<T extends ColumnFamilyOptionsInter
* Compression algorithm that will be used for the bottommost level that * Compression algorithm that will be used for the bottommost level that
* contain files. If level-compaction is used, this option will only affect * contain files. If level-compaction is used, this option will only affect
* levels after base level. * levels after base level.
* * <p>
* Default: {@link CompressionType#DISABLE_COMPRESSION_OPTION} * Default: {@link CompressionType#DISABLE_COMPRESSION_OPTION}
* *
* @param bottommostCompressionType The compression type to use for the * @param bottommostCompressionType The compression type to use for the
@@ -437,7 +437,7 @@ public interface ColumnFamilyOptionsInterface<T extends ColumnFamilyOptionsInter
* Compression algorithm that will be used for the bottommost level that * Compression algorithm that will be used for the bottommost level that
* contain files. If level-compaction is used, this option will only affect * contain files. If level-compaction is used, this option will only affect
* levels after base level. * levels after base level.
* * <p>
* Default: {@link CompressionType#DISABLE_COMPRESSION_OPTION} * Default: {@link CompressionType#DISABLE_COMPRESSION_OPTION}
* *
* @return The compression type used for the bottommost level * @return The compression type used for the bottommost level
@@ -447,7 +447,7 @@ public interface ColumnFamilyOptionsInterface<T extends ColumnFamilyOptionsInter
/** /**
* Set the options for compression algorithms used by * Set the options for compression algorithms used by
* {@link #bottommostCompressionType()} if it is enabled. * {@link #bottommostCompressionType()} if it is enabled.
* * <p>
* To enable it, please see the definition of * To enable it, please see the definition of
* {@link CompressionOptions}. * {@link CompressionOptions}.
* *
@@ -460,7 +460,7 @@ public interface ColumnFamilyOptionsInterface<T extends ColumnFamilyOptionsInter
/** /**
* Get the bottom most compression options. * Get the bottom most compression options.
* * <p>
* See {@link #setBottommostCompressionOptions(CompressionOptions)}. * See {@link #setBottommostCompressionOptions(CompressionOptions)}.
* *
* @return the bottom most compression options. * @return the bottom most compression options.
@@ -489,7 +489,7 @@ public interface ColumnFamilyOptionsInterface<T extends ColumnFamilyOptionsInter
* partitioning of sst files. This helps compaction to split the files * partitioning of sst files. This helps compaction to split the files
* on interesting boundaries (key prefixes) to make propagation of sst * on interesting boundaries (key prefixes) to make propagation of sst
* files less write amplifying (covering the whole key space). * files less write amplifying (covering the whole key space).
* * <p>
* Default: nullptr * Default: nullptr
* *
* @param factory The factory reference * @param factory The factory reference
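A sketch combining several of the column-family options documented above (the comparator, compression, and prefix-length choices are illustrative):

    try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
             .setComparator(BuiltinComparator.REVERSE_BYTEWISE_COMPARATOR)
             .setBottommostCompressionType(CompressionType.ZSTD_COMPRESSION)
             .setSstPartitionerFactory(new SstPartitionerFixedPrefixFactory(4))) {
      final ColumnFamilyDescriptor descriptor = new ColumnFamilyDescriptor(
          "my-cf".getBytes(StandardCharsets.UTF_8), cfOpts);
      // pass the descriptor to RocksDB.open(...) or createColumnFamily(...)
    }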
java/src/main/java/org/rocksdb/CompactRangeOptions.java
@@ -10,11 +10,10 @@ package org.rocksdb;
* any compaction that is using this CompactRangeOptions. * any compaction that is using this CompactRangeOptions.
*/ */
public class CompactRangeOptions extends RocksObject { public class CompactRangeOptions extends RocksObject {
private static final byte VALUE_kSkip = 0;
private final static byte VALUE_kSkip = 0; private static final byte VALUE_kIfHaveCompactionFilter = 1;
private final static byte VALUE_kIfHaveCompactionFilter = 1; private static final byte VALUE_kForce = 2;
private final static byte VALUE_kForce = 2; private static final byte VALUE_kForceOptimized = 3;
private final static byte VALUE_kForceOptimized = 3;
// For level based compaction, we can configure if we want to skip/force bottommost level // For level based compaction, we can configure if we want to skip/force bottommost level
// compaction. The order of this enum MUST follow the C++ layer. See BottommostLevelCompaction in // compaction. The order of this enum MUST follow the C++ layer. See BottommostLevelCompaction in
@@ -219,7 +218,7 @@ public class CompactRangeOptions extends RocksObject {
return this; return this;
} }
private native static long newCompactRangeOptions(); private static native long newCompactRangeOptions();
@Override protected final native void disposeInternal(final long handle); @Override protected final native void disposeInternal(final long handle);
private native boolean exclusiveManualCompaction(final long handle); private native boolean exclusiveManualCompaction(final long handle);
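A sketch of a forced manual compaction over the whole key range, using the enum values defined above (assumes an already-open db):

    try (final CompactRangeOptions compactOptions = new CompactRangeOptions()
             .setExclusiveManualCompaction(true)
             .setBottommostLevelCompaction(
                 CompactRangeOptions.BottommostLevelCompaction.kForceOptimized)) {
      // null begin/end keys compact the entire column family
      db.compactRange(db.getDefaultColumnFamily(), null, null, compactOptions);
    }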
java/src/main/java/org/rocksdb/CompactionJobInfo.java
@@ -98,7 +98,7 @@ public class CompactionJobInfo extends RocksObject {
/** /**
* Get the table properties for the input and output tables. * Get the table properties for the input and output tables.
* * <p>
* The map is keyed by values from {@link #inputFiles()} and * The map is keyed by values from {@link #inputFiles()} and
* {@link #outputFiles()}. * {@link #outputFiles()}.
* *
java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java
@@ -17,7 +17,7 @@ public class CompactionOptionsFIFO extends RocksObject {
/** /**
* Once the total sum of table files reaches this, we will delete the oldest * Once the total sum of table files reaches this, we will delete the oldest
* table file * table file
* * <p>
* Default: 1GB * Default: 1GB
* *
* @param maxTableFilesSize The maximum size of the table files * @param maxTableFilesSize The maximum size of the table files
@@ -33,7 +33,7 @@ public class CompactionOptionsFIFO extends RocksObject {
/** /**
* Once the total sum of table files reaches this, we will delete the oldest * Once the total sum of table files reaches this, we will delete the oldest
* table file * table file
* * <p>
* Default: 1GB * Default: 1GB
* *
* @return max table file size in bytes * @return max table file size in bytes
@@ -48,7 +48,7 @@ public class CompactionOptionsFIFO extends RocksObject {
* and compaction won't trigger if average compact bytes per del file is * and compaction won't trigger if average compact bytes per del file is
* larger than options.write_buffer_size. This is to protect large files * larger than options.write_buffer_size. This is to protect large files
* from being compacted again. * from being compacted again.
* * <p>
* Default: false * Default: false
* *
* @param allowCompaction true to allow intra-L0 compaction * @param allowCompaction true to allow intra-L0 compaction
@@ -61,13 +61,12 @@ public class CompactionOptionsFIFO extends RocksObject {
return this; return this;
} }
/** /**
* Check if intra-L0 compaction is enabled. * Check if intra-L0 compaction is enabled.
* When enabled, we try to compact smaller files into larger ones. * When enabled, we try to compact smaller files into larger ones.
* * <p>
* See {@link #setAllowCompaction(boolean)}. * See {@link #setAllowCompaction(boolean)}.
* * <p>
* Default: false * Default: false
* *
* @return true if intra-L0 compaction is enabled, false otherwise. * @return true if intra-L0 compaction is enabled, false otherwise.
@@ -76,8 +75,7 @@ public class CompactionOptionsFIFO extends RocksObject {
return allowCompaction(nativeHandle_); return allowCompaction(nativeHandle_);
} }
private static native long newCompactionOptionsFIFO();
private native static long newCompactionOptionsFIFO();
@Override protected final native void disposeInternal(final long handle); @Override protected final native void disposeInternal(final long handle);
private native void setMaxTableFilesSize(final long handle, private native void setMaxTableFilesSize(final long handle,
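A sketch wiring the FIFO options above into a column family (the 1 GB cap is illustrative):

    try (final CompactionOptionsFIFO fifoOptions = new CompactionOptionsFIFO()
             .setMaxTableFilesSize(1024L * 1024 * 1024) // delete oldest files past ~1 GB
             .setAllowCompaction(true); // permit intra-L0 compaction of small files
         final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
             .setCompactionStyle(CompactionStyle.FIFO)
             .setCompactionOptionsFIFO(fifoOptions)) {
      // use cfOpts when opening or creating the column family
    }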
java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java
@@ -18,7 +18,7 @@ public class CompactionOptionsUniversal extends RocksObject {
* Percentage flexibility while comparing file size. If the candidate file(s) * Percentage flexibility while comparing file size. If the candidate file(s)
* size is 1% smaller than the next file's size, then include next file into * size is 1% smaller than the next file's size, then include next file into
* this candidate set. * this candidate set.
* * <p>
* Default: 1 * Default: 1
* *
* @param sizeRatio The size ratio to use * @param sizeRatio The size ratio to use
@@ -34,7 +34,7 @@ public class CompactionOptionsUniversal extends RocksObject {
* Percentage flexibility while comparing file size. If the candidate file(s) * Percentage flexibility while comparing file size. If the candidate file(s)
* size is 1% smaller than the next file's size, then include next file into * size is 1% smaller than the next file's size, then include next file into
* this candidate set. * this candidate set.
* * <p>
* Default: 1 * Default: 1
* *
* @return The size ratio in use * @return The size ratio in use
@@ -45,7 +45,7 @@ public class CompactionOptionsUniversal extends RocksObject {
/** /**
* The minimum number of files in a single compaction run. * The minimum number of files in a single compaction run.
* * <p>
* Default: 2 * Default: 2
* *
* @param minMergeWidth minimum number of files in a single compaction run * @param minMergeWidth minimum number of files in a single compaction run
@@ -59,7 +59,7 @@ public class CompactionOptionsUniversal extends RocksObject {
/** /**
* The minimum number of files in a single compaction run. * The minimum number of files in a single compaction run.
* * <p>
* Default: 2 * Default: 2
* *
* @return minimum number of files in a single compaction run * @return minimum number of files in a single compaction run
@@ -70,7 +70,7 @@ public class CompactionOptionsUniversal extends RocksObject {
/** /**
* The maximum number of files in a single compaction run. * The maximum number of files in a single compaction run.
* * <p>
* Default: {@link Long#MAX_VALUE} * Default: {@link Long#MAX_VALUE}
* *
* @param maxMergeWidth maximum number of files in a single compaction run * @param maxMergeWidth maximum number of files in a single compaction run
@@ -84,7 +84,7 @@ public class CompactionOptionsUniversal extends RocksObject {
/** /**
* The maximum number of files in a single compaction run. * The maximum number of files in a single compaction run.
* * <p>
* Default: {@link Long#MAX_VALUE} * Default: {@link Long#MAX_VALUE}
* *
* @return maximum number of files in a single compaction run * @return maximum number of files in a single compaction run
@@ -102,7 +102,7 @@ public class CompactionOptionsUniversal extends RocksObject {
* a size amplification of 0%. Rocksdb uses the following heuristic * a size amplification of 0%. Rocksdb uses the following heuristic
* to calculate size amplification: it assumes that all files excluding * to calculate size amplification: it assumes that all files excluding
* the earliest file contribute to the size amplification. * the earliest file contribute to the size amplification.
* * <p>
* Default: 200, which means that a 100 byte database could require upto * Default: 200, which means that a 100 byte database could require upto
* 300 bytes of storage. * 300 bytes of storage.
* *
@@ -126,7 +126,7 @@ public class CompactionOptionsUniversal extends RocksObject {
* a size amplification of 0%. Rocksdb uses the following heuristic * a size amplification of 0%. Rocksdb uses the following heuristic
* to calculate size amplification: it assumes that all files excluding * to calculate size amplification: it assumes that all files excluding
* the earliest file contribute to the size amplification. * the earliest file contribute to the size amplification.
* * <p>
* Default: 200, which means that a 100 byte database could require upto * Default: 200, which means that a 100 byte database could require upto
* 300 bytes of storage. * 300 bytes of storage.
* *
@@ -140,11 +140,11 @@ public class CompactionOptionsUniversal extends RocksObject {
/** /**
* If this option is set to be -1 (the default value), all the output files * If this option is set to be -1 (the default value), all the output files
* will follow compression type specified. * will follow compression type specified.
* * <p>
* If this option is not negative, we will try to make sure compressed * If this option is not negative, we will try to make sure compressed
* size is just above this value. In normal cases, at least this percentage * size is just above this value. In normal cases, at least this percentage
* of data will be compressed. * of data will be compressed.
* * <p>
 * When we are compacting to a new file, here is the criterion for whether  * When we are compacting to a new file, here is the criterion for whether
 * it needs to be compressed: assuming here is the list of files sorted  * it needs to be compressed: assuming here is the list of files sorted
* by generation time: * by generation time:
@ -154,7 +154,7 @@ public class CompactionOptionsUniversal extends RocksObject {
* well as the total size of C1...Ct as total_C, the compaction output file * well as the total size of C1...Ct as total_C, the compaction output file
* will be compressed iff * will be compressed iff
* total_C / total_size &lt; this percentage * total_C / total_size &lt; this percentage
* * <p>
* Default: -1 * Default: -1
* *
* @param compressionSizePercent percentage of size for compression * @param compressionSizePercent percentage of size for compression
@ -170,11 +170,11 @@ public class CompactionOptionsUniversal extends RocksObject {
/** /**
* If this option is set to be -1 (the default value), all the output files * If this option is set to be -1 (the default value), all the output files
* will follow compression type specified. * will follow compression type specified.
* * <p>
* If this option is not negative, we will try to make sure compressed * If this option is not negative, we will try to make sure compressed
* size is just above this value. In normal cases, at least this percentage * size is just above this value. In normal cases, at least this percentage
* of data will be compressed. * of data will be compressed.
* * <p>
* When we are compacting to a new file, here is the criteria whether * When we are compacting to a new file, here is the criteria whether
* it needs to be compressed: assuming here are the list of files sorted * it needs to be compressed: assuming here are the list of files sorted
* by generation time: * by generation time:
@ -184,7 +184,7 @@ public class CompactionOptionsUniversal extends RocksObject {
* well as the total size of C1...Ct as total_C, the compaction output file * well as the total size of C1...Ct as total_C, the compaction output file
* will be compressed iff * will be compressed iff
* total_C / total_size &lt; this percentage * total_C / total_size &lt; this percentage
* * <p>
* Default: -1 * Default: -1
* *
* @return percentage of size for compression * @return percentage of size for compression
@ -195,7 +195,7 @@ public class CompactionOptionsUniversal extends RocksObject {
/** /**
* The algorithm used to stop picking files into a single compaction run * The algorithm used to stop picking files into a single compaction run
* * <p>
* Default: {@link CompactionStopStyle#CompactionStopStyleTotalSize} * Default: {@link CompactionStopStyle#CompactionStopStyleTotalSize}
* *
* @param compactionStopStyle The compaction algorithm * @param compactionStopStyle The compaction algorithm
@ -210,7 +210,7 @@ public class CompactionOptionsUniversal extends RocksObject {
/** /**
* The algorithm used to stop picking files into a single compaction run * The algorithm used to stop picking files into a single compaction run
* * <p>
* Default: {@link CompactionStopStyle#CompactionStopStyleTotalSize} * Default: {@link CompactionStopStyle#CompactionStopStyleTotalSize}
* *
* @return The compaction algorithm * @return The compaction algorithm
@ -222,7 +222,7 @@ public class CompactionOptionsUniversal extends RocksObject {
/** /**
* Option to optimize the universal multi level compaction by enabling * Option to optimize the universal multi level compaction by enabling
 * trivial move for non-overlapping files.  * trivial move for non-overlapping files.
* * <p>
* Default: false * Default: false
* *
* @param allowTrivialMove true if trivial move is allowed * @param allowTrivialMove true if trivial move is allowed
@ -238,7 +238,7 @@ public class CompactionOptionsUniversal extends RocksObject {
/** /**
* Option to optimize the universal multi level compaction by enabling * Option to optimize the universal multi level compaction by enabling
 * trivial move for non-overlapping files.  * trivial move for non-overlapping files.
* * <p>
* Default: false * Default: false
* *
* @return true if trivial move is allowed * @return true if trivial move is allowed
@ -247,7 +247,7 @@ public class CompactionOptionsUniversal extends RocksObject {
return allowTrivialMove(nativeHandle_); return allowTrivialMove(nativeHandle_);
} }
private native static long newCompactionOptionsUniversal(); private static native long newCompactionOptionsUniversal();
@Override protected final native void disposeInternal(final long handle); @Override protected final native void disposeInternal(final long handle);
private native void setSizeRatio(final long handle, final int sizeRatio); private native void setSizeRatio(final long handle, final int sizeRatio);

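For orientation, here is a minimal sketch, assuming the surrounding database setup, of wiring these universal-compaction options into a column family; the values shown are the documented defaults, not a recommendation:

// Illustrative only: universal compaction tuned via the setters above.
try (final CompactionOptionsUniversal universalOptions = new CompactionOptionsUniversal()
         .setMinMergeWidth(2) // default: 2
         .setMaxMergeWidth(Long.MAX_VALUE) // default: Long.MAX_VALUE
         .setMaxSizeAmplificationPercent(200) // default: 200
         .setCompressionSizePercent(-1) // default: -1, compress all output files
         .setStopStyle(CompactionStopStyle.CompactionStopStyleTotalSize)
         .setAllowTrivialMove(false); // default: false
     final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions()
         .setCompactionStyle(CompactionStyle.UNIVERSAL)
         .setCompactionOptionsUniversal(universalOptions)) {
  // open the database with cfOptions ...
}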
@ -9,7 +9,7 @@ import java.util.List;
/** /**
* Enum CompactionStyle * Enum CompactionStyle
* * <p>
* RocksDB supports different styles of compaction. Available * RocksDB supports different styles of compaction. Available
* compaction styles can be chosen using this enumeration. * compaction styles can be chosen using this enumeration.
* *
@ -25,7 +25,8 @@ import java.util.List;
* the old data, so it's basically a TTL compaction style.</li> * the old data, so it's basically a TTL compaction style.</li>
* <li><strong>NONE</strong> - Disable background compaction. * <li><strong>NONE</strong> - Disable background compaction.
* Compaction jobs are submitted * Compaction jobs are submitted
* {@link RocksDB#compactFiles(CompactionOptions, ColumnFamilyHandle, List, int, int, CompactionJobInfo)} ()}.</li> * {@link RocksDB#compactFiles(CompactionOptions, ColumnFamilyHandle, List, int, int,
* CompactionJobInfo)} ()}.</li>
* </ol> * </ol>
* *
* @see <a * @see <a

@ -8,7 +8,7 @@ package org.rocksdb;
* This class controls the behaviour * This class controls the behaviour
* of Java implementations of * of Java implementations of
* AbstractComparator * AbstractComparator
* * <p>
* Note that dispose() must be called before a ComparatorOptions * Note that dispose() must be called before a ComparatorOptions
* instance becomes out-of-scope to release the allocated memory in C++. * instance becomes out-of-scope to release the allocated memory in C++.
*/ */
@ -48,10 +48,10 @@ public class ComparatorOptions extends RocksObject {
} }
/** /**
* Indicates if a direct byte buffer (i.e. outside of the normal * Indicates if a direct byte buffer (i.e. outside the normal
* garbage-collected heap) is used, as opposed to a non-direct byte buffer * garbage-collected heap) is used, as opposed to a non-direct byte buffer
* which is a wrapper around an on-heap byte[]. * which is a wrapper around an on-heap byte[].
* * <p>
* Default: true * Default: true
* *
* @return true if a direct byte buffer will be used, false otherwise * @return true if a direct byte buffer will be used, false otherwise
@ -62,10 +62,10 @@ public class ComparatorOptions extends RocksObject {
} }
/** /**
* Controls whether a direct byte buffer (i.e. outside of the normal * Controls whether a direct byte buffer (i.e. outside the normal
* garbage-collected heap) is used, as opposed to a non-direct byte buffer * garbage-collected heap) is used, as opposed to a non-direct byte buffer
* which is a wrapper around an on-heap byte[]. * which is a wrapper around an on-heap byte[].
* * <p>
* Default: true * Default: true
* *
* @param useDirectBuffer true if a direct byte buffer should be used, * @param useDirectBuffer true if a direct byte buffer should be used,
@ -86,7 +86,7 @@ public class ComparatorOptions extends RocksObject {
* if it requires less than {@code maxReuseBufferSize}, then an * if it requires less than {@code maxReuseBufferSize}, then an
* existing buffer will be reused, else a new buffer will be * existing buffer will be reused, else a new buffer will be
* allocated just for that callback. * allocated just for that callback.
* * <p>
* Default: 64 bytes * Default: 64 bytes
* *
* @return the maximum size of a buffer which is reused, * @return the maximum size of a buffer which is reused,
@ -105,7 +105,7 @@ public class ComparatorOptions extends RocksObject {
* if it requires less than {@code maxReuseBufferSize}, then an * if it requires less than {@code maxReuseBufferSize}, then an
* existing buffer will be reused, else a new buffer will be * existing buffer will be reused, else a new buffer will be
* allocated just for that callback. * allocated just for that callback.
* * <p>
* Default: 64 bytes * Default: 64 bytes
* *
* @param maxReusedBufferSize the maximum size for a buffer to reuse, or 0 to * @param maxReusedBufferSize the maximum size for a buffer to reuse, or 0 to
@ -119,7 +119,7 @@ public class ComparatorOptions extends RocksObject {
return this; return this;
} }
private native static long newComparatorOptions(); private static native long newComparatorOptions();
private native byte reusedSynchronisationType(final long handle); private native byte reusedSynchronisationType(final long handle);
private native void setReusedSynchronisationType(final long handle, private native void setReusedSynchronisationType(final long handle,
final byte reusedSynchronisationType); final byte reusedSynchronisationType);

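A hedged sketch of how ComparatorOptions typically reaches a custom comparator; the comparator body and its name are hypothetical examples, not RocksDB code:

try (final ComparatorOptions comparatorOptions = new ComparatorOptions()
         .setUseDirectBuffer(true) // default: true, off-heap ByteBuffers
         .setMaxReusedBufferSize(64)) { // default: 64 bytes
  final AbstractComparator comparator = new AbstractComparator(comparatorOptions) {
    @Override
    public String name() {
      return "example.ByteBufferOrder"; // hypothetical comparator name
    }
    @Override
    public int compare(final ByteBuffer a, final ByteBuffer b) {
      return a.compareTo(b); // signed lexicographic byte order
    }
  };
  // hand the comparator to ColumnFamilyOptions#setComparator(...) before opening
}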
@ -48,9 +48,9 @@ public class CompressionOptions extends RocksObject {
* loaded into the compression library before compressing/uncompressing each * loaded into the compression library before compressing/uncompressing each
* data block of subsequent files in the subcompaction. Effectively, this * data block of subsequent files in the subcompaction. Effectively, this
* improves compression ratios when there are repetitions across data blocks. * improves compression ratios when there are repetitions across data blocks.
* * <p>
* A value of 0 indicates the feature is disabled. * A value of 0 indicates the feature is disabled.
* * <p>
* Default: 0. * Default: 0.
* *
* @param maxDictBytes Maximum bytes to use for the dictionary * @param maxDictBytes Maximum bytes to use for the dictionary
@ -75,10 +75,10 @@ public class CompressionOptions extends RocksObject {
* Maximum size of training data passed to zstd's dictionary trainer. Using * Maximum size of training data passed to zstd's dictionary trainer. Using
* zstd's dictionary trainer can achieve even better compression ratio * zstd's dictionary trainer can achieve even better compression ratio
* improvements than using {@link #setMaxDictBytes(int)} alone. * improvements than using {@link #setMaxDictBytes(int)} alone.
* * <p>
* The training data will be used to generate a dictionary * The training data will be used to generate a dictionary
* of {@link #maxDictBytes()}. * of {@link #maxDictBytes()}.
* * <p>
* Default: 0. * Default: 0.
* *
* @param zstdMaxTrainBytes Maximum bytes to use for training ZStd. * @param zstdMaxTrainBytes Maximum bytes to use for training ZStd.
@ -104,10 +104,10 @@ public class CompressionOptions extends RocksObject {
* For bottommost_compression_opts, to enable it, user must set enabled=true. * For bottommost_compression_opts, to enable it, user must set enabled=true.
* Otherwise, bottommost compression will use compression_opts as default * Otherwise, bottommost compression will use compression_opts as default
* compression options. * compression options.
* * <p>
* For compression_opts, if compression_opts.enabled=false, it is still * For compression_opts, if compression_opts.enabled=false, it is still
* used as compression options for compression process. * used as compression options for compression process.
* * <p>
* Default: false. * Default: false.
* *
* @param enabled true to use these compression options * @param enabled true to use these compression options
@ -131,8 +131,7 @@ public class CompressionOptions extends RocksObject {
return enabled(nativeHandle_); return enabled(nativeHandle_);
} }
private static native long newCompressionOptions();
private native static long newCompressionOptions();
@Override protected final native void disposeInternal(final long handle); @Override protected final native void disposeInternal(final long handle);
private native void setWindowBits(final long handle, final int windowBits); private native void setWindowBits(final long handle, final int windowBits);

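A minimal sketch, assuming zstd is available in the build, of enabling dictionary compression for the bottommost level with these options; the training-sample size is an assumption:

try (final CompressionOptions bottommost = new CompressionOptions()
         .setMaxDictBytes(16 * 1024) // 16KB dictionary, per the setter above
         .setZstdMaxTrainBytes(100 * 16 * 1024) // training data size, an assumption
         .setEnabled(true); // required for bottommost_compression_opts
     final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions()
         .setBottommostCompressionType(CompressionType.ZSTD_COMPRESSION)
         .setBottommostCompressionOptions(bottommost)) {
  // open the database with cfOptions ...
}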
@ -35,9 +35,9 @@ public enum CompressionType {
* *
* @return CompressionType instance. * @return CompressionType instance.
*/ */
public static CompressionType getCompressionType(String libraryName) { public static CompressionType getCompressionType(final String libraryName) {
if (libraryName != null) { if (libraryName != null) {
for (CompressionType compressionType : CompressionType.values()) { for (final CompressionType compressionType : CompressionType.values()) {
if (compressionType.getLibraryName() != null && if (compressionType.getLibraryName() != null &&
compressionType.getLibraryName().equals(libraryName)) { compressionType.getLibraryName().equals(libraryName)) {
return compressionType; return compressionType;
@ -58,7 +58,7 @@ public enum CompressionType {
* @throws IllegalArgumentException If CompressionType cannot be found for the * @throws IllegalArgumentException If CompressionType cannot be found for the
* provided byteIdentifier * provided byteIdentifier
*/ */
public static CompressionType getCompressionType(byte byteIdentifier) { public static CompressionType getCompressionType(final byte byteIdentifier) {
for (final CompressionType compressionType : CompressionType.values()) { for (final CompressionType compressionType : CompressionType.values()) {
if (compressionType.getValue() == byteIdentifier) { if (compressionType.getValue() == byteIdentifier) {
return compressionType; return compressionType;

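The two lookup overloads above in use (illustrative):

final CompressionType byName = CompressionType.getCompressionType("zstd"); // ZSTD_COMPRESSION
final CompressionType byId = CompressionType.getCompressionType(byName.getValue());
// An unknown library name falls back to NO_COMPRESSION, while an unknown
// byte identifier throws IllegalArgumentException.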
@ -44,10 +44,10 @@ public class ConfigOptions extends RocksObject {
@Override protected final native void disposeInternal(final long handle); @Override protected final native void disposeInternal(final long handle);
private native static long newConfigOptions(); private static native long newConfigOptions();
private native static void setEnv(final long handle, final long envHandle); private static native void setEnv(final long handle, final long envHandle);
private native static void setDelimiter(final long handle, final String delimiter); private static native void setDelimiter(final long handle, final String delimiter);
private native static void setIgnoreUnknownOptions(final long handle, final boolean ignore); private static native void setIgnoreUnknownOptions(final long handle, final boolean ignore);
private native static void setInputStringsEscaped(final long handle, final boolean escaped); private static native void setInputStringsEscaped(final long handle, final boolean escaped);
private native static void setSanityLevel(final long handle, final byte level); private static native void setSanityLevel(final long handle, final byte level);
} }

@ -11,7 +11,7 @@ import java.util.*;
/** /**
* DBOptions to control the behavior of a database. It will be used * DBOptions to control the behavior of a database. It will be used
* during the creation of a {@link org.rocksdb.RocksDB} (i.e., RocksDB.open()). * during the creation of a {@link org.rocksdb.RocksDB} (i.e., RocksDB.open()).
* * <p>
 * As a descendant of {@link AbstractNativeReference}, this class is {@link AutoCloseable}  * As a descendant of {@link AbstractNativeReference}, this class is {@link AutoCloseable}
* and will be automatically released if opened in the preamble of a try with resources block. * and will be automatically released if opened in the preamble of a try with resources block.
*/ */
@ -24,7 +24,7 @@ public class DBOptions extends RocksObject
/** /**
* Construct DBOptions. * Construct DBOptions.
* * <p>
* This constructor will create (by allocating a block of memory) * This constructor will create (by allocating a block of memory)
 * a {@code rocksdb::DBOptions} on the C++ side.  * a {@code rocksdb::DBOptions} on the C++ side.
*/ */
@ -36,13 +36,13 @@ public class DBOptions extends RocksObject
/** /**
* Copy constructor for DBOptions. * Copy constructor for DBOptions.
* * <p>
* NOTE: This does a shallow copy, which means env, rate_limiter, sst_file_manager, * NOTE: This does a shallow copy, which means env, rate_limiter, sst_file_manager,
* info_log and other pointers will be cloned! * info_log and other pointers will be cloned!
* *
* @param other The DBOptions to copy. * @param other The DBOptions to copy.
*/ */
public DBOptions(DBOptions other) { public DBOptions(final DBOptions other) {
super(copyDBOptions(other.nativeHandle_)); super(copyDBOptions(other.nativeHandle_));
this.env_ = other.env_; this.env_ = other.env_;
this.numShardBits_ = other.numShardBits_; this.numShardBits_ = other.numShardBits_;

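A small sketch of the shallow-copy semantics described in the note above (the option values are illustrative):

try (final DBOptions original = new DBOptions()
         .setCreateIfMissing(true)
         .setEnv(Env.getDefault());
     final DBOptions copy = new DBOptions(original)) {
  // Scalar options are copied; copy.getEnv() still refers to the same Env.
}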
@ -10,13 +10,13 @@ import java.nio.ByteBuffer;
/** /**
* Base class for slices which will receive direct * Base class for slices which will receive direct
* ByteBuffer based access to the underlying data. * ByteBuffer based access to the underlying data.
* * <p>
* ByteBuffer backed slices typically perform better with * ByteBuffer backed slices typically perform better with
* larger keys and values. When using smaller keys and * larger keys and values. When using smaller keys and
* values consider using @see org.rocksdb.Slice * values consider using @see org.rocksdb.Slice
*/ */
public class DirectSlice extends AbstractSlice<ByteBuffer> { public class DirectSlice extends AbstractSlice<ByteBuffer> {
public final static DirectSlice NONE = new DirectSlice(); public static final DirectSlice NONE = new DirectSlice();
/** /**
* Indicates whether we have to free the memory pointed to by the Slice * Indicates whether we have to free the memory pointed to by the Slice
@ -29,7 +29,7 @@ public class DirectSlice extends AbstractSlice<ByteBuffer> {
* Called from JNI to construct a new Java DirectSlice * Called from JNI to construct a new Java DirectSlice
* without an underlying C++ object set * without an underlying C++ object set
* at creation time. * at creation time.
* * <p>
* Note: You should be aware that it is intentionally marked as * Note: You should be aware that it is intentionally marked as
* package-private. This is so that developers cannot construct their own * package-private. This is so that developers cannot construct their own
* default DirectSlice objects (at present). As developers cannot construct * default DirectSlice objects (at present). As developers cannot construct
@ -123,9 +123,8 @@ public class DirectSlice extends AbstractSlice<ByteBuffer> {
disposeInternal(nativeHandle); disposeInternal(nativeHandle);
} }
private native static long createNewDirectSlice0(final ByteBuffer data, private static native long createNewDirectSlice0(final ByteBuffer data, final int length);
final int length); private static native long createNewDirectSlice1(final ByteBuffer data);
private native static long createNewDirectSlice1(final ByteBuffer data);
@Override protected final native ByteBuffer data0(long handle); @Override protected final native ByteBuffer data0(long handle);
private native byte get0(long handle, int offset); private native byte get0(long handle, int offset);
private native void clear0(long handle, boolean internalBuffer, private native void clear0(long handle, boolean internalBuffer,

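A minimal sketch of constructing a DirectSlice; note the backing buffer must be allocated direct, and the key bytes here are illustrative:

final ByteBuffer buffer = ByteBuffer.allocateDirect(16); // must be a direct buffer
buffer.put("user:0001".getBytes(StandardCharsets.UTF_8));
buffer.flip();
try (final DirectSlice slice = new DirectSlice(buffer, buffer.remaining())) {
  // e.g. use as readOptions.setIterateUpperBound(slice)
}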
@ -47,7 +47,7 @@ public enum EncodingType {
return value_; return value_;
} }
private EncodingType(byte value) { private EncodingType(final byte value) {
value_ = value; value_ = value;
} }

@ -19,7 +19,7 @@ public abstract class Env extends RocksObject {
private static final Env DEFAULT_ENV = new RocksEnv(getDefaultEnvInternal()); private static final Env DEFAULT_ENV = new RocksEnv(getDefaultEnvInternal());
static { static {
/** /*
* The Ownership of the Default Env belongs to C++ * The Ownership of the Default Env belongs to C++
* and so we disown the native handle here so that * and so we disown the native handle here so that
* we cannot accidentally free it from Java. * we cannot accidentally free it from Java.

@ -31,7 +31,7 @@ public class EnvOptions extends RocksObject {
/** /**
* Enable/Disable memory mapped reads. * Enable/Disable memory mapped reads.
* * <p>
* Default: false * Default: false
* *
* @param useMmapReads true to enable memory mapped reads, false to disable. * @param useMmapReads true to enable memory mapped reads, false to disable.
@ -55,7 +55,7 @@ public class EnvOptions extends RocksObject {
/** /**
* Enable/Disable memory mapped Writes. * Enable/Disable memory mapped Writes.
* * <p>
* Default: true * Default: true
* *
* @param useMmapWrites true to enable memory mapped writes, false to disable. * @param useMmapWrites true to enable memory mapped writes, false to disable.
@ -79,7 +79,7 @@ public class EnvOptions extends RocksObject {
/** /**
* Enable/Disable direct reads, i.e. {@code O_DIRECT}. * Enable/Disable direct reads, i.e. {@code O_DIRECT}.
* * <p>
* Default: false * Default: false
* *
* @param useDirectReads true to enable direct reads, false to disable. * @param useDirectReads true to enable direct reads, false to disable.
@ -103,7 +103,7 @@ public class EnvOptions extends RocksObject {
/** /**
* Enable/Disable direct writes, i.e. {@code O_DIRECT}. * Enable/Disable direct writes, i.e. {@code O_DIRECT}.
* * <p>
* Default: false * Default: false
* *
* @param useDirectWrites true to enable direct writes, false to disable. * @param useDirectWrites true to enable direct writes, false to disable.
@ -127,9 +127,9 @@ public class EnvOptions extends RocksObject {
/** /**
* Enable/Disable fallocate calls. * Enable/Disable fallocate calls.
* * <p>
* Default: true * Default: true
* * <p>
* If false, {@code fallocate()} calls are bypassed. * If false, {@code fallocate()} calls are bypassed.
* *
* @param allowFallocate true to enable fallocate calls, false to disable. * @param allowFallocate true to enable fallocate calls, false to disable.
@ -153,7 +153,7 @@ public class EnvOptions extends RocksObject {
/** /**
* Enable/Disable the {@code FD_CLOEXEC} bit when opening file descriptors. * Enable/Disable the {@code FD_CLOEXEC} bit when opening file descriptors.
* * <p>
* Default: true * Default: true
* *
 * @param setFdCloexec true to enable the {@code FD_CLOEXEC} bit,  * @param setFdCloexec true to enable the {@code FD_CLOEXEC} bit,
@ -181,7 +181,7 @@ public class EnvOptions extends RocksObject {
* Allows OS to incrementally sync files to disk while they are being * Allows OS to incrementally sync files to disk while they are being
* written, in the background. Issue one request for every * written, in the background. Issue one request for every
* {@code bytesPerSync} written. * {@code bytesPerSync} written.
* * <p>
* Default: 0 * Default: 0
* *
* @param bytesPerSync 0 to disable, otherwise the number of bytes. * @param bytesPerSync 0 to disable, otherwise the number of bytes.
@ -323,8 +323,8 @@ public class EnvOptions extends RocksObject {
return rateLimiter; return rateLimiter;
} }
private native static long newEnvOptions(); private static native long newEnvOptions();
private native static long newEnvOptions(final long dboptions_handle); private static native long newEnvOptions(final long dboptions_handle);
@Override protected final native void disposeInternal(final long handle); @Override protected final native void disposeInternal(final long handle);
private native void setUseMmapReads(final long handle, private native void setUseMmapReads(final long handle,

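A hedged sketch of these EnvOptions as consumed by, for example, an SstFileWriter; the path and byte values are assumptions:

try (final EnvOptions envOptions = new EnvOptions()
         .setUseMmapWrites(true) // default: true
         .setUseDirectReads(false) // default: false
         .setBytesPerSync(1024 * 1024); // incremental sync every 1MB; default: 0 (off)
     final Options options = new Options();
     final SstFileWriter writer = new SstFileWriter(envOptions, options)) {
  // writer.open("/tmp/example.sst"); writer.put(...); writer.finish();
}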
@ -12,7 +12,7 @@ import java.util.List;
* be called when specific RocksDB event happens such as flush. It can * be called when specific RocksDB event happens such as flush. It can
* be used as a building block for developing custom features such as * be used as a building block for developing custom features such as
* stats-collector or external compaction algorithm. * stats-collector or external compaction algorithm.
* * <p>
* Note that callback functions should not run for an extended period of * Note that callback functions should not run for an extended period of
* time before the function returns, otherwise RocksDB may be blocked. * time before the function returns, otherwise RocksDB may be blocked.
* For example, it is not suggested to do * For example, it is not suggested to do
@ -21,17 +21,17 @@ import java.util.List;
* {@link RocksDB#put(ColumnFamilyHandle, WriteOptions, byte[], byte[])} * {@link RocksDB#put(ColumnFamilyHandle, WriteOptions, byte[], byte[])}
* (as Put may be blocked in certain cases) in the same thread in the * (as Put may be blocked in certain cases) in the same thread in the
* EventListener callback. * EventListener callback.
* * <p>
* However, doing * However, doing
* {@link RocksDB#compactFiles(CompactionOptions, ColumnFamilyHandle, List, int, int, * {@link RocksDB#compactFiles(CompactionOptions, ColumnFamilyHandle, List, int, int,
* CompactionJobInfo)} and {@link RocksDB#put(ColumnFamilyHandle, WriteOptions, byte[], byte[])} in * CompactionJobInfo)} and {@link RocksDB#put(ColumnFamilyHandle, WriteOptions, byte[], byte[])} in
* another thread is considered safe. * another thread is considered safe.
* * <p>
 * [Threading] All EventListener callbacks will be called using the  * [Threading] All EventListener callbacks will be called using the
 * actual thread that is involved in that specific event. For example, it  * actual thread that is involved in that specific event. For example, it
* is the RocksDB background flush thread that does the actual flush to * is the RocksDB background flush thread that does the actual flush to
* call {@link #onFlushCompleted(RocksDB, FlushJobInfo)}. * call {@link #onFlushCompleted(RocksDB, FlushJobInfo)}.
* * <p>
* [Locking] All EventListener callbacks are designed to be called without * [Locking] All EventListener callbacks are designed to be called without
* the current thread holding any DB mutex. This is to prevent potential * the current thread holding any DB mutex. This is to prevent potential
* deadlock and performance issue when using EventListener callback * deadlock and performance issue when using EventListener callback
@ -41,7 +41,7 @@ public interface EventListener {
/** /**
* A callback function to RocksDB which will be called before a * A callback function to RocksDB which will be called before a
* RocksDB starts to flush memtables. * RocksDB starts to flush memtables.
* * <p>
 * Note that this function must be implemented in a way such that  * Note that this function must be implemented in a way such that
* it should not run for an extended period of time before the function * it should not run for an extended period of time before the function
* returns. Otherwise, RocksDB may be blocked. * returns. Otherwise, RocksDB may be blocked.
@ -55,7 +55,7 @@ public interface EventListener {
/** /**
 * A callback function to RocksDB which will be called whenever a  * A callback function to RocksDB which will be called whenever a
* registered RocksDB flushes a file. * registered RocksDB flushes a file.
* * <p>
 * Note that this function must be implemented in a way such that  * Note that this function must be implemented in a way such that
* it should not run for an extended period of time before the function * it should not run for an extended period of time before the function
* returns. Otherwise, RocksDB may be blocked. * returns. Otherwise, RocksDB may be blocked.
@ -77,7 +77,7 @@ public interface EventListener {
 * on file creations and deletions are suggested to implement  * on file creations and deletions are suggested to implement
* {@link #onFlushCompleted(RocksDB, FlushJobInfo)} and * {@link #onFlushCompleted(RocksDB, FlushJobInfo)} and
* {@link #onCompactionCompleted(RocksDB, CompactionJobInfo)}. * {@link #onCompactionCompleted(RocksDB, CompactionJobInfo)}.
* * <p>
* Note that if applications would like to use the passed reference * Note that if applications would like to use the passed reference
* outside this function call, they should make copies from the * outside this function call, they should make copies from the
* returned value. * returned value.
@ -91,7 +91,7 @@ public interface EventListener {
* A callback function to RocksDB which will be called before a * A callback function to RocksDB which will be called before a
* RocksDB starts to compact. The default implementation is * RocksDB starts to compact. The default implementation is
* no-op. * no-op.
* * <p>
 * Note that this function must be implemented in a way such that  * Note that this function must be implemented in a way such that
* it should not run for an extended period of time before the function * it should not run for an extended period of time before the function
* returns. Otherwise, RocksDB may be blocked. * returns. Otherwise, RocksDB may be blocked.
@ -108,7 +108,7 @@ public interface EventListener {
* A callback function for RocksDB which will be called whenever * A callback function for RocksDB which will be called whenever
* a registered RocksDB compacts a file. The default implementation * a registered RocksDB compacts a file. The default implementation
* is a no-op. * is a no-op.
* * <p>
* Note that this function must be implemented in a way such that * Note that this function must be implemented in a way such that
* it should not run for an extended period of time before the function * it should not run for an extended period of time before the function
* returns. Otherwise, RocksDB may be blocked. * returns. Otherwise, RocksDB may be blocked.
@ -129,11 +129,11 @@ public interface EventListener {
 * of a pointer to DB. Applications that build logic based  * of a pointer to DB. Applications that build logic based
 * on file creations and deletions are suggested to implement  * on file creations and deletions are suggested to implement
* OnFlushCompleted and OnCompactionCompleted. * OnFlushCompleted and OnCompactionCompleted.
* * <p>
* Historically it will only be called if the file is successfully created. * Historically it will only be called if the file is successfully created.
 * Now it will also be called in the failure case. Users can check info.status  * Now it will also be called in the failure case. Users can check info.status
* to see if it succeeded or not. * to see if it succeeded or not.
* * <p>
* Note that if applications would like to use the passed reference * Note that if applications would like to use the passed reference
* outside this function call, they should make copies from these * outside this function call, they should make copies from these
 * returned values.  * returned values.
@ -147,7 +147,7 @@ public interface EventListener {
* A callback function for RocksDB which will be called before * A callback function for RocksDB which will be called before
 * an SST file is being created. It will be followed by OnTableFileCreated after  * an SST file is being created. It will be followed by OnTableFileCreated after
* the creation finishes. * the creation finishes.
* * <p>
* Note that if applications would like to use the passed reference * Note that if applications would like to use the passed reference
* outside this function call, they should make copies from these * outside this function call, they should make copies from these
 * returned values.  * returned values.
@ -160,11 +160,11 @@ public interface EventListener {
/** /**
* A callback function for RocksDB which will be called before * A callback function for RocksDB which will be called before
* a memtable is made immutable. * a memtable is made immutable.
* * <p>
 * Note that this function must be implemented in a way such that  * Note that this function must be implemented in a way such that
* it should not run for an extended period of time before the function * it should not run for an extended period of time before the function
* returns. Otherwise, RocksDB may be blocked. * returns. Otherwise, RocksDB may be blocked.
* * <p>
* Note that if applications would like to use the passed reference * Note that if applications would like to use the passed reference
* outside this function call, they should make copies from these * outside this function call, they should make copies from these
 * returned values.  * returned values.
@ -177,7 +177,7 @@ public interface EventListener {
/** /**
* A callback function for RocksDB which will be called before * A callback function for RocksDB which will be called before
* a column family handle is deleted. * a column family handle is deleted.
* * <p>
 * Note that this function must be implemented in a way such that  * Note that this function must be implemented in a way such that
* it should not run for an extended period of time before the function * it should not run for an extended period of time before the function
* returns. Otherwise, RocksDB may be blocked. * returns. Otherwise, RocksDB may be blocked.
@ -190,7 +190,7 @@ public interface EventListener {
/** /**
* A callback function for RocksDB which will be called after an external * A callback function for RocksDB which will be called after an external
* file is ingested using IngestExternalFile. * file is ingested using IngestExternalFile.
* * <p>
 * Note that this function will run on the same thread as  * Note that this function will run on the same thread as
 * IngestExternalFile(); if this function is blocked, IngestExternalFile()  * IngestExternalFile(); if this function is blocked, IngestExternalFile()
* will be blocked from finishing. * will be blocked from finishing.
@ -210,7 +210,7 @@ public interface EventListener {
* preventing the database from entering read-only mode. We do not provide any * preventing the database from entering read-only mode. We do not provide any
* guarantee when failed flushes/compactions will be rescheduled if the user * guarantee when failed flushes/compactions will be rescheduled if the user
* suppresses an error. * suppresses an error.
* * <p>
* Note that this function can run on the same threads as flush, compaction, * Note that this function can run on the same threads as flush, compaction,
* and user writes. So, it is extremely important not to perform heavy * and user writes. So, it is extremely important not to perform heavy
* computations or blocking calls in this function. * computations or blocking calls in this function.
@ -224,7 +224,7 @@ public interface EventListener {
/** /**
* A callback function for RocksDB which will be called whenever a change * A callback function for RocksDB which will be called whenever a change
* of superversion triggers a change of the stall conditions. * of superversion triggers a change of the stall conditions.
* * <p>
 * Note that this function must be implemented in a way such that  * Note that this function must be implemented in a way such that
* it should not run for an extended period of time before the function * it should not run for an extended period of time before the function
* returns. Otherwise, RocksDB may be blocked. * returns. Otherwise, RocksDB may be blocked.
@ -301,7 +301,7 @@ public interface EventListener {
* If true, the {@link #onFileReadFinish(FileOperationInfo)} * If true, the {@link #onFileReadFinish(FileOperationInfo)}
* and {@link #onFileWriteFinish(FileOperationInfo)} will be called. If * and {@link #onFileWriteFinish(FileOperationInfo)} will be called. If
* false, then they won't be called. * false, then they won't be called.
* * <p>
* Default: false * Default: false
* *
* @return whether to callback when file read/write is finished * @return whether to callback when file read/write is finished

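A minimal sketch of a listener that follows the threading and blocking guidance above; the metric logging is illustrative and 'Collections' is assumed to be imported:

final AbstractEventListener listener = new AbstractEventListener() {
  @Override
  public void onFlushCompleted(final RocksDB db, final FlushJobInfo flushJobInfo) {
    // Runs on the background flush thread: do something cheap and return quickly.
    System.out.println("flushed: " + flushJobInfo.getFilePath());
  }
};
try (final DBOptions dbOptions = new DBOptions()
         .setCreateIfMissing(true)
         .setListeners(Collections.singletonList(listener))) {
  // open the database with dbOptions ...
}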
@ -74,12 +74,12 @@ public class ExternalFileIngestionInfo {
} }
@Override @Override
public boolean equals(Object o) { public boolean equals(final Object o) {
if (this == o) if (this == o)
return true; return true;
if (o == null || getClass() != o.getClass()) if (o == null || getClass() != o.getClass())
return false; return false;
ExternalFileIngestionInfo that = (ExternalFileIngestionInfo) o; final ExternalFileIngestionInfo that = (ExternalFileIngestionInfo) o;
return globalSeqno == that.globalSeqno return globalSeqno == that.globalSeqno
&& Objects.equals(columnFamilyName, that.columnFamilyName) && Objects.equals(columnFamilyName, that.columnFamilyName)
&& Objects.equals(externalFilePath, that.externalFilePath) && Objects.equals(externalFilePath, that.externalFilePath)

@ -87,7 +87,7 @@ public class FileOperationInfo {
} }
@Override @Override
public boolean equals(Object o) { public boolean equals(final Object o) {
if (this == o) if (this == o)
return true; return true;
if (o == null || getClass() != o.getClass()) if (o == null || getClass() != o.getClass())

@ -90,7 +90,7 @@ public class FlushJobInfo {
* Determine if rocksdb is currently slowing-down all writes to prevent * Determine if rocksdb is currently slowing-down all writes to prevent
* creating too many Level 0 files as compaction seems not able to * creating too many Level 0 files as compaction seems not able to
 * catch up with the write request speed.  * catch up with the write request speed.
* * <p>
* This indicates that there are too many files in Level 0. * This indicates that there are too many files in Level 0.
* *
* @return true if rocksdb is currently slowing-down all writes, * @return true if rocksdb is currently slowing-down all writes,
@ -103,7 +103,7 @@ public class FlushJobInfo {
/** /**
* Determine if rocksdb is currently blocking any writes to prevent * Determine if rocksdb is currently blocking any writes to prevent
* creating more L0 files. * creating more L0 files.
* * <p>
* This indicates that there are too many files in level 0. * This indicates that there are too many files in level 0.
* Compactions should try to compact L0 files down to lower levels as soon * Compactions should try to compact L0 files down to lower levels as soon
* as possible. * as possible.
@ -151,12 +151,12 @@ public class FlushJobInfo {
} }
@Override @Override
public boolean equals(Object o) { public boolean equals(final Object o) {
if (this == o) if (this == o)
return true; return true;
if (o == null || getClass() != o.getClass()) if (o == null || getClass() != o.getClass())
return false; return false;
FlushJobInfo that = (FlushJobInfo) o; final FlushJobInfo that = (FlushJobInfo) o;
return columnFamilyId == that.columnFamilyId && threadId == that.threadId && jobId == that.jobId return columnFamilyId == that.columnFamilyId && threadId == that.threadId && jobId == that.jobId
&& triggeredWritesSlowdown == that.triggeredWritesSlowdown && triggeredWritesSlowdown == that.triggeredWritesSlowdown
&& triggeredWritesStop == that.triggeredWritesStop && smallestSeqno == that.smallestSeqno && triggeredWritesStop == that.triggeredWritesStop && smallestSeqno == that.smallestSeqno

@ -47,13 +47,13 @@ public class FlushOptions extends RocksObject {
} }
/** /**
* Set to true so that flush would proceeds immediately even it it means * Set to true so that flush would proceed immediately even if it means
* writes will stall for the duration of the flush. * writes will stall for the duration of the flush.
* * <p>
* Set to false so that the operation will wait until it's possible to do * Set to false so that the operation will wait until it's possible to do
 * the flush without causing a stall, or until the required flush is performed by  * the flush without causing a stall, or until the required flush is performed by
* someone else (foreground call or background thread). * someone else (foreground call or background thread).
* * <p>
* Default: false * Default: false
* *
* @param allowWriteStall true to allow writes to stall for flush, false * @param allowWriteStall true to allow writes to stall for flush, false
@ -78,7 +78,7 @@ public class FlushOptions extends RocksObject {
return allowWriteStall(nativeHandle_); return allowWriteStall(nativeHandle_);
} }
private native static long newFlushOptions(); private static native long newFlushOptions();
@Override protected final native void disposeInternal(final long handle); @Override protected final native void disposeInternal(final long handle);
private native void setWaitForFlush(final long handle, private native void setWaitForFlush(final long handle,

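A short usage sketch of these flush options ('db' is assumed to be an open RocksDB instance):

try (final FlushOptions flushOptions = new FlushOptions()
         .setWaitForFlush(true) // block the caller until the flush finishes
         .setAllowWriteStall(false)) { // default: false, per the javadoc above
  db.flush(flushOptions);
}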
@ -6,7 +6,7 @@ package org.rocksdb;
 * Such memtable contains a fixed-size array of buckets, where  * Such memtable contains a fixed-size array of buckets, where
* each bucket points to a sorted singly-linked * each bucket points to a sorted singly-linked
* list (or null if the bucket is empty). * list (or null if the bucket is empty).
* * <p>
* Note that since this mem-table representation relies on the * Note that since this mem-table representation relies on the
* key prefix, it is required to invoke one of the usePrefixExtractor * key prefix, it is required to invoke one of the usePrefixExtractor
* functions to specify how to extract key prefix given a key. * functions to specify how to extract key prefix given a key.

@ -6,7 +6,7 @@ package org.rocksdb;
 * Such mem-table representation contains a fixed-size array of  * Such mem-table representation contains a fixed-size array of
* buckets, where each bucket points to a skiplist (or null if the * buckets, where each bucket points to a skiplist (or null if the
* bucket is empty). * bucket is empty).
* * <p>
* Note that since this mem-table representation relies on the * Note that since this mem-table representation relies on the
* key prefix, it is required to invoke one of the usePrefixExtractor * key prefix, it is required to invoke one of the usePrefixExtractor
* functions to specify how to extract key prefix given a key. * functions to specify how to extract key prefix given a key.

@ -63,7 +63,7 @@ public enum HistogramType {
/** /**
* number of bytes decompressed. * number of bytes decompressed.
* * <p>
 * the byte count is the uncompressed size, i.e. before compression / after decompression respectively  * the byte count is the uncompressed size, i.e. before compression / after decompression respectively
*/ */
BYTES_DECOMPRESSED((byte) 0x1B), BYTES_DECOMPRESSED((byte) 0x1B),

@ -47,7 +47,7 @@ public enum IndexType {
return value_; return value_;
} }
IndexType(byte value) { IndexType(final byte value) {
value_ = value; value_ = value;
} }

@ -15,7 +15,7 @@ public enum InfoLogLevel {
private final byte value_; private final byte value_;
private InfoLogLevel(final byte value) { InfoLogLevel(final byte value) {
value_ = value; value_ = value;
} }

@ -136,15 +136,15 @@ public class IngestExternalFileOptions extends RocksObject {
/** /**
* Set to true if you would like duplicate keys in the file being ingested * Set to true if you would like duplicate keys in the file being ingested
* to be skipped rather than overwriting existing data under that key. * to be skipped rather than overwriting existing data under that key.
* * <p>
 * Use case: back-fill of some historical data in the database without  * Use case: back-fill of some historical data in the database without
 * overwriting the existing newer version of data.  * overwriting the existing newer version of data.
* * <p>
* This option could only be used if the DB has been running * This option could only be used if the DB has been running
* with DBOptions#allowIngestBehind() == true since the dawn of time. * with DBOptions#allowIngestBehind() == true since the dawn of time.
* * <p>
* All files will be ingested at the bottommost level with seqno=0. * All files will be ingested at the bottommost level with seqno=0.
* * <p>
* Default: false * Default: false
* *
* @param ingestBehind true if you would like duplicate keys in the file being * @param ingestBehind true if you would like duplicate keys in the file being
@ -160,7 +160,7 @@ public class IngestExternalFileOptions extends RocksObject {
/** /**
 * Returns true if the global_seqno is written to a given offset  * Returns true if the global_seqno is written to a given offset
* in the external SST file for backward compatibility. * in the external SST file for backward compatibility.
* * <p>
* See {@link #setWriteGlobalSeqno(boolean)}. * See {@link #setWriteGlobalSeqno(boolean)}.
* *
* @return true if the global_seqno is written to a given offset, * @return true if the global_seqno is written to a given offset,
@ -173,21 +173,21 @@ public class IngestExternalFileOptions extends RocksObject {
/** /**
* Set to true if you would like to write the global_seqno to a given offset * Set to true if you would like to write the global_seqno to a given offset
* in the external SST file for backward compatibility. * in the external SST file for backward compatibility.
* * <p>
* Older versions of RocksDB write the global_seqno to a given offset within * Older versions of RocksDB write the global_seqno to a given offset within
* the ingested SST files, and new versions of RocksDB do not. * the ingested SST files, and new versions of RocksDB do not.
* * <p>
 * If you ingest an external SST using a new version of RocksDB and would like  * If you ingest an external SST using a new version of RocksDB and would like
* to be able to downgrade to an older version of RocksDB, you should set * to be able to downgrade to an older version of RocksDB, you should set
* {@link #writeGlobalSeqno()} to true. * {@link #writeGlobalSeqno()} to true.
* * <p>
* If your service is just starting to use the new RocksDB, we recommend that * If your service is just starting to use the new RocksDB, we recommend that
* you set this option to false, which brings two benefits: * you set this option to false, which brings two benefits:
* 1. No extra random write for global_seqno during ingestion. * 1. No extra random write for global_seqno during ingestion.
 * 2. Without writing to the external SST file, its checksum can still be verified.  * 2. Without writing to the external SST file, its checksum can still be verified.
* * <p>
* We have a plan to set this option to false by default in the future. * We have a plan to set this option to false by default in the future.
* * <p>
* Default: true * Default: true
* *
 * @param writeGlobalSeqno true to write the global_seqno to a given offset,  * @param writeGlobalSeqno true to write the global_seqno to a given offset,
@ -201,10 +201,10 @@ public class IngestExternalFileOptions extends RocksObject {
return this; return this;
} }
private native static long newIngestExternalFileOptions(); private static native long newIngestExternalFileOptions();
private native static long newIngestExternalFileOptions( private static native long newIngestExternalFileOptions(final boolean moveFiles,
final boolean moveFiles, final boolean snapshotConsistency, final boolean snapshotConsistency, final boolean allowGlobalSeqNo,
final boolean allowGlobalSeqNo, final boolean allowBlockingFlush); final boolean allowBlockingFlush);
@Override protected final native void disposeInternal(final long handle); @Override protected final native void disposeInternal(final long handle);
private native boolean moveFiles(final long handle); private native boolean moveFiles(final long handle);

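A hedged sketch of the ingest-behind flow described above; the SST path is a placeholder, and 'db' is assumed to have run with allowIngestBehind=true since creation:

try (final IngestExternalFileOptions ingestOptions = new IngestExternalFileOptions()
         .setIngestBehind(true) // files land at the bottommost level with seqno=0
         .setWriteGlobalSeqno(false)) { // skip the extra random write
  db.ingestExternalFile(Collections.singletonList("/path/to/backfill.sst"), ingestOptions);
}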
@ -24,7 +24,6 @@ public class KeyMayExist {
} }
public enum KeyMayExistEnum { kNotExist, kExistsWithoutValue, kExistsWithValue } public enum KeyMayExistEnum { kNotExist, kExistsWithoutValue, kExistsWithValue }
;
public KeyMayExist(final KeyMayExistEnum exists, final int valueLength) { public KeyMayExist(final KeyMayExistEnum exists, final int valueLength) {
this.exists = exists; this.exists = exists;

@ -99,7 +99,7 @@ public class LRUCache extends Cache {
capacity, numShardBits, strictCapacityLimit, highPriPoolRatio, lowPriPoolRatio)); capacity, numShardBits, strictCapacityLimit, highPriPoolRatio, lowPriPoolRatio));
} }
private native static long newLRUCache(final long capacity, final int numShardBits, private static native long newLRUCache(final long capacity, final int numShardBits,
final boolean strictCapacityLimit, final double highPriPoolRatio, final boolean strictCapacityLimit, final double highPriPoolRatio,
final double lowPriPoolRatio); final double lowPriPoolRatio);
@Override protected final native void disposeInternal(final long handle); @Override protected final native void disposeInternal(final long handle);

@ -36,9 +36,8 @@ package org.rocksdb;
* </p> * </p>
*/ */
public abstract class Logger extends RocksCallbackObject { public abstract class Logger extends RocksCallbackObject {
private static final long WITH_OPTIONS = 0;
private final static long WITH_OPTIONS = 0; private static final long WITH_DBOPTIONS = 1;
private final static long WITH_DBOPTIONS = 1;
/** /**
* <p>AbstractLogger constructor.</p> * <p>AbstractLogger constructor.</p>
@ -68,7 +67,7 @@ public abstract class Logger extends RocksCallbackObject {
} }
@Override @Override
protected long initializeNative(long... nativeParameterHandles) { protected long initializeNative(final long... nativeParameterHandles) {
if(nativeParameterHandles[1] == WITH_OPTIONS) { if(nativeParameterHandles[1] == WITH_OPTIONS) {
return createNewLoggerOptions(nativeParameterHandles[0]); return createNewLoggerOptions(nativeParameterHandles[0]);
} else if(nativeParameterHandles[1] == WITH_DBOPTIONS) { } else if(nativeParameterHandles[1] == WITH_DBOPTIONS) {

@ -8,7 +8,7 @@ package org.rocksdb;
* MemTableConfig is used to config the internal mem-table of a RocksDB. * MemTableConfig is used to config the internal mem-table of a RocksDB.
* It is required for each memtable to have one such sub-class to allow * It is required for each memtable to have one such sub-class to allow
* Java developers to use it. * Java developers to use it.
* * <p>
* To make a RocksDB to use a specific MemTable format, its associated * To make a RocksDB to use a specific MemTable format, its associated
* MemTableConfig should be properly set and passed into Options * MemTableConfig should be properly set and passed into Options
* via Options.setMemTableFactory() and open the db using that Options. * via Options.setMemTableFactory() and open the db using that Options.
@ -25,5 +25,5 @@ public abstract class MemTableConfig {
* *
* @return native handle address to native memory table instance. * @return native handle address to native memory table instance.
*/ */
abstract protected long newMemTableFactoryHandle(); protected abstract long newMemTableFactoryHandle();
} }

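A minimal sketch of plugging a hash-based memtable in via Options, per the prefix-extractor requirement noted in the hunks above (the 8-byte prefix length is an assumption):

try (final Options options = new Options()
         .setCreateIfMissing(true)
         .useFixedLengthPrefixExtractor(8) // bucket keys by their first 8 bytes
         .setMemTableConfig(new HashSkipListMemTableConfig())) {
  // open the database with these options ...
}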
@ -77,12 +77,12 @@ public class MemTableInfo {
} }
@Override @Override
public boolean equals(Object o) { public boolean equals(final Object o) {
if (this == o) if (this == o)
return true; return true;
if (o == null || getClass() != o.getClass()) if (o == null || getClass() != o.getClass())
return false; return false;
MemTableInfo that = (MemTableInfo) o; final MemTableInfo that = (MemTableInfo) o;
return firstSeqno == that.firstSeqno && earliestSeqno == that.earliestSeqno return firstSeqno == that.firstSeqno && earliestSeqno == that.earliestSeqno
&& numEntries == that.numEntries && numDeletes == that.numDeletes && numEntries == that.numEntries && numDeletes == that.numDeletes
&& Objects.equals(columnFamilyName, that.columnFamilyName); && Objects.equals(columnFamilyName, that.columnFamilyName);

@ -64,7 +64,7 @@ public enum MemoryUsageType {
"Illegal value provided for MemoryUsageType."); "Illegal value provided for MemoryUsageType.");
} }
MemoryUsageType(byte value) { MemoryUsageType(final byte value) {
value_ = value; value_ = value;
} }

@ -28,12 +28,12 @@ public class MemoryUtil {
* @return Map from {@link MemoryUsageType} to memory usage as a {@link Long}. * @return Map from {@link MemoryUsageType} to memory usage as a {@link Long}.
*/ */
public static Map<MemoryUsageType, Long> getApproximateMemoryUsageByType(final List<RocksDB> dbs, final Set<Cache> caches) { public static Map<MemoryUsageType, Long> getApproximateMemoryUsageByType(final List<RocksDB> dbs, final Set<Cache> caches) {
int dbCount = (dbs == null) ? 0 : dbs.size(); final int dbCount = (dbs == null) ? 0 : dbs.size();
int cacheCount = (caches == null) ? 0 : caches.size(); final int cacheCount = (caches == null) ? 0 : caches.size();
long[] dbHandles = new long[dbCount]; final long[] dbHandles = new long[dbCount];
long[] cacheHandles = new long[cacheCount]; final long[] cacheHandles = new long[cacheCount];
if (dbCount > 0) { if (dbCount > 0) {
ListIterator<RocksDB> dbIter = dbs.listIterator(); final ListIterator<RocksDB> dbIter = dbs.listIterator();
while (dbIter.hasNext()) { while (dbIter.hasNext()) {
dbHandles[dbIter.nextIndex()] = dbIter.next().nativeHandle_; dbHandles[dbIter.nextIndex()] = dbIter.next().nativeHandle_;
} }
@ -42,19 +42,19 @@ public class MemoryUtil {
// NOTE: This index handling is super ugly but I couldn't get a clean way to track both the // NOTE: This index handling is super ugly but I couldn't get a clean way to track both the
// index and the iterator simultaneously within a Set. // index and the iterator simultaneously within a Set.
int i = 0; int i = 0;
for (Cache cache : caches) { for (final Cache cache : caches) {
cacheHandles[i] = cache.nativeHandle_; cacheHandles[i] = cache.nativeHandle_;
i++; i++;
} }
} }
Map<Byte, Long> byteOutput = getApproximateMemoryUsageByType(dbHandles, cacheHandles); final Map<Byte, Long> byteOutput = getApproximateMemoryUsageByType(dbHandles, cacheHandles);
Map<MemoryUsageType, Long> output = new HashMap<>(); final Map<MemoryUsageType, Long> output = new HashMap<>();
for(Map.Entry<Byte, Long> longEntry : byteOutput.entrySet()) { for (final Map.Entry<Byte, Long> longEntry : byteOutput.entrySet()) {
output.put(MemoryUsageType.getMemoryUsageType(longEntry.getKey()), longEntry.getValue()); output.put(MemoryUsageType.getMemoryUsageType(longEntry.getKey()), longEntry.getValue());
} }
return output; return output;
} }
private native static Map<Byte, Long> getApproximateMemoryUsageByType(final long[] dbHandles, private static native Map<Byte, Long> getApproximateMemoryUsageByType(
final long[] cacheHandles); final long[] dbHandles, final long[] cacheHandles);
} }

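A usage sketch of the public entry point above ('db' and 'cache' are assumed to be open instances):

final Map<MemoryUsageType, Long> usage = MemoryUtil.getApproximateMemoryUsageByType(
    Collections.singletonList(db), Collections.singleton(cache));
final Long memtableBytes = usage.get(MemoryUsageType.kMemTableTotal);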
@ -7,15 +7,13 @@ package org.rocksdb;
import java.util.*; import java.util.*;
public class MutableColumnFamilyOptions public class MutableColumnFamilyOptions extends AbstractMutableOptions {
extends AbstractMutableOptions {
/** /**
* User must use builder pattern, or parser. * User must use builder pattern, or parser.
* *
* @param keys the keys * @param keys the keys
* @param values the values * @param values the values
* * <p>
* See {@link #builder()} and {@link #parse(String)}. * See {@link #builder()} and {@link #parse(String)}.
*/ */
private MutableColumnFamilyOptions(final String[] keys, private MutableColumnFamilyOptions(final String[] keys,
@ -36,11 +34,11 @@ public class MutableColumnFamilyOptions
/** /**
* Parses a String representation of MutableColumnFamilyOptions * Parses a String representation of MutableColumnFamilyOptions
* * <p>
* The format is: key1=value1;key2=value2;key3=value3 etc * The format is: key1=value1;key2=value2;key3=value3 etc
* * <p>
* For int[] values, each int should be separated by a colon, e.g. * For int[] values, each int should be separated by a colon, e.g.
* * <p>
* key1=value1;intArrayKey1=1:2:3 * key1=value1;intArrayKey1=1:2:3
* *
* @param str The string representation of the mutable column family options * @param str The string representation of the mutable column family options
@ -157,8 +155,8 @@ public class MutableColumnFamilyOptions
public static class MutableColumnFamilyOptionsBuilder public static class MutableColumnFamilyOptionsBuilder
extends AbstractMutableOptionsBuilder<MutableColumnFamilyOptions, MutableColumnFamilyOptionsBuilder, MutableColumnFamilyOptionKey> extends AbstractMutableOptionsBuilder<MutableColumnFamilyOptions, MutableColumnFamilyOptionsBuilder, MutableColumnFamilyOptionKey>
implements MutableColumnFamilyOptionsInterface<MutableColumnFamilyOptionsBuilder> { implements MutableColumnFamilyOptionsInterface<MutableColumnFamilyOptionsBuilder> {
private static final Map<String, MutableColumnFamilyOptionKey> ALL_KEYS_LOOKUP =
private final static Map<String, MutableColumnFamilyOptionKey> ALL_KEYS_LOOKUP = new HashMap<>(); new HashMap<>();
static { static {
for(final MutableColumnFamilyOptionKey key : MemtableOption.values()) { for(final MutableColumnFamilyOptionKey key : MemtableOption.values()) {
ALL_KEYS_LOOKUP.put(key.name(), key); ALL_KEYS_LOOKUP.put(key.name(), key);
@ -476,7 +474,7 @@ public class MutableColumnFamilyOptions
@Override @Override
public CompressionType compressionType() { public CompressionType compressionType() {
return (CompressionType) getEnum(MiscOption.compression); return getEnum(MiscOption.compression);
} }
@Override @Override
@ -549,7 +547,7 @@ public class MutableColumnFamilyOptions
@Override @Override
public CompressionType blobCompressionType() { public CompressionType blobCompressionType() {
return (CompressionType) getEnum(BlobOption.blob_compression_type); return getEnum(BlobOption.blob_compression_type);
} }
@Override @Override
@ -617,7 +615,7 @@ public class MutableColumnFamilyOptions
@Override @Override
public PrepopulateBlobCache prepopulateBlobCache() { public PrepopulateBlobCache prepopulateBlobCache() {
return (PrepopulateBlobCache) getEnum(BlobOption.prepopulate_blob_cache); return getEnum(BlobOption.prepopulate_blob_cache);
} }
} }
} }
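
As the javadoc above notes, instances come only from the builder or the parser; a minimal sketch of both routes, assuming the canonical RocksDB option key names:

```java
import org.rocksdb.MutableColumnFamilyOptions;

// Via the builder …
final MutableColumnFamilyOptions built = MutableColumnFamilyOptions.builder()
    .setWriteBufferSize(64L * 1024 * 1024)
    .setLevel0FileNumCompactionTrigger(4)
    .build();

// … or via the parser, using the key1=value1;key2=value2 format described above.
final MutableColumnFamilyOptions parsed =
    MutableColumnFamilyOptions
        .parse("write_buffer_size=67108864;level0_file_num_compaction_trigger=4")
        .build();
```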

@ -11,15 +11,15 @@ public interface MutableColumnFamilyOptionsInterface<
/** /**
* Amount of data to build up in memory (backed by an unsorted log * Amount of data to build up in memory (backed by an unsorted log
* on disk) before converting to a sorted on-disk file. * on disk) before converting to a sorted on-disk file.
* * <p>
* Larger values increase performance, especially during bulk loads. * Larger values increase performance, especially during bulk loads.
* Up to {@code max_write_buffer_number} write buffers may be held in memory * Up to {@code max_write_buffer_number} write buffers may be held in memory
* at the same time, so you may wish to adjust this parameter * at the same time, so you may wish to adjust this parameter
* to control memory usage. * to control memory usage.
* * <p>
* Also, a larger write buffer will result in a longer recovery time * Also, a larger write buffer will result in a longer recovery time
* the next time the database is opened. * the next time the database is opened.
* * <p>
* Default: 64MB * Default: 64MB
* @param writeBufferSize the size of write buffer. * @param writeBufferSize the size of write buffer.
* @return the instance of the current object. * @return the instance of the current object.
@ -56,7 +56,7 @@ public interface MutableColumnFamilyOptionsInterface<
/** /**
* Number of files to trigger level-0 compaction. A value &lt; 0 means that * Number of files to trigger level-0 compaction. A value &lt; 0 means that
* level-0 compaction will not be triggered by the number of files at all. * level-0 compaction will not be triggered by the number of files at all.
* * <p>
* Default: 4 * Default: 4
* *
* @param level0FileNumCompactionTrigger The number of files to trigger * @param level0FileNumCompactionTrigger The number of files to trigger
@ -68,7 +68,7 @@ public interface MutableColumnFamilyOptionsInterface<
/** /**
* Number of files to trigger level-0 compaction. A value &lt; 0 means that * Number of files to trigger level-0 compaction. A value &lt; 0 means that
* level-0 compaction will not be triggered by the number of files at all. * level-0 compaction will not be triggered by the number of files at all.
* * <p>
* Default: 4 * Default: 4
* *
* @return The number of files to trigger * @return The number of files to trigger
@ -109,7 +109,7 @@ public interface MutableColumnFamilyOptionsInterface<
* @param maxBytesForLevelBase maximum bytes for level base. * @param maxBytesForLevelBase maximum bytes for level base.
* *
* @return the reference to the current option. * @return the reference to the current option.
* * <p>
* See {@link AdvancedMutableColumnFamilyOptionsInterface#setMaxBytesForLevelMultiplier(double)} * See {@link AdvancedMutableColumnFamilyOptionsInterface#setMaxBytesForLevelMultiplier(double)}
*/ */
T setMaxBytesForLevelBase( T setMaxBytesForLevelBase(
@ -127,7 +127,7 @@ public interface MutableColumnFamilyOptionsInterface<
* *
* @return the upper-bound of the total size of level-1 files * @return the upper-bound of the total size of level-1 files
* in bytes. * in bytes.
* * <p>
* See {@link AdvancedMutableColumnFamilyOptionsInterface#maxBytesForLevelMultiplier()} * See {@link AdvancedMutableColumnFamilyOptionsInterface#maxBytesForLevelMultiplier()}
*/ */
long maxBytesForLevelBase(); long maxBytesForLevelBase();
@ -135,7 +135,7 @@ public interface MutableColumnFamilyOptionsInterface<
/** /**
* Compress blocks using the specified compression algorithm. This * Compress blocks using the specified compression algorithm. This
* parameter can be changed dynamically. * parameter can be changed dynamically.
* * <p>
* Default: SNAPPY_COMPRESSION, which gives lightweight but fast compression. * Default: SNAPPY_COMPRESSION, which gives lightweight but fast compression.
* *
* @param compressionType Compression Type. * @param compressionType Compression Type.
@ -147,7 +147,7 @@ public interface MutableColumnFamilyOptionsInterface<
/** /**
* Compress blocks using the specified compression algorithm. This * Compress blocks using the specified compression algorithm. This
* parameter can be changed dynamically. * parameter can be changed dynamically.
* * <p>
* Default: SNAPPY_COMPRESSION, which gives lightweight but fast compression. * Default: SNAPPY_COMPRESSION, which gives lightweight but fast compression.
* *
* @return Compression type. * @return Compression type.
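
Because `compression` is a mutable key, it can be changed on a live column family; a sketch, assuming an open `db` and one of its `cf` handles:

```java
static void switchToLz4(final RocksDB db, final ColumnFamilyHandle cf) throws RocksDBException {
  // compression is a mutable option, so no reopen of the database is required
  db.setOptions(cf, MutableColumnFamilyOptions.builder()
      .setCompressionType(CompressionType.LZ4_COMPRESSION)
      .build());
}
```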

@ -11,13 +11,12 @@ import java.util.Map;
import java.util.Objects; import java.util.Objects;
public class MutableDBOptions extends AbstractMutableOptions { public class MutableDBOptions extends AbstractMutableOptions {
/** /**
* User must use builder pattern, or parser. * User must use builder pattern, or parser.
* *
* @param keys the keys * @param keys the keys
* @param values the values * @param values the values
* * <p>
* See {@link #builder()} and {@link #parse(String)}. * See {@link #builder()} and {@link #parse(String)}.
*/ */
private MutableDBOptions(final String[] keys, final String[] values) { private MutableDBOptions(final String[] keys, final String[] values) {
@ -37,11 +36,11 @@ public class MutableDBOptions extends AbstractMutableOptions {
/** /**
* Parses a String representation of MutableDBOptions * Parses a String representation of MutableDBOptions
* * <p>
* The format is: key1=value1;key2=value2;key3=value3 etc * The format is: key1=value1;key2=value2;key3=value3 etc
* * <p>
* For int[] values, each int should be separated by a colon, e.g. * For int[] values, each int should be separated by a colon, e.g.
* * <p>
* key1=value1;intArrayKey1=1:2:3 * key1=value1;intArrayKey1=1:2:3
* *
* @param str The string representation of the mutable db options * @param str The string representation of the mutable db options
@ -49,7 +48,7 @@ public class MutableDBOptions extends AbstractMutableOptions {
* *
* @return A builder for the mutable db options * @return A builder for the mutable db options
*/ */
public static MutableDBOptionsBuilder parse(final String str, boolean ignoreUnknown) { public static MutableDBOptionsBuilder parse(final String str, final boolean ignoreUnknown) {
Objects.requireNonNull(str); Objects.requireNonNull(str);
final List<OptionString.Entry> parsedOptions = OptionString.Parser.parse(str); final List<OptionString.Entry> parsedOptions = OptionString.Parser.parse(str);
@ -93,8 +92,7 @@ public class MutableDBOptions extends AbstractMutableOptions {
public static class MutableDBOptionsBuilder public static class MutableDBOptionsBuilder
extends AbstractMutableOptionsBuilder<MutableDBOptions, MutableDBOptionsBuilder, MutableDBOptionKey> extends AbstractMutableOptionsBuilder<MutableDBOptions, MutableDBOptionsBuilder, MutableDBOptionKey>
implements MutableDBOptionsInterface<MutableDBOptionsBuilder> { implements MutableDBOptionsInterface<MutableDBOptionsBuilder> {
private static final Map<String, MutableDBOptionKey> ALL_KEYS_LOOKUP = new HashMap<>();
private final static Map<String, MutableDBOptionKey> ALL_KEYS_LOOKUP = new HashMap<>();
static { static {
for(final MutableDBOptionKey key : DBOption.values()) { for(final MutableDBOptionKey key : DBOption.values()) {
ALL_KEYS_LOOKUP.put(key.name(), key); ALL_KEYS_LOOKUP.put(key.name(), key);

@ -27,7 +27,7 @@ public interface MutableDBOptionsInterface<T extends MutableDBOptionsInterface<T
* `max_background_jobs = max_background_compactions + max_background_flushes` * `max_background_jobs = max_background_compactions + max_background_flushes`
* in the case where the user sets at least one of `max_background_compactions` or * in the case where the user sets at least one of `max_background_compactions` or
* `max_background_flushes` (we replace -1 by 1 in case one option is unset). * `max_background_flushes` (we replace -1 by 1 in case one option is unset).
* * <p>
* Specifies the maximum number of concurrent background compaction jobs, * Specifies the maximum number of concurrent background compaction jobs,
* submitted to the default LOW priority thread pool. * submitted to the default LOW priority thread pool.
* If you're increasing this, also consider increasing number of threads in * If you're increasing this, also consider increasing number of threads in
@ -52,7 +52,7 @@ public interface MutableDBOptionsInterface<T extends MutableDBOptionsInterface<T
* `max_background_jobs = max_background_compactions + max_background_flushes` * `max_background_jobs = max_background_compactions + max_background_flushes`
* in the case where the user sets at least one of `max_background_compactions` or * in the case where the user sets at least one of `max_background_compactions` or
* `max_background_flushes` (we replace -1 by 1 in case one option is unset). * `max_background_flushes` (we replace -1 by 1 in case one option is unset).
* * <p>
* Returns the maximum number of concurrent background compaction jobs, * Returns the maximum number of concurrent background compaction jobs,
* submitted to the default LOW priority thread pool. * submitted to the default LOW priority thread pool.
* When increasing this number, we may also want to consider increasing * When increasing this number, we may also want to consider increasing
@ -72,9 +72,9 @@ public interface MutableDBOptionsInterface<T extends MutableDBOptionsInterface<T
* By default RocksDB will flush all memtables on DB close if there is * By default RocksDB will flush all memtables on DB close if there is
* unpersisted data (i.e. with WAL disabled). The flush can be skipped to speed up * unpersisted data (i.e. with WAL disabled). The flush can be skipped to speed up
* DB close. Unpersisted data WILL BE LOST. * DB close. Unpersisted data WILL BE LOST.
* * <p>
* DEFAULT: false * DEFAULT: false
* * <p>
* Dynamically changeable through * Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)} * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}
* API. * API.
@ -90,9 +90,9 @@ public interface MutableDBOptionsInterface<T extends MutableDBOptionsInterface<T
* By default RocksDB will flush all memtables on DB close if there is * By default RocksDB will flush all memtables on DB close if there is
* unpersisted data (i.e. with WAL disabled). The flush can be skipped to speed up * unpersisted data (i.e. with WAL disabled). The flush can be skipped to speed up
* DB close. Unpersisted data WILL BE LOST. * DB close. Unpersisted data WILL BE LOST.
* * <p>
* DEFAULT: false * DEFAULT: false
* * <p>
* Dynamically changeable through * Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)} * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}
* API. * API.
@ -105,7 +105,7 @@ public interface MutableDBOptionsInterface<T extends MutableDBOptionsInterface<T
* This is the maximum buffer size that is used by WritableFileWriter. * This is the maximum buffer size that is used by WritableFileWriter.
* On Windows, we need to maintain an aligned buffer for writes. * On Windows, we need to maintain an aligned buffer for writes.
* We allow the buffer to grow until its size hits the limit. * We allow the buffer to grow until its size hits the limit.
* * <p>
* Default: 1024 * 1024 (1 MB) * Default: 1024 * 1024 (1 MB)
* *
* @param writableFileMaxBufferSize the maximum buffer size * @param writableFileMaxBufferSize the maximum buffer size
@ -118,7 +118,7 @@ public interface MutableDBOptionsInterface<T extends MutableDBOptionsInterface<T
* This is the maximum buffer size that is used by WritableFileWriter. * This is the maximum buffer size that is used by WritableFileWriter.
* On Windows, we need to maintain an aligned buffer for writes. * On Windows, we need to maintain an aligned buffer for writes.
* We allow the buffer to grow until its size hits the limit. * We allow the buffer to grow until its size hits the limit.
* * <p>
* Default: 1024 * 1024 (1 MB) * Default: 1024 * 1024 (1 MB)
* *
* @return the maximum buffer size * @return the maximum buffer size
@ -137,11 +137,11 @@ public interface MutableDBOptionsInterface<T extends MutableDBOptionsInterface<T
* if it is not empty, or 16MB if `rate_limiter` is empty. Note that * if it is not empty, or 16MB if `rate_limiter` is empty. Note that
* if users change the rate in `rate_limiter` after DB is opened, * if users change the rate in `rate_limiter` after DB is opened,
* `delayed_write_rate` won't be adjusted. * `delayed_write_rate` won't be adjusted.
* * <p>
* Unit: bytes per second. * Unit: bytes per second.
* * <p>
* Default: 0 * Default: 0
* * <p>
* Dynamically changeable through {@link RocksDB#setDBOptions(MutableDBOptions)}. * Dynamically changeable through {@link RocksDB#setDBOptions(MutableDBOptions)}.
* *
* @param delayedWriteRate the rate in bytes per second * @param delayedWriteRate the rate in bytes per second
@ -162,11 +162,11 @@ public interface MutableDBOptionsInterface<T extends MutableDBOptionsInterface<T
* if it is not empty, or 16MB if `rate_limiter` is empty. Note that * if it is not empty, or 16MB if `rate_limiter` is empty. Note that
* if users change the rate in `rate_limiter` after DB is opened, * if users change the rate in `rate_limiter` after DB is opened,
* `delayed_write_rate` won't be adjusted. * `delayed_write_rate` won't be adjusted.
* * <p>
* Unit: bytes per second. * Unit: bytes per second.
* * <p>
* Default: 0 * Default: 0
* * <p>
* Dynamically changeable through {@link RocksDB#setDBOptions(MutableDBOptions)}. * Dynamically changeable through {@link RocksDB#setDBOptions(MutableDBOptions)}.
* *
* @return the rate in bytes per second * @return the rate in bytes per second
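
As the javadoc says, this is dynamically changeable through `RocksDB#setDBOptions(MutableDBOptions)`; a sketch with an illustrative 16 MB/s rate:

```java
static void capDelayedWrites(final RocksDB db) throws RocksDBException {
  db.setDBOptions(MutableDBOptions.builder()
      .setDelayedWriteRate(16L * 1024 * 1024) // bytes per second; 0 derives a rate from rate_limiter
      .build());
}
```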
@ -358,7 +358,7 @@ public interface MutableDBOptionsInterface<T extends MutableDBOptionsInterface<T
/** /**
* Same as {@link #setBytesPerSync(long)} , but applies to WAL files * Same as {@link #setBytesPerSync(long)} , but applies to WAL files
* * <p>
* Default: 0, turned off * Default: 0, turned off
* *
* @param walBytesPerSync size in bytes * @param walBytesPerSync size in bytes
@ -368,7 +368,7 @@ public interface MutableDBOptionsInterface<T extends MutableDBOptionsInterface<T
/** /**
* Same as {@link #bytesPerSync()} , but applies to WAL files * Same as {@link #bytesPerSync()} , but applies to WAL files
* * <p>
* Default: 0, turned off * Default: 0, turned off
* *
* @return size in bytes * @return size in bytes
@ -383,7 +383,7 @@ public interface MutableDBOptionsInterface<T extends MutableDBOptionsInterface<T
* during file generation, which can lead to a huge sync when the file is * during file generation, which can lead to a huge sync when the file is
* finished, even with {@link #bytesPerSync()} / {@link #walBytesPerSync()} * finished, even with {@link #bytesPerSync()} / {@link #walBytesPerSync()}
* properly configured. * properly configured.
* * <p>
* - If `sync_file_range` is supported it achieves this by waiting for any * - If `sync_file_range` is supported it achieves this by waiting for any
* prior `sync_file_range`s to finish before proceeding. In this way, * prior `sync_file_range`s to finish before proceeding. In this way,
* processing (compression, etc.) can proceed uninhibited in the gap * processing (compression, etc.) can proceed uninhibited in the gap
@ -391,11 +391,11 @@ public interface MutableDBOptionsInterface<T extends MutableDBOptionsInterface<T
* behind. * behind.
* - Otherwise the `WritableFile::Sync` method is used. Note this mechanism * - Otherwise the `WritableFile::Sync` method is used. Note this mechanism
* always blocks, thus preventing the interleaving of I/O and processing. * always blocks, thus preventing the interleaving of I/O and processing.
* * <p>
* Note: Enabling this option does not provide any additional persistence * Note: Enabling this option does not provide any additional persistence
* guarantees, as it may use `sync_file_range`, which does not write out * guarantees, as it may use `sync_file_range`, which does not write out
* metadata. * metadata.
* * <p>
* Default: false * Default: false
* *
* @param strictBytesPerSync the bytes per sync * @param strictBytesPerSync the bytes per sync
@ -405,7 +405,7 @@ public interface MutableDBOptionsInterface<T extends MutableDBOptionsInterface<T
/** /**
* Return the strict byte limit per sync. * Return the strict byte limit per sync.
* * <p>
* See {@link #setStrictBytesPerSync(boolean)} * See {@link #setStrictBytesPerSync(boolean)}
* *
* @return the limit in bytes. * @return the limit in bytes.
@ -415,9 +415,9 @@ public interface MutableDBOptionsInterface<T extends MutableDBOptionsInterface<T
/** /**
* If non-zero, we perform bigger reads when doing compaction. If you're * If non-zero, we perform bigger reads when doing compaction. If you're
* running RocksDB on spinning disks, you should set this to at least 2MB. * running RocksDB on spinning disks, you should set this to at least 2MB.
* * <p>
* That way RocksDB's compaction does sequential instead of random reads. * That way RocksDB's compaction does sequential instead of random reads.
* * <p>
* Default: 0 * Default: 0
* *
* @param compactionReadaheadSize The compaction read-ahead size * @param compactionReadaheadSize The compaction read-ahead size
@ -429,9 +429,9 @@ public interface MutableDBOptionsInterface<T extends MutableDBOptionsInterface<T
/** /**
* If non-zero, we perform bigger reads when doing compaction. If you're * If non-zero, we perform bigger reads when doing compaction. If you're
* running RocksDB on spinning disks, you should set this to at least 2MB. * running RocksDB on spinning disks, you should set this to at least 2MB.
* * <p>
* That way RocksDB's compaction does sequential instead of random reads. * That way RocksDB's compaction does sequential instead of random reads.
* * <p>
* Default: 0 * Default: 0
* *
* @return The compaction read-ahead size * @return The compaction read-ahead size

@ -13,8 +13,7 @@ public abstract class MutableOptionValue<T> {
abstract String asString(); abstract String asString();
abstract T asObject(); abstract T asObject();
private static abstract class MutableOptionValueObject<T> private abstract static class MutableOptionValueObject<T> extends MutableOptionValue<T> {
extends MutableOptionValue<T> {
protected final T value; protected final T value;
protected MutableOptionValueObject(final T value) { protected MutableOptionValueObject(final T value) {

@ -10,7 +10,7 @@ import java.nio.ByteBuffer;
/** /**
* A simple abstraction to allow a Java class to wrap a custom comparator * A simple abstraction to allow a Java class to wrap a custom comparator
* implemented in C++. * implemented in C++.
* * <p>
* The native comparator must directly extend rocksdb::Comparator. * The native comparator must directly extend rocksdb::Comparator.
*/ */
public abstract class NativeComparatorWrapper public abstract class NativeComparatorWrapper

@ -7,7 +7,7 @@ package org.rocksdb;
/** /**
* The type used to refer to a thread operation. * The type used to refer to a thread operation.
* * <p>
* A thread operation describes the high-level action of a thread, * A thread operation describes the high-level action of a thread,
* examples include compaction and flush. * examples include compaction and flush.
*/ */

@ -94,16 +94,15 @@ public class OptimisticTransactionDB extends RocksDB
return otdb; return otdb;
} }
/** /**
* This is similar to {@link #close()} except that it * This is similar to {@link #close()} except that it
* throws an exception if any error occurs. * throws an exception if any error occurs.
* * <p>
* This will not fsync the WAL files. * This will not fsync the WAL files.
* If syncing is required, the caller must first call {@link #syncWal()} * If syncing is required, the caller must first call {@link #syncWal()}
* or {@link #write(WriteOptions, WriteBatch)} using an empty write batch * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch
* with {@link WriteOptions#setSync(boolean)} set to true. * with {@link WriteOptions#setSync(boolean)} set to true.
* * <p>
* See also {@link #close()}. * See also {@link #close()}.
* *
* @throws RocksDBException if an error occurs whilst closing. * @throws RocksDBException if an error occurs whilst closing.
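
Since `closeE()` does not fsync the WAL, a caller that needs durability syncs first, as the javadoc above prescribes; a minimal sketch:

```java
static void syncAndClose(final OptimisticTransactionDB otdb) throws RocksDBException {
  otdb.syncWal(); // make the WAL durable; closeE() itself will not fsync it
  otdb.closeE();  // unlike close(), surfaces any error as a RocksDBException
}
```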
@ -121,12 +120,12 @@ public class OptimisticTransactionDB extends RocksDB
/** /**
* This is similar to {@link #closeE()} except that it * This is similar to {@link #closeE()} except that it
* silently ignores any errors. * silently ignores any errors.
* * <p>
* This will not fsync the WAL files. * This will not fsync the WAL files.
* If syncing is required, the caller must first call {@link #syncWal()} * If syncing is required, the caller must first call {@link #syncWal()}
* or {@link #write(WriteOptions, WriteBatch)} using an empty write batch * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch
* with {@link WriteOptions#setSync(boolean)} set to true. * with {@link WriteOptions#setSync(boolean)} set to true.
* * <p>
* See also {@link #close()}. * See also {@link #close()}.
*/ */
@Override @Override
@ -209,8 +208,7 @@ public class OptimisticTransactionDB extends RocksDB
final String path) throws RocksDBException; final String path) throws RocksDBException;
protected static native long[] open(final long handle, final String path, protected static native long[] open(final long handle, final String path,
final byte[][] columnFamilyNames, final long[] columnFamilyOptions); final byte[][] columnFamilyNames, final long[] columnFamilyOptions);
private native static void closeDatabase(final long handle) private static native void closeDatabase(final long handle) throws RocksDBException;
throws RocksDBException;
private native long beginTransaction(final long handle, private native long beginTransaction(final long handle,
final long writeOptionsHandle); final long writeOptionsHandle);
private native long beginTransaction(final long handle, private native long beginTransaction(final long handle,

@ -43,7 +43,7 @@ public class OptimisticTransactionOptions extends RocksObject
return this; return this;
} }
private native static long newOptimisticTransactionOptions(); private static native long newOptimisticTransactionOptions();
private native boolean isSetSnapshot(final long handle); private native boolean isSetSnapshot(final long handle);
private native void setSetSnapshot(final long handle, private native void setSetSnapshot(final long handle,
final boolean setSnapshot); final boolean setSnapshot);

@ -10,13 +10,13 @@ import java.util.List;
import java.util.Objects; import java.util.Objects;
public class OptionString { public class OptionString {
private final static char kvPairSeparator = ';'; private static final char kvPairSeparator = ';';
private final static char kvSeparator = '='; private static final char kvSeparator = '=';
private final static char complexValueBegin = '{'; private static final char complexValueBegin = '{';
private final static char complexValueEnd = '}'; private static final char complexValueEnd = '}';
private final static char wrappedValueBegin = '{'; private static final char wrappedValueBegin = '{';
private final static char wrappedValueEnd = '}'; private static final char wrappedValueEnd = '}';
private final static char arrayValueSeparator = ':'; private static final char arrayValueSeparator = ':';
static class Value { static class Value {
final List<String> list; final List<String> list;

@ -11,7 +11,7 @@ import java.util.*;
/** /**
* Options to control the behavior of a database. It will be used * Options to control the behavior of a database. It will be used
* during the creation of a {@link org.rocksdb.RocksDB} (i.e., RocksDB.open()). * during the creation of a {@link org.rocksdb.RocksDB} (i.e., RocksDB.open()).
* * <p>
* As a descendant of {@link AbstractNativeReference}, this class is {@link AutoCloseable} * As a descendant of {@link AbstractNativeReference}, this class is {@link AutoCloseable}
* and will be automatically released if opened in the preamble of a try with resources block. * and will be automatically released if opened in the preamble of a try with resources block.
*/ */
@ -33,7 +33,7 @@ public class Options extends RocksObject
if (properties == null || properties.size() == 0) { if (properties == null || properties.size() == 0) {
throw new IllegalArgumentException("Properties value must contain at least one value."); throw new IllegalArgumentException("Properties value must contain at least one value.");
} }
StringBuilder stringBuilder = new StringBuilder(); final StringBuilder stringBuilder = new StringBuilder();
for (final String name : properties.stringPropertyNames()) { for (final String name : properties.stringPropertyNames()) {
stringBuilder.append(name); stringBuilder.append(name);
stringBuilder.append("="); stringBuilder.append("=");
@ -45,7 +45,7 @@ public class Options extends RocksObject
/** /**
* Construct options for opening a RocksDB. * Construct options for opening a RocksDB.
* * <p>
* This constructor will create (by allocating a block of memory) * This constructor will create (by allocating a block of memory)
* an {@code rocksdb::Options} in the c++ side. * an {@code rocksdb::Options} in the c++ side.
*/ */
@ -71,13 +71,13 @@ public class Options extends RocksObject
/** /**
* Copy constructor for ColumnFamilyOptions. * Copy constructor for ColumnFamilyOptions.
* * <p>
* NOTE: This does a shallow copy, which means comparator, merge_operator * NOTE: This does a shallow copy, which means comparator, merge_operator
* and other pointers will be cloned! * and other pointers will be cloned!
* *
* @param other The Options to copy. * @param other The Options to copy.
*/ */
public Options(Options other) { public Options(final Options other) {
super(copyOptions(other.nativeHandle_)); super(copyOptions(other.nativeHandle_));
this.env_ = other.env_; this.env_ = other.env_;
this.memTableConfig_ = other.memTableConfig_; this.memTableConfig_ = other.memTableConfig_;
@ -179,8 +179,7 @@ public class Options extends RocksObject
} }
@Override @Override
public Options optimizeForPointLookup( public Options optimizeForPointLookup(final long blockCacheSizeMb) {
long blockCacheSizeMb) {
optimizeForPointLookup(nativeHandle_, optimizeForPointLookup(nativeHandle_,
blockCacheSizeMb); blockCacheSizeMb);
return this; return this;
@ -194,8 +193,7 @@ public class Options extends RocksObject
} }
@Override @Override
public Options optimizeLevelStyleCompaction( public Options optimizeLevelStyleCompaction(final long memtableMemoryBudget) {
long memtableMemoryBudget) {
optimizeLevelStyleCompaction(nativeHandle_, optimizeLevelStyleCompaction(nativeHandle_,
memtableMemoryBudget); memtableMemoryBudget);
return this; return this;
@ -388,8 +386,8 @@ public class Options extends RocksObject
assert(isOwningHandle()); assert(isOwningHandle());
final int len = dbPaths.size(); final int len = dbPaths.size();
final String paths[] = new String[len]; final String[] paths = new String[len];
final long targetSizes[] = new long[len]; final long[] targetSizes = new long[len];
int i = 0; int i = 0;
for(final DbPath dbPath : dbPaths) { for(final DbPath dbPath : dbPaths) {
@ -407,8 +405,8 @@ public class Options extends RocksObject
if(len == 0) { if(len == 0) {
return Collections.emptyList(); return Collections.emptyList();
} else { } else {
final String paths[] = new String[len]; final String[] paths = new String[len];
final long targetSizes[] = new long[len]; final long[] targetSizes = new long[len];
dbPaths(nativeHandle_, paths, targetSizes); dbPaths(nativeHandle_, paths, targetSizes);
@ -651,7 +649,7 @@ public class Options extends RocksObject
} }
@Override @Override
public Options setMaxWriteBatchGroupSizeBytes(long maxWriteBatchGroupSizeBytes) { public Options setMaxWriteBatchGroupSizeBytes(final long maxWriteBatchGroupSizeBytes) {
setMaxWriteBatchGroupSizeBytes(nativeHandle_, maxWriteBatchGroupSizeBytes); setMaxWriteBatchGroupSizeBytes(nativeHandle_, maxWriteBatchGroupSizeBytes);
return this; return this;
} }
@ -1066,7 +1064,8 @@ public class Options extends RocksObject
} }
@Override @Override
public Options setSkipCheckingSstFileSizesOnDbOpen(boolean skipCheckingSstFileSizesOnDbOpen) { public Options setSkipCheckingSstFileSizesOnDbOpen(
final boolean skipCheckingSstFileSizesOnDbOpen) {
setSkipCheckingSstFileSizesOnDbOpen(nativeHandle_, skipCheckingSstFileSizesOnDbOpen); setSkipCheckingSstFileSizesOnDbOpen(nativeHandle_, skipCheckingSstFileSizesOnDbOpen);
return this; return this;
} }
@ -1377,12 +1376,11 @@ public class Options extends RocksObject
} }
@Override @Override
public Options setCompressionType(CompressionType compressionType) { public Options setCompressionType(final CompressionType compressionType) {
setCompressionType(nativeHandle_, compressionType.getValue()); setCompressionType(nativeHandle_, compressionType.getValue());
return this; return this;
} }
@Override @Override
public Options setBottommostCompressionType( public Options setBottommostCompressionType(
final CompressionType bottommostCompressionType) { final CompressionType bottommostCompressionType) {
@ -1442,7 +1440,7 @@ public class Options extends RocksObject
} }
@Override @Override
public Options setNumLevels(int numLevels) { public Options setNumLevels(final int numLevels) {
setNumLevels(nativeHandle_, numLevels); setNumLevels(nativeHandle_, numLevels);
return this; return this;
} }
@ -1490,7 +1488,7 @@ public class Options extends RocksObject
} }
@Override @Override
public Options setTargetFileSizeBase(long targetFileSizeBase) { public Options setTargetFileSizeBase(final long targetFileSizeBase) {
setTargetFileSizeBase(nativeHandle_, targetFileSizeBase); setTargetFileSizeBase(nativeHandle_, targetFileSizeBase);
return this; return this;
} }
@ -1501,7 +1499,7 @@ public class Options extends RocksObject
} }
@Override @Override
public Options setTargetFileSizeMultiplier(int multiplier) { public Options setTargetFileSizeMultiplier(final int multiplier) {
setTargetFileSizeMultiplier(nativeHandle_, multiplier); setTargetFileSizeMultiplier(nativeHandle_, multiplier);
return this; return this;
} }
@ -1662,7 +1660,7 @@ public class Options extends RocksObject
} }
@Override @Override
public Options setMaxSuccessiveMerges(long maxSuccessiveMerges) { public Options setMaxSuccessiveMerges(final long maxSuccessiveMerges) {
setMaxSuccessiveMerges(nativeHandle_, maxSuccessiveMerges); setMaxSuccessiveMerges(nativeHandle_, maxSuccessiveMerges);
return this; return this;
} }
@ -1692,9 +1690,7 @@ public class Options extends RocksObject
} }
@Override @Override
public Options public Options setMemtableHugePageSize(final long memtableHugePageSize) {
setMemtableHugePageSize(
long memtableHugePageSize) {
setMemtableHugePageSize(nativeHandle_, setMemtableHugePageSize(nativeHandle_,
memtableHugePageSize); memtableHugePageSize);
return this; return this;
@ -1706,7 +1702,7 @@ public class Options extends RocksObject
} }
@Override @Override
public Options setSoftPendingCompactionBytesLimit(long softPendingCompactionBytesLimit) { public Options setSoftPendingCompactionBytesLimit(final long softPendingCompactionBytesLimit) {
setSoftPendingCompactionBytesLimit(nativeHandle_, setSoftPendingCompactionBytesLimit(nativeHandle_,
softPendingCompactionBytesLimit); softPendingCompactionBytesLimit);
return this; return this;
@ -1718,7 +1714,7 @@ public class Options extends RocksObject
} }
@Override @Override
public Options setHardPendingCompactionBytesLimit(long hardPendingCompactionBytesLimit) { public Options setHardPendingCompactionBytesLimit(final long hardPendingCompactionBytesLimit) {
setHardPendingCompactionBytesLimit(nativeHandle_, hardPendingCompactionBytesLimit); setHardPendingCompactionBytesLimit(nativeHandle_, hardPendingCompactionBytesLimit);
return this; return this;
} }
@ -1729,7 +1725,7 @@ public class Options extends RocksObject
} }
@Override @Override
public Options setLevel0FileNumCompactionTrigger(int level0FileNumCompactionTrigger) { public Options setLevel0FileNumCompactionTrigger(final int level0FileNumCompactionTrigger) {
setLevel0FileNumCompactionTrigger(nativeHandle_, level0FileNumCompactionTrigger); setLevel0FileNumCompactionTrigger(nativeHandle_, level0FileNumCompactionTrigger);
return this; return this;
} }
@ -1740,7 +1736,7 @@ public class Options extends RocksObject
} }
@Override @Override
public Options setLevel0SlowdownWritesTrigger(int level0SlowdownWritesTrigger) { public Options setLevel0SlowdownWritesTrigger(final int level0SlowdownWritesTrigger) {
setLevel0SlowdownWritesTrigger(nativeHandle_, level0SlowdownWritesTrigger); setLevel0SlowdownWritesTrigger(nativeHandle_, level0SlowdownWritesTrigger);
return this; return this;
} }
@ -1751,7 +1747,7 @@ public class Options extends RocksObject
} }
@Override @Override
public Options setLevel0StopWritesTrigger(int level0StopWritesTrigger) { public Options setLevel0StopWritesTrigger(final int level0StopWritesTrigger) {
setLevel0StopWritesTrigger(nativeHandle_, level0StopWritesTrigger); setLevel0StopWritesTrigger(nativeHandle_, level0StopWritesTrigger);
return this; return this;
} }
@ -1762,7 +1758,8 @@ public class Options extends RocksObject
} }
@Override @Override
public Options setMaxBytesForLevelMultiplierAdditional(int[] maxBytesForLevelMultiplierAdditional) { public Options setMaxBytesForLevelMultiplierAdditional(
final int[] maxBytesForLevelMultiplierAdditional) {
setMaxBytesForLevelMultiplierAdditional(nativeHandle_, maxBytesForLevelMultiplierAdditional); setMaxBytesForLevelMultiplierAdditional(nativeHandle_, maxBytesForLevelMultiplierAdditional);
return this; return this;
} }
@ -1773,7 +1770,7 @@ public class Options extends RocksObject
} }
@Override @Override
public Options setParanoidFileChecks(boolean paranoidFileChecks) { public Options setParanoidFileChecks(final boolean paranoidFileChecks) {
setParanoidFileChecks(nativeHandle_, paranoidFileChecks); setParanoidFileChecks(nativeHandle_, paranoidFileChecks);
return this; return this;
} }
@ -1892,7 +1889,7 @@ public class Options extends RocksObject
} }
@Override @Override
public Options setAvoidUnnecessaryBlockingIO(boolean avoidUnnecessaryBlockingIO) { public Options setAvoidUnnecessaryBlockingIO(final boolean avoidUnnecessaryBlockingIO) {
setAvoidUnnecessaryBlockingIO(nativeHandle_, avoidUnnecessaryBlockingIO); setAvoidUnnecessaryBlockingIO(nativeHandle_, avoidUnnecessaryBlockingIO);
return this; return this;
} }
@ -1904,7 +1901,7 @@ public class Options extends RocksObject
} }
@Override @Override
public Options setPersistStatsToDisk(boolean persistStatsToDisk) { public Options setPersistStatsToDisk(final boolean persistStatsToDisk) {
setPersistStatsToDisk(nativeHandle_, persistStatsToDisk); setPersistStatsToDisk(nativeHandle_, persistStatsToDisk);
return this; return this;
} }
@ -1916,7 +1913,7 @@ public class Options extends RocksObject
} }
@Override @Override
public Options setWriteDbidToManifest(boolean writeDbidToManifest) { public Options setWriteDbidToManifest(final boolean writeDbidToManifest) {
setWriteDbidToManifest(nativeHandle_, writeDbidToManifest); setWriteDbidToManifest(nativeHandle_, writeDbidToManifest);
return this; return this;
} }
@ -1928,7 +1925,7 @@ public class Options extends RocksObject
} }
@Override @Override
public Options setLogReadaheadSize(long logReadaheadSize) { public Options setLogReadaheadSize(final long logReadaheadSize) {
setLogReadaheadSize(nativeHandle_, logReadaheadSize); setLogReadaheadSize(nativeHandle_, logReadaheadSize);
return this; return this;
} }
@ -1940,7 +1937,7 @@ public class Options extends RocksObject
} }
@Override @Override
public Options setBestEffortsRecovery(boolean bestEffortsRecovery) { public Options setBestEffortsRecovery(final boolean bestEffortsRecovery) {
setBestEffortsRecovery(nativeHandle_, bestEffortsRecovery); setBestEffortsRecovery(nativeHandle_, bestEffortsRecovery);
return this; return this;
} }
@ -1952,7 +1949,7 @@ public class Options extends RocksObject
} }
@Override @Override
public Options setMaxBgErrorResumeCount(int maxBgerrorResumeCount) { public Options setMaxBgErrorResumeCount(final int maxBgerrorResumeCount) {
setMaxBgErrorResumeCount(nativeHandle_, maxBgerrorResumeCount); setMaxBgErrorResumeCount(nativeHandle_, maxBgerrorResumeCount);
return this; return this;
} }
@ -1964,7 +1961,7 @@ public class Options extends RocksObject
} }
@Override @Override
public Options setBgerrorResumeRetryInterval(long bgerrorResumeRetryInterval) { public Options setBgerrorResumeRetryInterval(final long bgerrorResumeRetryInterval) {
setBgerrorResumeRetryInterval(nativeHandle_, bgerrorResumeRetryInterval); setBgerrorResumeRetryInterval(nativeHandle_, bgerrorResumeRetryInterval);
return this; return this;
} }
@ -1976,7 +1973,7 @@ public class Options extends RocksObject
} }
@Override @Override
public Options setSstPartitionerFactory(SstPartitionerFactory sstPartitionerFactory) { public Options setSstPartitionerFactory(final SstPartitionerFactory sstPartitionerFactory) {
setSstPartitionerFactory(nativeHandle_, sstPartitionerFactory.nativeHandle_); setSstPartitionerFactory(nativeHandle_, sstPartitionerFactory.nativeHandle_);
this.sstPartitionerFactory_ = sstPartitionerFactory; this.sstPartitionerFactory_ = sstPartitionerFactory;
return this; return this;
@ -2038,7 +2035,7 @@ public class Options extends RocksObject
} }
@Override @Override
public Options setBlobCompressionType(CompressionType compressionType) { public Options setBlobCompressionType(final CompressionType compressionType) {
setBlobCompressionType(nativeHandle_, compressionType.getValue()); setBlobCompressionType(nativeHandle_, compressionType.getValue());
return this; return this;
} }
@ -2119,10 +2116,9 @@ public class Options extends RocksObject
// END options for blobs (integrated BlobDB) // END options for blobs (integrated BlobDB)
// //
private native static long newOptions(); private static native long newOptions();
private native static long newOptions(long dbOptHandle, private static native long newOptions(long dbOptHandle, long cfOptHandle);
long cfOptHandle); private static native long copyOptions(long handle);
private native static long copyOptions(long handle);
@Override protected final native void disposeInternal(final long handle); @Override protected final native void disposeInternal(final long handle);
private native void setEnv(long optHandle, long envHandle); private native void setEnv(long optHandle, long envHandle);
private native void prepareForBulkLoad(long handle); private native void prepareForBulkLoad(long handle);
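
The class javadoc above points out that `Options` is `AutoCloseable`; a minimal sketch of the recommended try-with-resources usage (path and key/value are illustrative):

```java
public static void main(final String[] args) throws RocksDBException {
  RocksDB.loadLibrary();
  try (final Options options = new Options().setCreateIfMissing(true);
       final RocksDB db = RocksDB.open(options, "/tmp/options-example")) {
    db.put("key".getBytes(), "value".getBytes());
  } // both native handles are released here automatically
}
```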

@ -12,12 +12,12 @@ public class OptionsUtil {
* A static method to construct the DBOptions and ColumnFamilyDescriptors by * A static method to construct the DBOptions and ColumnFamilyDescriptors by
* loading the latest RocksDB options file stored in the specified rocksdb * loading the latest RocksDB options file stored in the specified rocksdb
* database. * database.
* * <p>
* Note that all the pointer options (except table_factory, which will * Note that all the pointer options (except table_factory, which will
* be described in more details below) will be initialized with the default * be described in more details below) will be initialized with the default
* values. Developers can further initialize them after this function call. * values. Developers can further initialize them after this function call.
* Below is an example list of pointer options which will be initialized. * Below is an example list of pointer options which will be initialized.
* * <p>
* - env * - env
* - memtable_factory * - memtable_factory
* - compaction_filter_factory * - compaction_filter_factory
@ -25,7 +25,7 @@ public class OptionsUtil {
* - comparator * - comparator
* - merge_operator * - merge_operator
* - compaction_filter * - compaction_filter
* * <p>
* For table_factory, this function further supports deserializing * For table_factory, this function further supports deserializing
* BlockBasedTableFactory and its BlockBasedTableOptions except the * BlockBasedTableFactory and its BlockBasedTableOptions except the
* pointer options of BlockBasedTableOptions (flush_block_policy_factory, * pointer options of BlockBasedTableOptions (flush_block_policy_factory,
@ -43,8 +43,9 @@ public class OptionsUtil {
* @throws RocksDBException thrown if error happens in underlying * @throws RocksDBException thrown if error happens in underlying
* native library. * native library.
*/ */
public static void loadLatestOptions(ConfigOptions configOptions, String dbPath, public static void loadLatestOptions(final ConfigOptions configOptions, final String dbPath,
DBOptions dbOptions, List<ColumnFamilyDescriptor> cfDescs) throws RocksDBException { final DBOptions dbOptions, final List<ColumnFamilyDescriptor> cfDescs)
throws RocksDBException {
loadLatestOptions(configOptions.nativeHandle_, dbPath, dbOptions.nativeHandle_, cfDescs); loadLatestOptions(configOptions.nativeHandle_, dbPath, dbOptions.nativeHandle_, cfDescs);
} }
@ -62,8 +63,9 @@ public class OptionsUtil {
* @throws RocksDBException thrown if error happens in underlying * @throws RocksDBException thrown if error happens in underlying
* native library. * native library.
*/ */
public static void loadOptionsFromFile(ConfigOptions configOptions, String optionsFileName, public static void loadOptionsFromFile(final ConfigOptions configOptions,
DBOptions dbOptions, List<ColumnFamilyDescriptor> cfDescs) throws RocksDBException { final String optionsFileName, final DBOptions dbOptions,
final List<ColumnFamilyDescriptor> cfDescs) throws RocksDBException {
loadOptionsFromFile( loadOptionsFromFile(
configOptions.nativeHandle_, optionsFileName, dbOptions.nativeHandle_, cfDescs); configOptions.nativeHandle_, optionsFileName, dbOptions.nativeHandle_, cfDescs);
} }
@ -78,7 +80,8 @@ public class OptionsUtil {
* @throws RocksDBException thrown if error happens in underlying * @throws RocksDBException thrown if error happens in underlying
* native library. * native library.
*/ */
public static String getLatestOptionsFileName(String dbPath, Env env) throws RocksDBException { public static String getLatestOptionsFileName(final String dbPath, final Env env)
throws RocksDBException {
return getLatestOptionsFileName(dbPath, env.nativeHandle_); return getLatestOptionsFileName(dbPath, env.nativeHandle_);
} }
@ -89,10 +92,10 @@ public class OptionsUtil {
private OptionsUtil() {} private OptionsUtil() {}
// native methods // native methods
private native static void loadLatestOptions(long cfgHandle, String dbPath, long dbOptionsHandle, private static native void loadLatestOptions(long cfgHandle, String dbPath, long dbOptionsHandle,
List<ColumnFamilyDescriptor> cfDescs) throws RocksDBException; List<ColumnFamilyDescriptor> cfDescs) throws RocksDBException;
private native static void loadOptionsFromFile(long cfgHandle, String optionsFileName, private static native void loadOptionsFromFile(long cfgHandle, String optionsFileName,
long dbOptionsHandle, List<ColumnFamilyDescriptor> cfDescs) throws RocksDBException; long dbOptionsHandle, List<ColumnFamilyDescriptor> cfDescs) throws RocksDBException;
private native static String getLatestOptionsFileName(String dbPath, long envHandle) private static native String getLatestOptionsFileName(String dbPath, long envHandle)
throws RocksDBException; throws RocksDBException;
} }

@ -18,9 +18,9 @@ public class PersistentCache extends RocksObject {
logger.nativeHandle_, optimizedForNvm)); logger.nativeHandle_, optimizedForNvm));
} }
private native static long newPersistentCache(final long envHandle, private static native long newPersistentCache(final long envHandle, final String path,
final String path, final long size, final long loggerHandle, final long size, final long loggerHandle, final boolean optimizedForNvm)
final boolean optimizedForNvm) throws RocksDBException; throws RocksDBException;
@Override protected final native void disposeInternal(final long handle); @Override protected final native void disposeInternal(final long handle);
} }
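
A sketch of constructing the cache through the public constructor wrapping this native call; the parameter order mirrors the declaration above, the path and size are illustrative, and attaching the cache to a table config via `BlockBasedTableConfig#setPersistentCache` is an assumption:

```java
static void buildPersistentCache() throws RocksDBException {
  try (final Options options = new Options();
       final Logger logger = new Logger(options) {
         @Override
         protected void log(final InfoLogLevel level, final String message) {
           System.out.println(level + ": " + message);
         }
       };
       final PersistentCache cache = new PersistentCache(Env.getDefault(),
           "/tmp/persistent-cache", 64L * 1024 * 1024, logger, /* optimizedForNvm */ false)) {
    // e.g. attach via new BlockBasedTableConfig().setPersistentCache(cache) — assumed wiring
  }
}
```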

@ -48,7 +48,7 @@ public class PlainTableConfig extends TableFormatConfig {
* @param keySize the length of the user key. * @param keySize the length of the user key.
* @return the reference to the current config. * @return the reference to the current config.
*/ */
public PlainTableConfig setKeySize(int keySize) { public PlainTableConfig setKeySize(final int keySize) {
keySize_ = keySize; keySize_ = keySize;
return this; return this;
} }
@ -68,7 +68,7 @@ public class PlainTableConfig extends TableFormatConfig {
* @param bitsPerKey the number of bits per key for bloom filer. * @param bitsPerKey the number of bits per key for bloom filer.
* @return the reference to the current config. * @return the reference to the current config.
*/ */
public PlainTableConfig setBloomBitsPerKey(int bitsPerKey) { public PlainTableConfig setBloomBitsPerKey(final int bitsPerKey) {
bloomBitsPerKey_ = bitsPerKey; bloomBitsPerKey_ = bitsPerKey;
return this; return this;
} }
@ -89,7 +89,7 @@ public class PlainTableConfig extends TableFormatConfig {
* @param ratio the hash table ratio. * @param ratio the hash table ratio.
* @return the reference to the current config. * @return the reference to the current config.
*/ */
public PlainTableConfig setHashTableRatio(double ratio) { public PlainTableConfig setHashTableRatio(final double ratio) {
hashTableRatio_ = ratio; hashTableRatio_ = ratio;
return this; return this;
} }
@ -110,7 +110,7 @@ public class PlainTableConfig extends TableFormatConfig {
* @param sparseness the index sparseness. * @param sparseness the index sparseness.
* @return the reference to the current config. * @return the reference to the current config.
*/ */
public PlainTableConfig setIndexSparseness(int sparseness) { public PlainTableConfig setIndexSparseness(final int sparseness) {
indexSparseness_ = sparseness; indexSparseness_ = sparseness;
return this; return this;
} }
@ -134,7 +134,7 @@ public class PlainTableConfig extends TableFormatConfig {
* @param hugePageTlbSize huge page tlb size * @param hugePageTlbSize huge page tlb size
* @return the reference to the current config. * @return the reference to the current config.
*/ */
public PlainTableConfig setHugePageTlbSize(int hugePageTlbSize) { public PlainTableConfig setHugePageTlbSize(final int hugePageTlbSize) {
this.hugePageTlbSize_ = hugePageTlbSize; this.hugePageTlbSize_ = hugePageTlbSize;
return this; return this;
} }
@ -166,7 +166,7 @@ public class PlainTableConfig extends TableFormatConfig {
* @param encodingType {@link org.rocksdb.EncodingType} value. * @param encodingType {@link org.rocksdb.EncodingType} value.
* @return the reference to the current config. * @return the reference to the current config.
*/ */
public PlainTableConfig setEncodingType(EncodingType encodingType) { public PlainTableConfig setEncodingType(final EncodingType encodingType) {
this.encodingType_ = encodingType; this.encodingType_ = encodingType;
return this; return this;
} }
@ -188,7 +188,7 @@ public class PlainTableConfig extends TableFormatConfig {
* scan mode shall be enabled. * scan mode shall be enabled.
* @return the reference to the current config. * @return the reference to the current config.
*/ */
public PlainTableConfig setFullScanMode(boolean fullScanMode) { public PlainTableConfig setFullScanMode(final boolean fullScanMode) {
this.fullScanMode_ = fullScanMode; this.fullScanMode_ = fullScanMode;
return this; return this;
} }
@ -212,7 +212,7 @@ public class PlainTableConfig extends TableFormatConfig {
* be stored in a file * be stored in a file
* @return the reference to the current config. * @return the reference to the current config.
*/ */
public PlainTableConfig setStoreIndexInFile(boolean storeIndexInFile) { public PlainTableConfig setStoreIndexInFile(final boolean storeIndexInFile) {
this.storeIndexInFile_ = storeIndexInFile; this.storeIndexInFile_ = storeIndexInFile;
return this; return this;
} }
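
Taken together, these setters are typically chained; a sketch of wiring a plain-table format into `Options` (values illustrative; plain table generally also needs mmap reads and a prefix extractor, assumed here):

```java
final Options options = new Options()
    .setCreateIfMissing(true)
    .setAllowMmapReads(true)          // plain table is designed for mmap-ed reads
    .useFixedLengthPrefixExtractor(8) // plain table relies on prefix-based lookups
    .setTableFormatConfig(new PlainTableConfig()
        .setKeySize(16)               // fixed user-key length; 0 means variable length
        .setBloomBitsPerKey(10)
        .setHashTableRatio(0.75)
        .setIndexSparseness(16));
```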

@ -7,7 +7,7 @@ package org.rocksdb;
/** /**
* The class that controls the get behavior. * The class that controls the get behavior.
* * <p>
* Note that dispose() must be called before an Options instance * Note that dispose() must be called before an Options instance
* becomes out-of-scope to release the allocated memory in c++. * becomes out-of-scope to release the allocated memory in c++.
*/ */
@ -27,13 +27,13 @@ public class ReadOptions extends RocksObject {
/** /**
* Copy constructor. * Copy constructor.
* * <p>
* NOTE: This does a shallow copy, which means snapshot, iterate_upper_bound * NOTE: This does a shallow copy, which means snapshot, iterate_upper_bound
* and other pointers will be cloned! * and other pointers will be cloned!
* *
* @param other The ReadOptions to copy. * @param other The ReadOptions to copy.
*/ */
public ReadOptions(ReadOptions other) { public ReadOptions(final ReadOptions other) {
super(copyReadOptions(other.nativeHandle_)); super(copyReadOptions(other.nativeHandle_));
this.iterateLowerBoundSlice_ = other.iterateLowerBoundSlice_; this.iterateLowerBoundSlice_ = other.iterateLowerBoundSlice_;
this.iterateUpperBoundSlice_ = other.iterateUpperBoundSlice_; this.iterateUpperBoundSlice_ = other.iterateUpperBoundSlice_;
@ -106,7 +106,7 @@ public class ReadOptions extends RocksObject {
*/ */
public Snapshot snapshot() { public Snapshot snapshot() {
assert(isOwningHandle()); assert(isOwningHandle());
long snapshotHandle = snapshot(nativeHandle_); final long snapshotHandle = snapshot(nativeHandle_);
if (snapshotHandle != 0) { if (snapshotHandle != 0) {
return new Snapshot(snapshotHandle); return new Snapshot(snapshotHandle);
} }
@ -128,7 +128,7 @@ public class ReadOptions extends RocksObject {
if (snapshot != null) { if (snapshot != null) {
setSnapshot(nativeHandle_, snapshot.nativeHandle_); setSnapshot(nativeHandle_, snapshot.nativeHandle_);
} else { } else {
setSnapshot(nativeHandle_, 0l); setSnapshot(nativeHandle_, 0L);
} }
return this; return this;
} }
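
A sketch of the snapshot round-trip these accessors support (assumes an open `db`):

```java
static byte[] readAtSnapshot(final RocksDB db, final byte[] key) throws RocksDBException {
  final Snapshot snapshot = db.getSnapshot();
  try (final ReadOptions readOptions = new ReadOptions().setSnapshot(snapshot)) {
    return db.get(readOptions, key); // sees only data visible at the snapshot
  } finally {
    db.releaseSnapshot(snapshot);
  }
}
```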
@ -256,7 +256,7 @@ public class ReadOptions extends RocksObject {
* Enforce that the iterator only iterates over the same prefix as the seek. * Enforce that the iterator only iterates over the same prefix as the seek.
* This option is effective only for prefix seeks, i.e. prefix_extractor is * This option is effective only for prefix seeks, i.e. prefix_extractor is
* non-null for the column family and {@link #totalOrderSeek()} is false. * non-null for the column family and {@link #totalOrderSeek()} is false.
* Unlike iterate_upper_bound, {@link #setPrefixSameAsStart(boolean)} only * Unlike iterate_upper_bound, {@code #setPrefixSameAsStart(boolean)} only
* works within a prefix but in both directions. * works within a prefix but in both directions.
* *
* @param prefixSameAsStart if true, then the iterator only iterates over the * @param prefixSameAsStart if true, then the iterator only iterates over the
@ -300,7 +300,7 @@ public class ReadOptions extends RocksObject {
* If true, when PurgeObsoleteFile is called in CleanupIteratorState, we * If true, when PurgeObsoleteFile is called in CleanupIteratorState, we
* schedule a background job in the flush job queue and delete obsolete files * schedule a background job in the flush job queue and delete obsolete files
* in the background. * in the background.
* * <p>
* Default: false * Default: false
* *
* @return true when PurgeObsoleteFile is called in CleanupIteratorState * @return true when PurgeObsoleteFile is called in CleanupIteratorState
@ -314,7 +314,7 @@ public class ReadOptions extends RocksObject {
* If true, when PurgeObsoleteFile is called in CleanupIteratorState, we * If true, when PurgeObsoleteFile is called in CleanupIteratorState, we
* schedule a background job in the flush job queue and delete obsolete files * schedule a background job in the flush job queue and delete obsolete files
* in the background. * in the background.
* * <p>
* Default: false * Default: false
* *
* @param backgroundPurgeOnIteratorCleanup true when PurgeObsoleteFile is * @param backgroundPurgeOnIteratorCleanup true when PurgeObsoleteFile is
@ -333,7 +333,7 @@ public class ReadOptions extends RocksObject {
* If non-zero, NewIterator will create a new table reader which * If non-zero, NewIterator will create a new table reader which
* performs reads of the given size. Using a large size (&gt; 2MB) can * performs reads of the given size. Using a large size (&gt; 2MB) can
* improve the performance of forward iteration on spinning disks. * improve the performance of forward iteration on spinning disks.
* * <p>
* Default: 0 * Default: 0
* *
* @return The readahead size is bytes * @return The readahead size is bytes
@ -347,7 +347,7 @@ public class ReadOptions extends RocksObject {
* If non-zero, NewIterator will create a new table reader which * If non-zero, NewIterator will create a new table reader which
* performs reads of the given size. Using a large size (&gt; 2MB) can * performs reads of the given size. Using a large size (&gt; 2MB) can
* improve the performance of forward iteration on spinning disks. * improve the performance of forward iteration on spinning disks.
* * <p>
* Default: 0 * Default: 0
* *
* @param readaheadSize The readahead size is bytes * @param readaheadSize The readahead size is bytes
@@ -375,7 +375,7 @@ public class ReadOptions extends RocksObject {
  * A threshold for the number of keys that can be skipped before failing an
  * iterator seek as incomplete. The default value of 0 should be used to
  * never fail a request as incomplete, even on skipping too many keys.
- *
+ * <p>
  * Default: 0
  *
  * @param maxSkippableInternalKeys the number of keys that can be skipped
@@ -394,7 +394,7 @@ public class ReadOptions extends RocksObject {
  * If true, keys deleted using the DeleteRange() API will be visible to
  * readers until they are naturally deleted during compaction. This improves
  * read performance in DBs with many range deletions.
- *
+ * <p>
  * Default: false
  *
  * @return true if keys deleted using the DeleteRange() API will be visible
@@ -408,7 +408,7 @@ public class ReadOptions extends RocksObject {
  * If true, keys deleted using the DeleteRange() API will be visible to
  * readers until they are naturally deleted during compaction. This improves
  * read performance in DBs with many range deletions.
- *
+ * <p>
  * Default: false
  *
  * @param ignoreRangeDeletions true if keys deleted using the DeleteRange()
@@ -425,14 +425,14 @@ public class ReadOptions extends RocksObject {
  * Defines the smallest key at which the backward
  * iterator can return an entry. Once the bound is passed,
  * {@link RocksIterator#isValid()} will be false.
- *
+ * <p>
  * The lower bound is inclusive i.e. the bound value is a valid
  * entry.
- *
+ * <p>
  * If prefix_extractor is not null, the Seek target and `iterate_lower_bound`
  * need to have the same prefix. This is because ordering is not guaranteed
  * outside of prefix domain.
- *
+ * <p>
  * Default: null
  *
  * @param iterateLowerBound Slice representing the lower bound
@@ -450,7 +450,7 @@ public class ReadOptions extends RocksObject {
 /**
  * Returns the smallest key at which the backward
  * iterator can return an entry.
- *
+ * <p>
  * The lower bound is inclusive i.e. the bound value is a valid entry.
  *
  * @return the smallest key, or null if there is no lower bound defined.
@@ -468,15 +468,15 @@ public class ReadOptions extends RocksObject {
 /**
  * Defines the extent up to which the forward iterator
- * can returns entries. Once the bound is reached,
+ * can return entries. Once the bound is reached,
  * {@link RocksIterator#isValid()} will be false.
- *
+ * <p>
  * The upper bound is exclusive i.e. the bound value is not a valid entry.
- *
+ * <p>
  * If prefix_extractor is not null, the Seek target and iterate_upper_bound
  * need to have the same prefix. This is because ordering is not guaranteed
  * outside of prefix domain.
- *
+ * <p>
  * Default: null
  *
  * @param iterateUpperBound Slice representing the upper bound
@@ -494,7 +494,7 @@ public class ReadOptions extends RocksObject {
 /**
  * Returns the largest key at which the forward
  * iterator can return an entry.
- *
+ * <p>
  * The upper bound is exclusive i.e. the bound value is not a valid entry.
  *
  * @return the largest key, or null if there is no upper bound defined.
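The two bound setters above combine naturally with an iterator. A minimal sketch, assuming an open RocksDB handle named db (the variable name is illustrative) and the statically imported UTF_8 charset these files already use:

    try (final Slice upperBound = new Slice("key3".getBytes(UTF_8));
         final ReadOptions readOptions = new ReadOptions().setIterateUpperBound(upperBound);
         final RocksIterator iterator = db.newIterator(readOptions)) {
      for (iterator.seekToFirst(); iterator.isValid(); iterator.next()) {
        // iteration stops before "key3": the upper bound is exclusive
      }
    }

The Slice is declared first so that it outlives every read made through the ReadOptions that refers to it.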
@@ -516,7 +516,7 @@ public class ReadOptions extends RocksObject {
  * properties of each table during iteration. If the callback returns false,
  * the table will not be scanned. This option only affects Iterators and has
  * no impact on point lookups.
- *
+ * <p>
  * Default: null (every table will be scanned)
  *
  * @param tableFilter the table filter for the callback.
@@ -568,7 +568,7 @@ public class ReadOptions extends RocksObject {
  * only the most recent version visible to timestamp is returned.
  * The user-specified timestamp feature is still under active development,
  * and the API is subject to change.
- *
+ * <p>
  * Default: null
  * @see #iterStartTs()
  * @return Reference to timestamp or null if there is no timestamp defined.
@@ -594,7 +594,7 @@ public class ReadOptions extends RocksObject {
  * only the most recent version visible to timestamp is returned.
  * The user-specified timestamp feature is still under active development,
  * and the API is subject to change.
- *
+ * <p>
  * Default: null
  * @see #setIterStartTs(AbstractSlice)
  * @param timestamp Slice representing the timestamp
@@ -618,7 +618,7 @@ public class ReadOptions extends RocksObject {
  * only the most recent version visible to timestamp is returned.
  * The user-specified timestamp feature is still under active development,
  * and the API is subject to change.
- *
+ * <p>
  * Default: null
  * @return Reference to lower bound timestamp or null if there is no lower bound timestamp
  * defined.
@@ -644,7 +644,7 @@ public class ReadOptions extends RocksObject {
  * only the most recent version visible to timestamp is returned.
  * The user-specified timestamp feature is still under active development,
  * and the API is subject to change.
- *
+ * <p>
  * Default: null
  *
  * @param iterStartTs Reference to lower bound timestamp or null if there is no lower bound
@@ -727,7 +727,7 @@ public class ReadOptions extends RocksObject {
  * It limits the maximum cumulative value size of the keys in batch while
  * reading through MultiGet. Once the cumulative value size exceeds this
  * soft limit then all the remaining keys are returned with status Aborted.
- *
+ * <p>
  * Default: {@code std::numeric_limits<uint64_t>::max()}
  * @return actual valueSizeSoftLimit
  */
@@ -740,7 +740,7 @@ public class ReadOptions extends RocksObject {
  * It limits the maximum cumulative value size of the keys in batch while
  * reading through MultiGet. Once the cumulative value size exceeds this
  * soft limit then all the remaining keys are returned with status Aborted.
- *
+ * <p>
  * Default: {@code std::numeric_limits<uint64_t>::max()}
  *
  * @param valueSizeSoftLimit the maximum cumulative value size of the keys
@@ -765,10 +765,9 @@ public class ReadOptions extends RocksObject {
 private AbstractSlice<?> timestampSlice_;
 private AbstractSlice<?> iterStartTs_;
-private native static long newReadOptions();
-private native static long newReadOptions(final boolean verifyChecksums,
-    final boolean fillCache);
-private native static long copyReadOptions(long handle);
+private static native long newReadOptions();
+private static native long newReadOptions(final boolean verifyChecksums, final boolean fillCache);
+private static native long copyReadOptions(long handle);
 @Override protected final native void disposeInternal(final long handle);
 private native boolean verifyChecksums(long handle);

@@ -14,5 +14,5 @@ public class RemoveEmptyValueCompactionFilter
     super(createNewRemoveEmptyValueCompactionFilter0());
   }
-private native static long createNewRemoveEmptyValueCompactionFilter0();
+private static native long createNewRemoveEmptyValueCompactionFilter0();
 }

@@ -7,7 +7,7 @@ package org.rocksdb;
 /**
  * RestoreOptions to control the behavior of restore.
- *
+ * <p>
  * Note that dispose() must be called before this instance becomes out-of-scope
  * to release the allocated memory in C++.
  *
@@ -27,6 +27,6 @@ public class RestoreOptions extends RocksObject {
     super(newRestoreOptions(keepLogFiles));
   }
-private native static long newRestoreOptions(boolean keepLogFiles);
+private static native long newRestoreOptions(boolean keepLogFiles);
 @Override protected final native void disposeInternal(final long handle);
 }

@@ -11,10 +11,10 @@ import java.util.List;
  * RocksCallbackObject is similar to {@link RocksObject} but varies
  * in its construction as it is designed for Java objects which have functions
  * which are called from C++ via JNI.
- *
+ * <p>
  * RocksCallbackObject is the base-class for any RocksDB class that acts as a
  * callback from some underlying native C++ {@code rocksdb} object.
- *
+ * <p>
  * The use of {@code RocksObject} should always be preferred over
  * {@link RocksCallbackObject} if callbacks are not required.
  */

@@ -9,10 +9,7 @@ import static java.nio.charset.StandardCharsets.UTF_8;
 import java.io.IOException;
 import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Map;
+import java.util.*;
 import java.util.concurrent.atomic.AtomicReference;
 import org.rocksdb.util.Environment;
@@ -343,7 +340,7 @@ public class RocksDB extends RocksObject {
  * The factory constructor of RocksDB that opens a RocksDB instance in
  * Read-Only mode given the path to the database using the specified
  * options and db path.
- *
+ * <p>
  * Options instance *should* not be disposed before all DBs using this options
  * instance have been closed. If the user doesn't call options dispose explicitly,
  * then this options instance will be GC'd automatically.
@@ -365,7 +362,7 @@ public class RocksDB extends RocksObject {
  * The factory constructor of RocksDB that opens a RocksDB instance in
  * Read-Only mode given the path to the database using the specified
  * options and db path.
- *
+ * <p>
  * Options instance *should* not be disposed before all DBs using this options
  * instance have been closed. If the user doesn't call options dispose explicitly,
  * then this options instance will be GC'd automatically.
@@ -501,7 +498,7 @@ public class RocksDB extends RocksObject {
 /**
  * Open DB as secondary instance with only the default column family.
- *
+ * <p>
  * The secondary instance can dynamically tail the MANIFEST of
  * a primary that must have already been created. The user can call
  * {@link #tryCatchUpWithPrimary()} to make the secondary instance catch up
@@ -538,7 +535,7 @@ public class RocksDB extends RocksObject {
 /**
  * Open DB as secondary instance with column families.
  * You can open a subset of column families in secondary mode.
- *
+ * <p>
  * The secondary instance can dynamically tail the MANIFEST of
  * a primary that must have already been created. The user can call
  * {@link #tryCatchUpWithPrimary()} to make the secondary instance catch up
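For reference, opening and catching up a secondary might look like the sketch below; both paths are hypothetical, the primary must already exist, and setMaxOpenFiles(-1) mirrors the upstream recommendation for secondary instances:

    try (final Options options = new Options().setMaxOpenFiles(-1);
         final RocksDB secondary =
             RocksDB.openAsSecondary(options, "/path/to/primary", "/path/to/secondary")) {
      secondary.tryCatchUpWithPrimary(); // tail the primary's MANIFEST up to its latest state
      final byte[] value = secondary.get("someKey".getBytes(UTF_8)); // read-only access
    }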
@@ -598,12 +595,12 @@ public class RocksDB extends RocksObject {
 /**
  * This is similar to {@link #close()} except that it
  * throws an exception if any error occurs.
- *
+ * <p>
  * This will not fsync the WAL files.
  * If syncing is required, the caller must first call {@link #syncWal()}
  * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch
  * with {@link WriteOptions#setSync(boolean)} set to true.
- *
+ * <p>
  * See also {@link #close()}.
  *
  * @throws RocksDBException if an error occurs whilst closing.
@@ -626,12 +623,12 @@ public class RocksDB extends RocksObject {
 /**
  * This is similar to {@link #closeE()} except that it
  * silently ignores any errors.
- *
+ * <p>
  * This will not fsync the WAL files.
  * If syncing is required, the caller must first call {@link #syncWal()}
  * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch
  * with {@link WriteOptions#setSync(boolean)} set to true.
- *
+ * <p>
  * See also {@link #close()}.
  */
 @Override
@@ -711,8 +708,8 @@ public class RocksDB extends RocksObject {
         columnFamilyOptions.nativeHandle_, cfNames);
     final List<ColumnFamilyHandle> columnFamilyHandles =
         new ArrayList<>(cfHandles.length);
-    for (int i = 0; i < cfHandles.length; i++) {
-      final ColumnFamilyHandle columnFamilyHandle = new ColumnFamilyHandle(this, cfHandles[i]);
+    for (final long cfHandle : cfHandles) {
+      final ColumnFamilyHandle columnFamilyHandle = new ColumnFamilyHandle(this, cfHandle);
       columnFamilyHandles.add(columnFamilyHandle);
     }
     ownedColumnFamilyHandles.addAll(columnFamilyHandles);
@@ -744,8 +741,8 @@ public class RocksDB extends RocksObject {
         cfOptsHandles, cfNames);
     final List<ColumnFamilyHandle> columnFamilyHandles =
         new ArrayList<>(cfHandles.length);
-    for (int i = 0; i < cfHandles.length; i++) {
-      final ColumnFamilyHandle columnFamilyHandle = new ColumnFamilyHandle(this, cfHandles[i]);
+    for (final long cfHandle : cfHandles) {
+      final ColumnFamilyHandle columnFamilyHandle = new ColumnFamilyHandle(this, cfHandle);
       columnFamilyHandles.add(columnFamilyHandle);
     }
     ownedColumnFamilyHandles.addAll(columnFamilyHandles);
@@ -846,7 +843,7 @@ public class RocksDB extends RocksObject {
  * instance
  * @param key the specified key to be inserted.
  * @param value the value associated with the specified key.
- *
+ * <p>
  * throws IllegalArgumentException if column family is not present
  *
  * @throws RocksDBException thrown if error happens in underlying
@@ -943,7 +940,7 @@ public class RocksDB extends RocksObject {
  * @param writeOpts {@link org.rocksdb.WriteOptions} instance.
  * @param key the specified key to be inserted.
  * @param value the value associated with the specified key.
- *
+ * <p>
  * throws IllegalArgumentException if column family is not present
  *
  * @throws RocksDBException thrown if error happens in underlying
@@ -968,7 +965,7 @@ public class RocksDB extends RocksObject {
  * Supports direct buffer only.
  * @param value the value associated with the specified key. Position and limit are used.
  * Supports direct buffer only.
- *
+ * <p>
  * throws IllegalArgumentException if column family is not present
  *
  * @throws RocksDBException thrown if error happens in underlying
@@ -992,7 +989,7 @@ public class RocksDB extends RocksObject {
  * Supports direct buffer only.
  * @param value the value associated with the specified key. Position and limit are used.
  * Supports direct buffer only.
- *
+ * <p>
  * throws IllegalArgumentException if column family is not present
  *
  * @throws RocksDBException thrown if error happens in underlying
@@ -1215,8 +1212,8 @@ public class RocksDB extends RocksObject {
 public int get(final ReadOptions opt, final ByteBuffer key, final ByteBuffer value)
     throws RocksDBException {
   assert key.isDirect() && value.isDirect();
-  int result = getDirect(nativeHandle_, opt.nativeHandle_, key, key.position(), key.remaining(),
-      value, value.position(), value.remaining(), 0);
+  final int result = getDirect(nativeHandle_, opt.nativeHandle_, key, key.position(),
+      key.remaining(), value, value.position(), value.remaining(), 0);
   if (result != NOT_FOUND) {
     value.limit(Math.min(value.limit(), value.position() + result));
   }
@@ -1248,8 +1245,8 @@ public class RocksDB extends RocksObject {
 public int get(final ColumnFamilyHandle columnFamilyHandle, final ReadOptions opt,
     final ByteBuffer key, final ByteBuffer value) throws RocksDBException {
   assert key.isDirect() && value.isDirect();
-  int result = getDirect(nativeHandle_, opt.nativeHandle_, key, key.position(), key.remaining(),
-      value, value.position(), value.remaining(), columnFamilyHandle.nativeHandle_);
+  final int result =
+      getDirect(nativeHandle_, opt.nativeHandle_, key, key.position(), key.remaining(), value,
+          value.position(), value.remaining(), columnFamilyHandle.nativeHandle_);
   if (result != NOT_FOUND) {
     value.limit(Math.min(value.limit(), value.position() + result));
   }
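The direct-ByteBuffer overloads return the full value length even when the supplied buffer is smaller, so a caller can detect truncation and retry. A rough sketch, assuming an open db:

    final ByteBuffer key = ByteBuffer.allocateDirect(16);
    key.put("k1".getBytes(UTF_8)).flip();
    final ByteBuffer value = ByteBuffer.allocateDirect(64);
    try (final ReadOptions readOptions = new ReadOptions()) {
      final int size = db.get(readOptions, key, value);
      if (size != RocksDB.NOT_FOUND && size > 64) {
        // value holds only a truncated prefix; retry with a buffer of at least size bytes
      }
    }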
@@ -1261,12 +1259,12 @@ public class RocksDB extends RocksObject {
  * Remove the database entry for {@code key}. Requires that the key exists
  * and was not overwritten. It is not an error if the key did not exist
  * in the database.
- *
+ * <p>
  * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple
  * times), then the result of calling SingleDelete() on this key is undefined.
  * SingleDelete() only behaves correctly if there has been only one Put()
  * for this key since the previous call to SingleDelete() for this key.
- *
+ * <p>
  * This feature is currently an experimental performance optimization
  * for a very specific workload. It is up to the caller to ensure that
  * SingleDelete is only used for a key that is not deleted using Delete() or
@@ -1287,12 +1285,12 @@ public class RocksDB extends RocksObject {
  * Remove the database entry for {@code key}. Requires that the key exists
  * and was not overwritten. It is not an error if the key did not exist
  * in the database.
- *
+ * <p>
  * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple
  * times), then the result of calling SingleDelete() on this key is undefined.
  * SingleDelete() only behaves correctly if there has been only one Put()
  * for this key since the previous call to SingleDelete() for this key.
- *
+ * <p>
  * This feature is currently an experimental performance optimization
  * for a very specific workload. It is up to the caller to ensure that
  * SingleDelete is only used for a key that is not deleted using Delete() or
@@ -1316,18 +1314,18 @@ public class RocksDB extends RocksObject {
  * Remove the database entry for {@code key}. Requires that the key exists
  * and was not overwritten. It is not an error if the key did not exist
  * in the database.
- *
+ * <p>
  * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple
  * times), then the result of calling SingleDelete() on this key is undefined.
  * SingleDelete() only behaves correctly if there has been only one Put()
  * for this key since the previous call to SingleDelete() for this key.
- *
+ * <p>
  * This feature is currently an experimental performance optimization
  * for a very specific workload. It is up to the caller to ensure that
  * SingleDelete is only used for a key that is not deleted using Delete() or
  * written using Merge(). Mixing SingleDelete operations with Deletes and
  * Merges can result in undefined behavior.
- *
+ * <p>
  * Note: consider setting {@link WriteOptions#setSync(boolean)} true.
  *
  * @param writeOpt Write options for the delete
@@ -1346,18 +1344,18 @@ public class RocksDB extends RocksObject {
  * Remove the database entry for {@code key}. Requires that the key exists
  * and was not overwritten. It is not an error if the key did not exist
  * in the database.
- *
+ * <p>
  * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple
  * times), then the result of calling SingleDelete() on this key is undefined.
  * SingleDelete() only behaves correctly if there has been only one Put()
  * for this key since the previous call to SingleDelete() for this key.
- *
+ * <p>
  * This feature is currently an experimental performance optimization
  * for a very specific workload. It is up to the caller to ensure that
  * SingleDelete is only used for a key that is not deleted using Delete() or
  * written using Merge(). Mixing SingleDelete operations with Deletes and
  * Merges can result in undefined behavior.
- *
+ * <p>
  * Note: consider setting {@link WriteOptions#setSync(boolean)} true.
  *
  * @param columnFamilyHandle The column family to delete the key from
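The contract is easiest to see side by side; in this sketch against an open db, only the first pattern is well defined:

    db.put("k1".getBytes(UTF_8), "v".getBytes(UTF_8));
    db.singleDelete("k1".getBytes(UTF_8)); // exactly one put() since the last singleDelete(): OK

    db.put("k2".getBytes(UTF_8), "v1".getBytes(UTF_8));
    db.put("k2".getBytes(UTF_8), "v2".getBytes(UTF_8));
    db.singleDelete("k2".getBytes(UTF_8)); // overwritten key: the outcome is undefined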
@@ -1374,12 +1372,11 @@ public class RocksDB extends RocksObject {
       columnFamilyHandle.nativeHandle_);
 }
 /**
  * Removes the database entries in the range ["beginKey", "endKey"), i.e.,
  * including "beginKey" and excluding "endKey". Returns a non-OK status on
  * error. It is not an error if no keys exist in the range ["beginKey", "endKey").
- *
+ * <p>
  * Delete the database entry (if any) for "key". Returns OK on success, and a
  * non-OK status on error. It is not an error if "key" did not exist in the
  * database.
@@ -1400,7 +1397,7 @@ public class RocksDB extends RocksObject {
  * Removes the database entries in the range ["beginKey", "endKey"), i.e.,
  * including "beginKey" and excluding "endKey". Returns a non-OK status on
  * error. It is not an error if no keys exist in the range ["beginKey", "endKey").
- *
+ * <p>
  * Delete the database entry (if any) for "key". Returns OK on success, and a
  * non-OK status on error. It is not an error if "key" did not exist in the
  * database.
@@ -1422,7 +1419,7 @@ public class RocksDB extends RocksObject {
  * Removes the database entries in the range ["beginKey", "endKey"), i.e.,
  * including "beginKey" and excluding "endKey". Returns a non-OK status on
  * error. It is not an error if no keys exist in the range ["beginKey", "endKey").
- *
+ * <p>
  * Delete the database entry (if any) for "key". Returns OK on success, and a
  * non-OK status on error. It is not an error if "key" did not exist in the
  * database.
@@ -1444,7 +1441,7 @@ public class RocksDB extends RocksObject {
  * Removes the database entries in the range ["beginKey", "endKey"), i.e.,
  * including "beginKey" and excluding "endKey". Returns a non-OK status on
  * error. It is not an error if no keys exist in the range ["beginKey", "endKey").
- *
+ * <p>
  * Delete the database entry (if any) for "key". Returns OK on success, and a
  * non-OK status on error. It is not an error if "key" did not exist in the
  * database.
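A short sketch of the half-open interval semantics, assuming an open db:

    db.put("key1".getBytes(UTF_8), "a".getBytes(UTF_8));
    db.put("key2".getBytes(UTF_8), "b".getBytes(UTF_8));
    db.put("key3".getBytes(UTF_8), "c".getBytes(UTF_8));
    db.deleteRange("key1".getBytes(UTF_8), "key3".getBytes(UTF_8));
    // "key1" and "key2" are removed; "key3", the exclusive end key, survives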
@@ -1501,7 +1498,7 @@ public class RocksDB extends RocksObject {
  * native library.
  * @throws IndexOutOfBoundsException if an offset or length is out of bounds
  */
-public void merge(final byte[] key, int offset, int len, final byte[] value,
+public void merge(final byte[] key, final int offset, final int len, final byte[] value,
     final int vOffset, final int vLen) throws RocksDBException {
   checkBounds(offset, len, key.length);
   checkBounds(vOffset, vLen, value.length);
@@ -2425,10 +2422,10 @@ public class RocksDB extends RocksObject {
  * returns false, otherwise it returns true if the key might exist.
  * That is to say that this method is probabilistic and may return false
  * positives, but never a false negative.
- *
+ * <p>
  * If the caller wants to obtain the value when the key
  * is found in memory, then {@code valueHolder} must be set.
- *
+ * <p>
  * This check is potentially lighter-weight than invoking
  * {@link #get(byte[])}. One way to make this lighter weight is to avoid
  * doing any IOs.
@@ -2451,10 +2448,10 @@ public class RocksDB extends RocksObject {
  * returns false, otherwise it returns true if the key might exist.
  * That is to say that this method is probabilistic and may return false
  * positives, but never a false negative.
- *
+ * <p>
  * If the caller wants to obtain the value when the key
  * is found in memory, then {@code valueHolder} must be set.
- *
+ * <p>
  * This check is potentially lighter-weight than invoking
  * {@link #get(byte[], int, int)}. One way to make this lighter weight is to
  * avoid doing any IOs.
@@ -2482,10 +2479,10 @@ public class RocksDB extends RocksObject {
  * returns false, otherwise it returns true if the key might exist.
  * That is to say that this method is probabilistic and may return false
  * positives, but never a false negative.
- *
+ * <p>
  * If the caller wants to obtain the value when the key
  * is found in memory, then {@code valueHolder} must be set.
- *
+ * <p>
  * This check is potentially lighter-weight than invoking
  * {@link #get(ColumnFamilyHandle,byte[])}. One way to make this lighter
  * weight is to avoid doing any IOs.
@@ -2511,10 +2508,10 @@ public class RocksDB extends RocksObject {
  * returns false, otherwise it returns true if the key might exist.
  * That is to say that this method is probabilistic and may return false
  * positives, but never a false negative.
- *
+ * <p>
  * If the caller wants to obtain the value when the key
  * is found in memory, then {@code valueHolder} must be set.
- *
+ * <p>
  * This check is potentially lighter-weight than invoking
  * {@link #get(ColumnFamilyHandle, byte[], int, int)}. One way to make this
  * lighter weight is to avoid doing any IOs.
@@ -2532,9 +2529,8 @@ public class RocksDB extends RocksObject {
  * @return false if the key definitely does not exist in the database,
  * otherwise true.
  */
-public boolean keyMayExist(
-    final ColumnFamilyHandle columnFamilyHandle,
-    final byte[] key, int offset, int len,
+public boolean keyMayExist(final ColumnFamilyHandle columnFamilyHandle, final byte[] key,
+    final int offset, final int len,
     /* @Nullable */ final Holder<byte[]> valueHolder) {
   return keyMayExist(columnFamilyHandle, null, key, offset, len,
       valueHolder);
@@ -2545,10 +2541,10 @@ public class RocksDB extends RocksObject {
  * returns false, otherwise it returns true if the key might exist.
  * That is to say that this method is probabilistic and may return false
  * positives, but never a false negative.
- *
+ * <p>
  * If the caller wants to obtain the value when the key
  * is found in memory, then {@code valueHolder} must be set.
- *
+ * <p>
  * This check is potentially lighter-weight than invoking
  * {@link #get(ReadOptions, byte[])}. One way to make this
  * lighter weight is to avoid doing any IOs.
@@ -2574,10 +2570,10 @@ public class RocksDB extends RocksObject {
  * returns false, otherwise it returns true if the key might exist.
  * That is to say that this method is probabilistic and may return false
  * positives, but never a false negative.
- *
+ * <p>
  * If the caller wants to obtain the value when the key
  * is found in memory, then {@code valueHolder} must be set.
- *
+ * <p>
  * This check is potentially lighter-weight than invoking
  * {@link #get(ReadOptions, byte[], int, int)}. One way to make this
  * lighter weight is to avoid doing any IOs.
@@ -2608,10 +2604,10 @@ public class RocksDB extends RocksObject {
  * returns false, otherwise it returns true if the key might exist.
  * That is to say that this method is probabilistic and may return false
  * positives, but never a false negative.
- *
+ * <p>
  * If the caller wants to obtain the value when the key
  * is found in memory, then {@code valueHolder} must be set.
- *
+ * <p>
  * This check is potentially lighter-weight than invoking
  * {@link #get(ColumnFamilyHandle, ReadOptions, byte[])}. One way to make this
  * lighter weight is to avoid doing any IOs.
@@ -2639,10 +2635,10 @@ public class RocksDB extends RocksObject {
  * returns false, otherwise it returns true if the key might exist.
  * That is to say that this method is probabilistic and may return false
  * positives, but never a false negative.
- *
+ * <p>
  * If the caller wants to obtain the value when the key
  * is found in memory, then {@code valueHolder} must be set.
- *
+ * <p>
  * This check is potentially lighter-weight than invoking
  * {@link #get(ColumnFamilyHandle, ReadOptions, byte[], int, int)}.
  * One way to make this lighter weight is to avoid doing any IOs.
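Putting the Holder contract together, a typical call is sketched below against an open db:

    final Holder<byte[]> valueHolder = new Holder<>();
    if (db.keyMayExist("k1".getBytes(UTF_8), valueHolder)) {
      if (valueHolder.getValue() != null) {
        // the value was found in memory and returned without a full get()
      } else {
        // the key may still be a false positive; confirm with db.get()
      }
    } // a false return means the key definitely does not exist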
@@ -2985,7 +2981,7 @@ public class RocksDB extends RocksObject {
  * @return Snapshot {@link Snapshot} instance
  */
 public Snapshot getSnapshot() {
-  long snapshotHandle = getSnapshot(nativeHandle_);
+  final long snapshotHandle = getSnapshot(nativeHandle_);
   if (snapshotHandle != 0) {
     return new Snapshot(snapshotHandle);
   }
@@ -2994,7 +2990,7 @@ public class RocksDB extends RocksObject {
 /**
  * Release a previously acquired snapshot.
- *
+ * <p>
  * The caller must not use "snapshot" after this call.
  *
  * @param snapshot {@link Snapshot} instance
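Because a Snapshot is owned by the database, it is released through the db rather than closed on its own; a minimal sketch with an open db:

    final Snapshot snapshot = db.getSnapshot();
    try (final ReadOptions readOptions = new ReadOptions().setSnapshot(snapshot)) {
      final byte[] asOfSnapshot = db.get(readOptions, "k1".getBytes(UTF_8));
      // reads observe the state at getSnapshot() time, regardless of later writes
    } finally {
      db.releaseSnapshot(snapshot); // the snapshot must not be used after this
    }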
@@ -3161,7 +3157,7 @@ public class RocksDB extends RocksObject {
 /**
  * Reset internal stats for DB and all column families.
- *
+ * <p>
  * Note this doesn't reset {@link Options#statistics()} as it is not
  * owned by DB.
  *
@@ -3200,11 +3196,11 @@ public class RocksDB extends RocksObject {
 /**
  * Get the approximate file system space used by keys in each range.
- *
+ * <p>
  * Note that the returned sizes measure file system space usage, so
  * if the user data compresses by a factor of ten, the returned
  * sizes will be one-tenth the size of the corresponding user data size.
- *
+ * <p>
  * {@code sizeApproximationFlags} defines whether the returned size
  * should include the recently written data in the mem-tables (if
  * the mem-table type supports it), data serialized to disk, or both.
@@ -3236,11 +3232,11 @@ public class RocksDB extends RocksObject {
 /**
  * Get the approximate file system space used by keys in each range for
  * the default column family.
- *
+ * <p>
  * Note that the returned sizes measure file system space usage, so
  * if the user data compresses by a factor of ten, the returned
  * sizes will be one-tenth the size of the corresponding user data size.
- *
+ * <p>
  * {@code sizeApproximationFlags} defines whether the returned size
  * should include the recently written data in the mem-tables (if
  * the mem-table type supports it), data serialized to disk, or both.
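A sketch of the flags in use, assuming SizeApproximationFlag is imported alongside the other org.rocksdb types and db is open:

    final List<Range> ranges = Collections.singletonList(
        new Range(new Slice("a".getBytes(UTF_8)), new Slice("m".getBytes(UTF_8))));
    final long[] sizes = db.getApproximateSizes(
        ranges, SizeApproximationFlag.INCLUDE_FILES, SizeApproximationFlag.INCLUDE_MEMTABLES);
    // sizes[0] approximates on-disk plus memtable usage for the key range ["a", "m")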
@@ -3450,7 +3446,7 @@ public class RocksDB extends RocksObject {
  */
 public MutableColumnFamilyOptions.MutableColumnFamilyOptionsBuilder getOptions(
     /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle) throws RocksDBException {
-  String optionsString = getOptions(
+  final String optionsString = getOptions(
       nativeHandle_, columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_);
   return MutableColumnFamilyOptions.parse(optionsString, true);
 }
@@ -3477,7 +3473,7 @@ public class RocksDB extends RocksObject {
  * resulting options string into options
  */
 public MutableDBOptions.MutableDBOptionsBuilder getDBOptions() throws RocksDBException {
-  String optionsString = getDBOptions(nativeHandle_);
+  final String optionsString = getDBOptions(nativeHandle_);
   return MutableDBOptions.parse(optionsString, true);
 }
@@ -3511,7 +3507,7 @@ public class RocksDB extends RocksObject {
 /**
  * Takes a list of files specified by file names and
  * compacts them to the specified level.
- *
+ * <p>
  * Note that the behavior is different from
  * {@link #compactRange(ColumnFamilyHandle, byte[], byte[])}
  * in that CompactFiles() performs the compaction job using the CURRENT
@@ -3543,7 +3539,7 @@ public class RocksDB extends RocksObject {
 /**
  * Takes a list of files specified by file names and
  * compacts them to the specified level.
- *
+ * <p>
  * Note that the behavior is different from
  * {@link #compactRange(ColumnFamilyHandle, byte[], byte[])}
  * in that CompactFiles() performs the compaction job using the CURRENT
@@ -3586,7 +3582,7 @@ public class RocksDB extends RocksObject {
  * returning.
  *
  */
-public void cancelAllBackgroundWork(boolean wait) {
+public void cancelAllBackgroundWork(final boolean wait) {
   cancelAllBackgroundWork(nativeHandle_, wait);
 }
@@ -3614,11 +3610,11 @@ public class RocksDB extends RocksObject {
 /**
  * Enable automatic compactions for the given column
  * families if they were previously disabled.
- *
+ * <p>
  * The function will first set the
  * {@link ColumnFamilyOptions#disableAutoCompactions()} option for each
  * column family to false, after which it will schedule a flush/compaction.
- *
+ * <p>
  * NOTE: Setting disableAutoCompactions to 'false' through
  * {@link #setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}
  * does NOT schedule a flush/compaction afterwards, and only changes the
@@ -3761,15 +3757,15 @@ public class RocksDB extends RocksObject {
     /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle)
     throws RocksDBException {
   flush(flushOptions,
-      columnFamilyHandle == null ? null : Arrays.asList(columnFamilyHandle));
+      columnFamilyHandle == null ? null : Collections.singletonList(columnFamilyHandle));
 }
 /**
  * Flushes multiple column families.
- *
+ * <p>
  * If atomic flush is not enabled, this is equivalent to calling
  * {@link #flush(FlushOptions, ColumnFamilyHandle)} multiple times.
- *
+ * <p>
  * If atomic flush is enabled, this will flush all column families
  * specified up to the latest sequence number at the time when flush is
  * requested.
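A sketch of a multi-column-family flush; cfHandle1 and cfHandle2 are hypothetical handles obtained from createColumnFamilies:

    try (final FlushOptions flushOptions = new FlushOptions().setWaitForFlush(true)) {
      db.flush(flushOptions, Arrays.asList(cfHandle1, cfHandle2));
      // with atomic flush enabled on the DB, both families flush at one sequence number
    }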
@@ -3800,13 +3796,13 @@ public class RocksDB extends RocksObject {
 /**
  * Sync the WAL.
- *
+ * <p>
  * Note that {@link #write(WriteOptions, WriteBatch)} followed by
- * {@link #syncWal()} is not exactly the same as
+ * {@code #syncWal()} is not exactly the same as
  * {@link #write(WriteOptions, WriteBatch)} with
  * {@link WriteOptions#sync()} set to true; in the latter case the changes
  * won't be visible until the sync is done.
- *
+ * <p>
  * Currently only works if {@link Options#allowMmapWrites()} is set to false.
  *
  * @throws RocksDBException if an error occurs whilst syncing
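The distinction drawn above, sketched against an open db: the write is visible as soon as put() returns, and syncWal() only adds durability afterwards:

    try (final WriteOptions writeOptions = new WriteOptions()) { // sync defaults to false
      db.put(writeOptions, "k1".getBytes(UTF_8), "v".getBytes(UTF_8));
    }
    db.syncWal(); // the put was already visible; now it is also synced to the WAL on disk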
@@ -3884,7 +3880,7 @@ public class RocksDB extends RocksObject {
 /**
  * Retrieve the list of all files in the database after flushing the memtable.
- *
+ * <p>
  * See {@link #getLiveFiles(boolean)}.
  *
  * @return the live files
@@ -3898,14 +3894,14 @@ public class RocksDB extends RocksObject {
 /**
  * Retrieve the list of all files in the database.
- *
+ * <p>
  * In case you have multiple column families, even if {@code flushMemtable}
  * is true, you still need to call {@link #getSortedWalFiles()}
- * after {@link #getLiveFiles(boolean)} to compensate for new data that
+ * after {@code #getLiveFiles(boolean)} to compensate for new data that
  * arrived to already-flushed column families while other column families
  * were flushing.
- *
- * NOTE: Calling {@link #getLiveFiles(boolean)} followed by
+ * <p>
+ * NOTE: Calling {@code #getLiveFiles(boolean)} followed by
  * {@link #getSortedWalFiles()} can generate a lossless backup.
  *
  * @param flushMemtable set to true to flush before recording the live
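The lossless-backup recipe above, spelled out as a sketch (db assumed open):

    final RocksDB.LiveFiles liveFiles = db.getLiveFiles(true); // flush, then list the SSTs
    final List<LogFile> walFiles = db.getSortedWalFiles();     // then collect the WALs
    // copying liveFiles.files plus the WAL files yields a lossless backup;
    // liveFiles.manifestFileSize bounds how much of the MANIFEST needs to be copied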
@@ -4016,7 +4012,7 @@ public class RocksDB extends RocksObject {
  * ingest the file into this level (2). A file that has a key range that
  * overlaps with the memtable key range will require us to Flush the memtable
  * first before ingesting the file.
- *
+ * <p>
  * (1) External SST files can be created using {@link SstFileWriter}
  * (2) We will try to ingest the files to the lowest possible level
  * even if the file compression doesn't match the level compression
@@ -4041,7 +4037,7 @@ public class RocksDB extends RocksObject {
  * ingest the file into this level (2). A file that has a key range that
  * overlaps with the memtable key range will require us to Flush the memtable
  * first before ingesting the file.
- *
+ * <p>
  * (1) External SST files can be created using {@link SstFileWriter}
  * (2) We will try to ingest the files to the lowest possible level
  * even if the file compression doesn't match the level compression
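End to end, steps (1) and (2) look roughly like the following sketch; the SST path is hypothetical and db is assumed open:

    try (final EnvOptions envOptions = new EnvOptions();
         final Options options = new Options();
         final SstFileWriter writer = new SstFileWriter(envOptions, options)) {
      writer.open("/tmp/bulk.sst");
      writer.put("k1".getBytes(UTF_8), "v1".getBytes(UTF_8)); // keys must be added in order
      writer.finish();
    }
    try (final IngestExternalFileOptions ingestOptions = new IngestExternalFileOptions()) {
      db.ingestExternalFile(Collections.singletonList("/tmp/bulk.sst"), ingestOptions);
    }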
@@ -4207,7 +4203,7 @@ public class RocksDB extends RocksObject {
 /**
  * Trace DB operations.
- *
+ * <p>
  * Use {@link #endTrace()} to stop tracing.
  *
  * @param traceOptions the options
@@ -4219,7 +4215,7 @@ public class RocksDB extends RocksObject {
     final AbstractTraceWriter traceWriter) throws RocksDBException {
   startTrace(nativeHandle_, traceOptions.getMaxTraceFileSize(),
       traceWriter.nativeHandle_);
-  /**
+  /*
    * NOTE: {@link #startTrace(long, long, long) transfers the ownership
    * from Java to C++, so we must disown the native handle here.
    */
@@ -4228,7 +4224,7 @@ public class RocksDB extends RocksObject {
 /**
  * Stop tracing DB operations.
- *
+ * <p>
  * See {@link #startTrace(TraceOptions, AbstractTraceWriter)}
  *
  * @throws RocksDBException if an error occurs whilst ending the trace
@@ -4314,7 +4310,7 @@ public class RocksDB extends RocksObject {
 }
 private static long[] toRangeSliceHandles(final List<Range> ranges) {
-  final long rangeSliceHandles[] = new long [ranges.size() * 2];
+  final long[] rangeSliceHandles = new long[ranges.size() * 2];
   for (int i = 0, j = 0; i < ranges.size(); i++) {
     final Range range = ranges.get(i);
     rangeSliceHandles[j++] = range.start.getNativeHandle();
@@ -4323,11 +4319,11 @@ public class RocksDB extends RocksObject {
   return rangeSliceHandles;
 }
-protected void storeOptionsInstance(DBOptionsInterface<?> options) {
+protected void storeOptionsInstance(final DBOptionsInterface<?> options) {
   options_ = options;
 }
-private static void checkBounds(int offset, int len, int size) {
+private static void checkBounds(final int offset, final int len, final int size) {
   if ((offset | len | (offset + len) | (size - (offset + len))) < 0) {
     throw new IndexOutOfBoundsException(String.format("offset(%d), len(%d), size(%d)", offset, len, size));
   }
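The single-branch bounds check works because every violation (negative offset, negative length, overflow of offset + len, or a range running past the end) makes at least one of the OR'd terms negative, and | preserves the sign bit. Illustrative calls (checkBounds is private, so these are for reading only):

    checkBounds(4, 8, 16);   // all four terms non-negative: in bounds
    checkBounds(-1, 8, 16);  // offset < 0 sets the sign bit: throws
    checkBounds(12, 8, 16);  // size - (offset + len) == -4: throws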
@ -4340,8 +4336,8 @@ public class RocksDB extends RocksObject {
} }
// native methods // native methods
private native static long open(final long optionsHandle, private static native long open(final long optionsHandle, final String path)
final String path) throws RocksDBException; throws RocksDBException;
/** /**
* @param optionsHandle Native handle pointing to an Options object * @param optionsHandle Native handle pointing to an Options object
@ -4355,11 +4351,10 @@ public class RocksDB extends RocksObject {
* *
* @throws RocksDBException thrown if the database could not be opened * @throws RocksDBException thrown if the database could not be opened
*/ */
private native static long[] open(final long optionsHandle, private static native long[] open(final long optionsHandle, final String path,
final String path, final byte[][] columnFamilyNames, final byte[][] columnFamilyNames, final long[] columnFamilyOptions) throws RocksDBException;
final long[] columnFamilyOptions) throws RocksDBException;
private native static long openROnly(final long optionsHandle, final String path, private static native long openROnly(final long optionsHandle, final String path,
final boolean errorIfWalFileExists) throws RocksDBException; final boolean errorIfWalFileExists) throws RocksDBException;
/** /**
@@ -4374,31 +4369,30 @@ public class RocksDB extends RocksObject {
   *
   * @throws RocksDBException thrown if the database could not be opened
   */
- private native static long[] openROnly(final long optionsHandle, final String path,
+ private static native long[] openROnly(final long optionsHandle, final String path,
      final byte[][] columnFamilyNames, final long[] columnFamilyOptions,
      final boolean errorIfWalFileExists) throws RocksDBException;
- private native static long openAsSecondary(final long optionsHandle, final String path,
+ private static native long openAsSecondary(final long optionsHandle, final String path,
      final String secondaryPath) throws RocksDBException;
- private native static long[] openAsSecondary(final long optionsHandle, final String path,
+ private static native long[] openAsSecondary(final long optionsHandle, final String path,
      final String secondaryPath, final byte[][] columnFamilyNames,
      final long[] columnFamilyOptions) throws RocksDBException;
  @Override protected native void disposeInternal(final long handle);
- private native static void closeDatabase(final long handle)
-     throws RocksDBException;
- private native static byte[][] listColumnFamilies(final long optionsHandle,
-     final String path) throws RocksDBException;
+ private static native void closeDatabase(final long handle) throws RocksDBException;
+ private static native byte[][] listColumnFamilies(final long optionsHandle, final String path)
+     throws RocksDBException;
  private native long createColumnFamily(final long handle,
      final byte[] columnFamilyName, final int columnFamilyNamelen,
      final long columnFamilyOptions) throws RocksDBException;
  private native long[] createColumnFamilies(final long handle,
      final long columnFamilyOptionsHandle, final byte[][] columnFamilyNames)
      throws RocksDBException;
- private native long[] createColumnFamilies(final long handle,
-     final long columnFamilyOptionsHandles[], final byte[][] columnFamilyNames)
-     throws RocksDBException;
+ private native long[] createColumnFamilies(
+     final long handle, final long[] columnFamilyOptionsHandles, final byte[][] columnFamilyNames)
+     throws RocksDBException;
  private native void dropColumnFamily(
      final long handle, final long cfHandle) throws RocksDBException;
@@ -4645,10 +4639,10 @@ public class RocksDB extends RocksObject {
  private native void deleteFilesInRanges(long handle, long cfHandle, final byte[][] ranges,
      boolean include_end) throws RocksDBException;
- private native static void destroyDB(final String path,
-     final long optionsHandle) throws RocksDBException;
+ private static native void destroyDB(final String path, final long optionsHandle)
+     throws RocksDBException;
- private native static int version();
+ private static native int version();
  protected DBOptionsInterface<?> options_;
  private static Version version;
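
The repeated "private native static" to "private static native" rewrites put modifiers into the canonical order recommended by the Java Language Specification (and enforced by tools such as checkstyle's ModifierOrder): access modifier, then abstract, static, final, synchronized, native. Both orders compile identically; a compile-only illustration with hypothetical methods, not RocksDB code:

    public class ModifierOrderDemo {
      private native static long handleA(); // legal, but non-canonical order
      private static native long handleB(); // canonical: static before native
    }
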
java/src/main/java/org/rocksdb/RocksEnv.java
@@ -27,6 +27,5 @@ public class RocksEnv extends Env {
    super(handle);
  }
- @Override
- protected native final void disposeInternal(final long handle);
+ @Override protected final native void disposeInternal(final long handle);
}
java/src/main/java/org/rocksdb/RocksMutableObject.java
@@ -71,7 +71,7 @@ public abstract class RocksMutableObject extends AbstractNativeReference {
  }
  @Override
- public synchronized final void close() {
+ public final synchronized void close() {
    if (isOwningHandle()) {
      disposeInternal();
      this.owningHandle_ = false;
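
Besides the modifier reordering (final before synchronized, per the same canonical order), this hunk shows the ownership-flag idiom used across the RocksJava object hierarchy: close() frees the native handle exactly once, then clears the flag, so repeated calls are harmless. A minimal sketch with hypothetical names:

    abstract class NativeResource implements AutoCloseable {
      private boolean owningHandle = true;

      protected synchronized boolean isOwningHandle() {
        return owningHandle;
      }

      @Override
      public final synchronized void close() {
        if (isOwningHandle()) {
          disposeInternal();         // release the native side exactly once
          this.owningHandle = false; // later close() calls become no-ops
        }
      }

      protected abstract void disposeInternal(); // frees the C++ object
    }
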
java/src/main/java/org/rocksdb/Slice.java
@@ -125,9 +125,8 @@ public class Slice extends AbstractSlice<byte[]> {
  }
  @Override protected final native byte[] data0(long handle);
- private native static long createNewSlice0(final byte[] data,
-     final int length);
- private native static long createNewSlice1(final byte[] data);
+ private static native long createNewSlice0(final byte[] data, final int length);
+ private static native long createNewSlice1(final byte[] data);
  private native void clear0(long handle, boolean internalBuffer,
      long internalBufferOffset);
  private native void removePrefix0(long handle, int length);
java/src/main/java/org/rocksdb/Snapshot.java
@@ -29,7 +29,7 @@ public class Snapshot extends RocksObject {
  @Override
  protected final void disposeInternal(final long handle) {
-   /**
+   /*
     * Nothing to release, we never own the pointer for a
     * Snapshot. The pointer
     * to the snapshot is released by the database
java/src/main/java/org/rocksdb/SstFileManager.java
@@ -10,9 +10,9 @@ import java.util.Map;
/**
 * SstFileManager is used to track SST files in the DB and control their
 * deletion rate.
- *
+ * <p>
 * All SstFileManager public functions are thread-safe.
- *
+ * <p>
 * SstFileManager is not extensible.
 */
//@ThreadSafe
@@ -55,7 +55,7 @@ public final class SstFileManager extends RocksObject {
   *
   * @param env the environment.
   * @param logger if not null, the logger will be used to log errors.
-  *
+  * <p>
   * == Deletion rate limiting specific arguments ==
   * @param rateBytesPerSec how many bytes should be deleted per second, If
   *     this value is set to 1024 (1 Kb / sec) and we deleted a file of size
@@ -75,7 +75,7 @@ public final class SstFileManager extends RocksObject {
   *
   * @param env the environment.
   * @param logger if not null, the logger will be used to log errors.
-  *
+  * <p>
   * == Deletion rate limiting specific arguments ==
   * @param rateBytesPerSec how many bytes should be deleted per second, If
   *     this value is set to 1024 (1 Kb / sec) and we deleted a file of size
@@ -100,7 +100,7 @@ public final class SstFileManager extends RocksObject {
   *
   * @param env the environment.
   * @param logger if not null, the logger will be used to log errors.
-  *
+  * <p>
   * == Deletion rate limiting specific arguments ==
   * @param rateBytesPerSec how many bytes should be deleted per second, If
   *     this value is set to 1024 (1 Kb / sec) and we deleted a file of size
@@ -123,12 +123,11 @@ public final class SstFileManager extends RocksObject {
        rateBytesPerSec, maxTrashDbRatio, bytesMaxDeleteChunk));
  }
  /**
   * Update the maximum allowed space that should be used by RocksDB, if
   * the total size of the SST files exceeds {@code maxAllowedSpace}, writes to
   * RocksDB will fail.
-  *
+  * <p>
   * Setting {@code maxAllowedSpace} to 0 will disable this feature;
   * maximum allowed space will be infinite (Default value).
   *
@@ -202,7 +201,7 @@ public final class SstFileManager extends RocksObject {
  /**
   * Set the delete rate limit.
-  *
+  * <p>
   * Zero means disable delete rate limiting and delete files immediately.
   *
   * @param deleteRate the delete rate limit (in bytes per second).
@@ -229,9 +228,8 @@ public final class SstFileManager extends RocksObject {
    setMaxTrashDBRatio(nativeHandle_, ratio);
  }
- private native static long newSstFileManager(final long handle,
-     final long logger_handle, final long rateBytesPerSec,
-     final double maxTrashDbRatio, final long bytesMaxDeleteChunk)
+ private static native long newSstFileManager(final long handle, final long logger_handle,
+     final long rateBytesPerSec, final double maxTrashDbRatio, final long bytesMaxDeleteChunk)
      throws RocksDBException;
  private native void setMaxAllowedSpaceUsage(final long handle,
      final long maxAllowedSpace);
@@ -247,5 +245,5 @@ public final class SstFileManager extends RocksObject {
      final long deleteRate);
  private native double getMaxTrashDBRatio(final long handle);
  private native void setMaxTrashDBRatio(final long handle, final double ratio);
- @Override protected final native void disposeInternal(final long handle);
+ @Override protected native void disposeInternal(final long handle);
}
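
For orientation, a usage sketch of the class whose natives are declared above; the path, the 1 MiB/s delete rate and the 10 GiB cap are illustrative values, not defaults:

    import org.rocksdb.Env;
    import org.rocksdb.Options;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;
    import org.rocksdb.SstFileManager;

    public class SstFileManagerExample {
      public static void main(final String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (final SstFileManager sstFileManager = new SstFileManager(Env.getDefault());
             final Options options =
                 new Options().setCreateIfMissing(true).setSstFileManager(sstFileManager);
             final RocksDB db = RocksDB.open(options, "/tmp/sst-managed-db")) {
          sstFileManager.setDeleteRateBytesPerSecond(1024 * 1024);          // throttle deletions
          sstFileManager.setMaxAllowedSpaceUsage(10L * 1024 * 1024 * 1024); // fail writes past cap
        }
      }
    }
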
java/src/main/java/org/rocksdb/SstFileReader.java
@@ -18,12 +18,12 @@ public class SstFileReader extends RocksObject {
   * Returns an iterator that will iterate on all keys in the default
   * column family including both keys in the DB and uncommitted keys in this
   * transaction.
-  *
+  * <p>
   * Setting {@link ReadOptions#setSnapshot(Snapshot)} will affect what is read
   * from the DB but will NOT change which keys are read from this transaction
   * (the keys in this transaction do not yet belong to any snapshot and will be
   * fetched regardless).
-  *
+  * <p>
   * Caller is responsible for deleting the returned Iterator.
   *
   * @param readOptions Read options.
@@ -32,7 +32,7 @@ public class SstFileReader extends RocksObject {
   */
  public SstFileReaderIterator newIterator(final ReadOptions readOptions) {
    assert (isOwningHandle());
-   long iter = newIterator(nativeHandle_, readOptions.nativeHandle_);
+   final long iter = newIterator(nativeHandle_, readOptions.nativeHandle_);
    return new SstFileReaderIterator(this, iter);
  }
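
A usage sketch for the method above (the .sst path is illustrative): the reader opens an SST file directly, without a database, and the returned iterator scans it:

    import org.rocksdb.Options;
    import org.rocksdb.ReadOptions;
    import org.rocksdb.RocksDBException;
    import org.rocksdb.SstFileReader;
    import org.rocksdb.SstFileReaderIterator;

    public class SstFileReaderExample {
      public static void main(final String[] args) throws RocksDBException {
        try (final Options options = new Options();
             final SstFileReader reader = new SstFileReader(options)) {
          reader.open("/tmp/data.sst");
          try (final ReadOptions readOptions = new ReadOptions();
               final SstFileReaderIterator it = reader.newIterator(readOptions)) {
            for (it.seekToFirst(); it.isValid(); it.next()) {
              System.out.println(new String(it.key()) + " -> " + new String(it.value()));
            }
          }
        }
      }
    }
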
@@ -75,7 +75,7 @@ public class SstFileReader extends RocksObject {
  private native void open(final long handle, final String filePath)
      throws RocksDBException;
- private native static long newSstFileReader(final long optionsHandle);
+ private static native long newSstFileReader(final long optionsHandle);
  private native void verifyChecksum(final long handle) throws RocksDBException;
  private native TableProperties getTableProperties(final long handle)
      throws RocksDBException;
java/src/main/java/org/rocksdb/SstFileWriter.java
@@ -199,12 +199,11 @@ public class SstFileWriter extends RocksObject {
    return fileSize(nativeHandle_);
  }
- private native static long newSstFileWriter(
-     final long envOptionsHandle, final long optionsHandle,
+ private static native long newSstFileWriter(final long envOptionsHandle, final long optionsHandle,
      final long userComparatorHandle, final byte comparatorType);
- private native static long newSstFileWriter(final long envOptionsHandle,
-     final long optionsHandle);
+ private static native long newSstFileWriter(
+     final long envOptionsHandle, final long optionsHandle);
  private native void open(final long handle, final String filePath)
      throws RocksDBException;
java/src/main/java/org/rocksdb/SstPartitionerFixedPrefixFactory.java
@@ -9,11 +9,11 @@ package org.rocksdb;
/**
 * Fixed prefix factory. It partitions SST files using fixed prefix of the key.
 */
public class SstPartitionerFixedPrefixFactory extends SstPartitionerFactory {
- public SstPartitionerFixedPrefixFactory(long prefixLength) {
+ public SstPartitionerFixedPrefixFactory(final long prefixLength) {
    super(newSstPartitionerFixedPrefixFactory0(prefixLength));
  }
- private native static long newSstPartitionerFixedPrefixFactory0(long prefixLength);
+ private static native long newSstPartitionerFixedPrefixFactory0(long prefixLength);
  @Override protected final native void disposeInternal(final long handle);
}
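
A usage sketch (the 4-byte prefix length is illustrative): installing the factory on a ColumnFamilyOptions makes compaction cut SST files at key-prefix boundaries, so keys sharing a prefix stay in the same file:

    import org.rocksdb.ColumnFamilyOptions;
    import org.rocksdb.RocksDB;
    import org.rocksdb.SstPartitionerFixedPrefixFactory;

    public class PartitionerExample {
      public static void main(final String[] args) {
        RocksDB.loadLibrary();
        try (final SstPartitionerFixedPrefixFactory factory =
                 new SstPartitionerFixedPrefixFactory(4);
             final ColumnFamilyOptions cfOptions =
                 new ColumnFamilyOptions().setSstPartitionerFactory(factory)) {
          // pass cfOptions in a ColumnFamilyDescriptor when opening the DB
        }
      }
    }
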
java/src/main/java/org/rocksdb/StateType.java
@@ -7,7 +7,7 @@ package org.rocksdb;
/**
 * The type used to refer to a thread state.
- *
+ * <p>
 * A state describes lower-level action of a thread
 * such as reading / writing a file or waiting for a mutex.
 */
java/src/main/java/org/rocksdb/Statistics.java
@@ -31,7 +31,7 @@ public class Statistics extends RocksObject {
  /**
   * Intentionally package-private.
-  *
+  * <p>
   * Used from {@link DBOptions#statistics()}
   *
   * @param existingStatisticsHandle The C++ pointer to an existing statistics object
@@ -134,10 +134,11 @@ public class Statistics extends RocksObject {
    return toString(nativeHandle_);
  }
- private native static long newStatistics();
- private native static long newStatistics(final long otherStatisticsHandle);
- private native static long newStatistics(final byte[] ignoreHistograms);
- private native static long newStatistics(final byte[] ignoreHistograms, final long otherStatisticsHandle);
+ private static native long newStatistics();
+ private static native long newStatistics(final long otherStatisticsHandle);
+ private static native long newStatistics(final byte[] ignoreHistograms);
+ private static native long newStatistics(
+     final byte[] ignoreHistograms, final long otherStatisticsHandle);
  @Override protected final native void disposeInternal(final long handle);
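
A usage sketch ("/tmp/stats-db" is an illustrative path): a Statistics object is attached through Options, and ticker counters are read back after some work:

    import org.rocksdb.Options;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;
    import org.rocksdb.Statistics;
    import org.rocksdb.TickerType;

    public class StatisticsExample {
      public static void main(final String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (final Statistics statistics = new Statistics();
             final Options options =
                 new Options().setCreateIfMissing(true).setStatistics(statistics);
             final RocksDB db = RocksDB.open(options, "/tmp/stats-db")) {
          db.put("k".getBytes(), "v".getBytes());
          System.out.println("bytes written: "
              + statistics.getTickerCount(TickerType.BYTES_WRITTEN));
        }
      }
    }
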
java/src/main/java/org/rocksdb/StatisticsCollector.java
@@ -62,50 +62,41 @@ public class StatisticsCollector {
  }
  private Runnable collectStatistics() {
-   return new Runnable() {
-     @Override
-     public void run() {
+   return () -> {
      while (_isRunning) {
        try {
-         if(Thread.currentThread().isInterrupted()) {
+         if (Thread.currentThread().isInterrupted()) {
            break;
          }
-         for(final StatsCollectorInput statsCollectorInput :
-             _statsCollectorInputList) {
-           Statistics statistics = statsCollectorInput.getStatistics();
-           StatisticsCollectorCallback statsCallback =
-               statsCollectorInput.getCallback();
+         for (final StatsCollectorInput statsCollectorInput : _statsCollectorInputList) {
+           final Statistics statistics = statsCollectorInput.getStatistics();
+           final StatisticsCollectorCallback statsCallback = statsCollectorInput.getCallback();
            // Collect ticker data
-           for(final TickerType ticker : TickerType.values()) {
-             if(ticker != TickerType.TICKER_ENUM_MAX) {
+           for (final TickerType ticker : TickerType.values()) {
+             if (ticker != TickerType.TICKER_ENUM_MAX) {
                final long tickerValue = statistics.getTickerCount(ticker);
                statsCallback.tickerCallback(ticker, tickerValue);
              }
            }
            // Collect histogram data
-           for(final HistogramType histogramType : HistogramType.values()) {
-             if(histogramType != HistogramType.HISTOGRAM_ENUM_MAX) {
-               final HistogramData histogramData =
-                   statistics.getHistogramData(histogramType);
+           for (final HistogramType histogramType : HistogramType.values()) {
+             if (histogramType != HistogramType.HISTOGRAM_ENUM_MAX) {
+               final HistogramData histogramData = statistics.getHistogramData(histogramType);
                statsCallback.histogramCallback(histogramType, histogramData);
              }
            }
          }
          Thread.sleep(_statsCollectionInterval);
-       }
-       catch (final InterruptedException e) {
+       } catch (final InterruptedException e) {
          Thread.currentThread().interrupt();
          break;
-       }
-       catch (final Exception e) {
+       } catch (final Exception e) {
          throw new RuntimeException("Error while calculating statistics", e);
        }
      }
-     }
    };
  }
}
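
The hunk above is this commit's largest lambda conversion: Runnable has a single abstract method, so the anonymous subclass can become a lambda with no behavioural change (and one less nesting level, which is why so many lines reindent). A contrived before/after sketch, not RocksDB code:

    import java.util.concurrent.atomic.AtomicBoolean;

    public class LambdaSketch {
      public static void main(final String[] args) throws InterruptedException {
        final AtomicBoolean running = new AtomicBoolean(true);

        // Before: anonymous subclass of the functional interface.
        final Runnable before = new Runnable() {
          @Override
          public void run() {
            while (running.get()) { /* poll statistics here */ }
          }
        };

        // After: behaviourally identical lambda.
        final Runnable after = () -> {
          while (running.get()) { /* poll statistics here */ }
        };

        final Thread t = new Thread(after);
        t.start();
        running.set(false);
        t.join();
      }
    }
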
java/src/main/java/org/rocksdb/StatisticsCollectorCallback.java
@@ -7,7 +7,7 @@ package org.rocksdb;
/**
 * Callback interface provided to StatisticsCollector.
- *
+ * <p>
 * Thread safety:
 * StatisticsCollector doesn't make any guarantees about thread safety.
 * If the same reference of StatisticsCollectorCallback is passed to multiple
java/src/main/java/org/rocksdb/StatsLevel.java
@@ -23,7 +23,7 @@ public enum StatsLevel {
  /**
   * Collect all stats, including measuring duration of mutex operations.
-  *
+  * <p>
   * If getting time is expensive on the platform to run, it can
   * reduce scalability to more threads, especially for writes.
   */
java/src/main/java/org/rocksdb/Status.java
@@ -9,7 +9,7 @@ import java.util.Objects;
/**
 * Represents the status returned by a function call in RocksDB.
- *
+ * <p>
 * Currently only used with {@link RocksDBException} when the
 * status is not {@link Code#Ok}
 */
@@ -139,12 +139,12 @@ public class Status {
  }
  @Override
- public boolean equals(Object o) {
+ public boolean equals(final Object o) {
    if (this == o)
      return true;
    if (o == null || getClass() != o.getClass())
      return false;
-   Status status = (Status) o;
+   final Status status = (Status) o;
    return code == status.code && subCode == status.subCode && Objects.equals(state, status.state);
  }
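
The same equals() tightening (final parameter, final local after the class check) repeats in the TableFile* classes below. The shape, shown once on a hypothetical class:

    import java.util.Objects;

    final class JobRecord {
      private final int jobId;
      private final String dbName;

      JobRecord(final int jobId, final String dbName) {
        this.jobId = jobId;
        this.dbName = dbName;
      }

      @Override
      public boolean equals(final Object o) {
        if (this == o)
          return true;
        if (o == null || getClass() != o.getClass())
          return false;
        final JobRecord that = (JobRecord) o;
        return jobId == that.jobId && Objects.equals(dbName, that.dbName);
      }

      @Override
      public int hashCode() {
        return Objects.hash(jobId, dbName); // keep hashCode in step with equals
      }
    }
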
java/src/main/java/org/rocksdb/StringAppendOperator.java
@@ -15,15 +15,15 @@ public class StringAppendOperator extends MergeOperator {
    this(',');
  }
- public StringAppendOperator(char delim) {
+ public StringAppendOperator(final char delim) {
    super(newSharedStringAppendOperator(delim));
  }
- public StringAppendOperator(String delim) {
+ public StringAppendOperator(final String delim) {
    super(newSharedStringAppendOperator(delim));
  }
- private native static long newSharedStringAppendOperator(final char delim);
- private native static long newSharedStringAppendOperator(final String delim);
+ private static native long newSharedStringAppendOperator(final char delim);
+ private static native long newSharedStringAppendOperator(final String delim);
  @Override protected final native void disposeInternal(final long handle);
}
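
A usage sketch (the path is illustrative): with a StringAppendOperator installed, merge() appends to the stored value using the configured delimiter instead of overwriting it:

    import org.rocksdb.Options;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;
    import org.rocksdb.StringAppendOperator;

    public class StringAppendExample {
      public static void main(final String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (final StringAppendOperator stringAppend = new StringAppendOperator(',');
             final Options options =
                 new Options().setCreateIfMissing(true).setMergeOperator(stringAppend);
             final RocksDB db = RocksDB.open(options, "/tmp/append-db")) {
          db.merge("key".getBytes(), "a".getBytes());
          db.merge("key".getBytes(), "b".getBytes());
          System.out.println(new String(db.get("key".getBytes()))); // prints a,b
        }
      }
    }
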
java/src/main/java/org/rocksdb/TableFileCreationBriefInfo.java
@@ -82,12 +82,12 @@ public class TableFileCreationBriefInfo {
  }
  @Override
- public boolean equals(Object o) {
+ public boolean equals(final Object o) {
    if (this == o)
      return true;
    if (o == null || getClass() != o.getClass())
      return false;
-   TableFileCreationBriefInfo that = (TableFileCreationBriefInfo) o;
+   final TableFileCreationBriefInfo that = (TableFileCreationBriefInfo) o;
    return jobId == that.jobId && Objects.equals(dbName, that.dbName)
        && Objects.equals(columnFamilyName, that.columnFamilyName)
        && Objects.equals(filePath, that.filePath) && reason == that.reason;
java/src/main/java/org/rocksdb/TableFileCreationInfo.java
@@ -62,12 +62,12 @@ public class TableFileCreationInfo extends TableFileCreationBriefInfo {
  }
  @Override
- public boolean equals(Object o) {
+ public boolean equals(final Object o) {
    if (this == o)
      return true;
    if (o == null || getClass() != o.getClass())
      return false;
-   TableFileCreationInfo that = (TableFileCreationInfo) o;
+   final TableFileCreationInfo that = (TableFileCreationInfo) o;
    return fileSize == that.fileSize && Objects.equals(tableProperties, that.tableProperties)
        && Objects.equals(status, that.status);
  }
java/src/main/java/org/rocksdb/TableFileDeletionInfo.java
@@ -62,12 +62,12 @@ public class TableFileDeletionInfo {
  }
  @Override
- public boolean equals(Object o) {
+ public boolean equals(final Object o) {
    if (this == o)
      return true;
    if (o == null || getClass() != o.getClass())
      return false;
-   TableFileDeletionInfo that = (TableFileDeletionInfo) o;
+   final TableFileDeletionInfo that = (TableFileDeletionInfo) o;
    return jobId == that.jobId && Objects.equals(dbName, that.dbName)
        && Objects.equals(filePath, that.filePath) && Objects.equals(status, that.status);
  }
java/src/main/java/org/rocksdb/TableFormatConfig.java
@@ -18,5 +18,5 @@ public abstract class TableFormatConfig {
   *
   * @return native handle address to native table instance.
   */
- abstract protected long newTableFactoryHandle();
+ protected abstract long newTableFactoryHandle();
}
java/src/main/java/org/rocksdb/TableProperties.java
@@ -380,12 +380,12 @@ public class TableProperties {
  }
  @Override
- public boolean equals(Object o) {
+ public boolean equals(final Object o) {
    if (this == o)
      return true;
    if (o == null || getClass() != o.getClass())
      return false;
-   TableProperties that = (TableProperties) o;
+   final TableProperties that = (TableProperties) o;
    return dataSize == that.dataSize && indexSize == that.indexSize
        && indexPartitions == that.indexPartitions && topLevelIndexSize == that.topLevelIndexSize
        && indexKeyIsUserKey == that.indexKeyIsUserKey
java/src/main/java/org/rocksdb/ThreadStatus.java
@@ -15,7 +15,7 @@ public class ThreadStatus {
  private final OperationType operationType;
  private final long operationElapsedTime; // microseconds
  private final OperationStage operationStage;
- private final long operationProperties[];
+ private final long[] operationProperties;
  private final StateType stateType;
  /**
@@ -113,7 +113,7 @@ public class ThreadStatus {
  /**
   * Get the list of properties that describe some details about the current
   * operation.
-  *
+  * <p>
   * Each field in might have different meanings for different operations.
   *
   * @return the properties