diff --git a/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java b/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java
index 2f0d4f3ca..fd7eef4d4 100644
--- a/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java
+++ b/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java
@@ -7,8 +7,8 @@ package org.rocksdb;
/**
* A CompactionFilter allows an application to modify/delete a key-value at
* the time of compaction.
- *
- * At present we just permit an overriding Java class to wrap a C++
+ *
+ * At present, we just permit an overriding Java class to wrap a C++
* implementation
*/
public abstract class AbstractCompactionFilter<T extends AbstractSlice<?>>
* Note that this function should be called only after all
* RocksDB instances referencing the compaction filter are closed.
- * Otherwise an undefined behavior will occur.
+ * Otherwise, undefined behavior will occur.
*/
@Override
protected final native void disposeInternal(final long handle);
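The dispose-ordering rule documented above is easiest to satisfy with try-with-resources, which closes resources in reverse declaration order, so the filter outlives the database that references it. A minimal sketch using the built-in RemoveEmptyValueCompactionFilter (the database path is illustrative):

    import org.rocksdb.*;

    public class FilterLifetimeSketch {
      public static void main(final String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (final RemoveEmptyValueCompactionFilter filter = new RemoveEmptyValueCompactionFilter();
             final Options options = new Options().setCreateIfMissing(true).setCompactionFilter(filter);
             final RocksDB db = RocksDB.open(options, "/tmp/filter-lifetime-db")) {
          db.put("k".getBytes(), "v".getBytes());
        } // closes in reverse order: db first, filter last
      }
    }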
diff --git a/java/src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java b/java/src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java
index 380b4461d..4bb985a34 100644
--- a/java/src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java
+++ b/java/src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java
@@ -15,7 +15,7 @@ public abstract class AbstractCompactionFilterFactory
diff --git a/java/src/main/java/org/rocksdb/AbstractComparator.java b/java/src/main/java/org/rocksdb/AbstractComparator.java
--- a/java/src/main/java/org/rocksdb/AbstractComparator.java
+++ b/java/src/main/java/org/rocksdb/AbstractComparator.java
* Used for determining the correct C++ cast in native code.
*
* @return The type of the comparator.
@@ -44,11 +44,11 @@ public abstract class AbstractComparator
* The name of the comparator. Used to check for comparator
* mismatches (i.e., a DB created with one comparator is
* accessed using a different comparator).
- *
+ *
* A new name should be used whenever
* the comparator implementation changes in a way that will cause
* the relative ordering of any two keys to change.
- *
+ *
* Names starting with "rocksdb." are reserved and should not be used.
*
* @return The name of this comparator implementation
diff --git a/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java b/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java
index b732d2495..2d1bf702b 100644
--- a/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java
+++ b/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java
@@ -18,108 +18,102 @@ import java.nio.ByteBuffer;
* {@link org.rocksdb.AbstractComparator} clean.
*/
class AbstractComparatorJniBridge {
+ /**
+ * Only called from JNI.
+ *
+ * Simply a bridge to calling
+ * {@link AbstractComparator#compare(ByteBuffer, ByteBuffer)},
+ * which ensures that the byte buffer lengths are correct
+ * before and after the call.
+ *
+ * @param comparator the comparator object on which to
+ * call {@link AbstractComparator#compare(ByteBuffer, ByteBuffer)}
+ * @param a buffer access to first key
+ * @param aLen the length of the a key,
+ * may be smaller than the buffer {@code a}
+ * @param b buffer access to second key
+ * @param bLen the length of the b key,
+ * may be smaller than the buffer {@code b}
+ *
+ * @return the result of the comparison
+ */
+ private static int compareInternal(final AbstractComparator comparator, final ByteBuffer a,
+ final int aLen, final ByteBuffer b, final int bLen) {
+ if (aLen != -1) {
+ a.mark();
+ a.limit(aLen);
+ }
+ if (bLen != -1) {
+ b.mark();
+ b.limit(bLen);
+ }
- /**
- * Only called from JNI.
- *
- * Simply a bridge to calling
- * {@link AbstractComparator#compare(ByteBuffer, ByteBuffer)},
- * which ensures that the byte buffer lengths are correct
- * before and after the call.
- *
- * @param comparator the comparator object on which to
- * call {@link AbstractComparator#compare(ByteBuffer, ByteBuffer)}
- * @param a buffer access to first key
- * @param aLen the length of the a key,
- * may be smaller than the buffer {@code a}
- * @param b buffer access to second key
- * @param bLen the length of the b key,
- * may be smaller than the buffer {@code b}
- *
- * @return the result of the comparison
- */
- private static int compareInternal(
- final AbstractComparator comparator,
- final ByteBuffer a, final int aLen,
- final ByteBuffer b, final int bLen) {
- if (aLen != -1) {
- a.mark();
- a.limit(aLen);
- }
- if (bLen != -1) {
- b.mark();
- b.limit(bLen);
- }
+ final int c = comparator.compare(a, b);
- final int c = comparator.compare(a, b);
+ if (aLen != -1) {
+ a.reset();
+ }
+ if (bLen != -1) {
+ b.reset();
+ }
- if (aLen != -1) {
- a.reset();
- }
- if (bLen != -1) {
- b.reset();
- }
+ return c;
+ }
- return c;
+ /**
+ * Only called from JNI.
+ *
+ * Simply a bridge to calling
+ * {@link AbstractComparator#findShortestSeparator(ByteBuffer, ByteBuffer)},
+ * which ensures that the byte buffer lengths are correct
+ * before the call.
+ *
+ * @param comparator the comparator object on which to
+ * call {@link AbstractComparator#findShortestSeparator(ByteBuffer, ByteBuffer)}
+ * @param start buffer access to the start key
+ * @param startLen the length of the start key,
+ * may be smaller than the buffer {@code start}
+ * @param limit buffer access to the limit key
+ * @param limitLen the length of the limit key,
+ * may be smaller than the buffer {@code limit}
+ *
+ * @return either {@code startLen} if the start key is unchanged, otherwise
+ * the new length of the start key
+ */
+ private static int findShortestSeparatorInternal(final AbstractComparator comparator,
+ final ByteBuffer start, final int startLen, final ByteBuffer limit, final int limitLen) {
+ if (startLen != -1) {
+ start.limit(startLen);
}
-
- /**
- * Only called from JNI.
- *
- * Simply a bridge to calling
- * {@link AbstractComparator#findShortestSeparator(ByteBuffer, ByteBuffer)},
- * which ensures that the byte buffer lengths are correct
- * before the call.
- *
- * @param comparator the comparator object on which to
- * call {@link AbstractComparator#findShortestSeparator(ByteBuffer, ByteBuffer)}
- * @param start buffer access to the start key
- * @param startLen the length of the start key,
- * may be smaller than the buffer {@code start}
- * @param limit buffer access to the limit key
- * @param limitLen the length of the limit key,
- * may be smaller than the buffer {@code limit}
- *
- * @return either {@code startLen} if the start key is unchanged, otherwise
- * the new length of the start key
- */
- private static int findShortestSeparatorInternal(
- final AbstractComparator comparator,
- final ByteBuffer start, final int startLen,
- final ByteBuffer limit, final int limitLen) {
- if (startLen != -1) {
- start.limit(startLen);
- }
- if (limitLen != -1) {
- limit.limit(limitLen);
- }
- comparator.findShortestSeparator(start, limit);
- return start.remaining();
+ if (limitLen != -1) {
+ limit.limit(limitLen);
}
+ comparator.findShortestSeparator(start, limit);
+ return start.remaining();
+ }
- /**
- * Only called from JNI.
- *
- * Simply a bridge to calling
- * {@link AbstractComparator#findShortestSeparator(ByteBuffer, ByteBuffer)},
- * which ensures that the byte buffer length is correct
- * before the call.
- *
- * @param comparator the comparator object on which to
- * call {@link AbstractComparator#findShortSuccessor(ByteBuffer)}
- * @param key buffer access to the key
- * @param keyLen the length of the key,
- * may be smaller than the buffer {@code key}
- *
- * @return either keyLen if the key is unchanged, otherwise the new length of the key
- */
- private static int findShortSuccessorInternal(
- final AbstractComparator comparator,
- final ByteBuffer key, final int keyLen) {
- if (keyLen != -1) {
- key.limit(keyLen);
- }
- comparator.findShortSuccessor(key);
- return key.remaining();
+ /**
+ * Only called from JNI.
+ *
+ * Simply a bridge to calling
+ * {@link AbstractComparator#findShortestSeparator(ByteBuffer, ByteBuffer)},
+ * which ensures that the byte buffer length is correct
+ * before the call.
+ *
+ * @param comparator the comparator object on which to
+ * call {@link AbstractComparator#findShortSuccessor(ByteBuffer)}
+ * @param key buffer access to the key
+ * @param keyLen the length of the key,
+ * may be smaller than the buffer {@code key}
+ *
+ * @return either keyLen if the key is unchanged, otherwise the new length of the key
+ */
+ private static int findShortSuccessorInternal(
+ final AbstractComparator comparator, final ByteBuffer key, final int keyLen) {
+ if (keyLen != -1) {
+ key.limit(keyLen);
}
+ comparator.findShortSuccessor(key);
+ return key.remaining();
+ }
}
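The bridge methods above window the native-backed buffers with mark()/limit() and restore them with reset(), so the same direct buffers can be reused across callbacks. A standalone sketch of that buffer discipline, outside JNI:

    import java.nio.ByteBuffer;

    public class BufferWindowSketch {
      public static void main(final String[] args) {
        final ByteBuffer key = ByteBuffer.allocateDirect(16);
        key.put(new byte[] {'a', 'b', 'c', 'd'});
        key.flip();

        key.mark();      // remember the position, as compareInternal does
        key.limit(2);    // expose only the first keyLen bytes to the comparator
        System.out.println(key.remaining()); // 2

        key.reset();     // undo any position change made by the comparator
        System.out.println(key.position()); // 0, ready for the next callback
      }
    }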
diff --git a/java/src/main/java/org/rocksdb/AbstractEventListener.java b/java/src/main/java/org/rocksdb/AbstractEventListener.java
index 6698acf88..d640d3423 100644
--- a/java/src/main/java/org/rocksdb/AbstractEventListener.java
+++ b/java/src/main/java/org/rocksdb/AbstractEventListener.java
@@ -71,8 +71,8 @@ public abstract class AbstractEventListener extends RocksCallbackObject implemen
/**
* Creates an Event Listener that will
- * received all callbacks from C++.
- *
+ * receive all callbacks from C++.
+ *
* If you don't need all callbacks, it is much more efficient to
* just register for the ones you need by calling
* {@link #AbstractEventListener(EnabledEventCallback...)} instead.
@@ -106,8 +106,8 @@ public abstract class AbstractEventListener extends RocksCallbackObject implemen
*/
private static long packToLong(final EnabledEventCallback... enabledEventCallbacks) {
long l = 0;
- for (int i = 0; i < enabledEventCallbacks.length; i++) {
- l |= 1 << enabledEventCallbacks[i].getValue();
+ for (final EnabledEventCallback enabledEventCallback : enabledEventCallbacks) {
+ l |= 1L << enabledEventCallback.getValue();
}
return l;
}
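The 1 -> 1L change in packToLong is a genuine fix, not just lint: with an int left operand the shift distance is reduced mod 32, and bit 31 sign-extends when the result widens to long, so a callback value of 31 or more would set the wrong bits in the mask. A quick demonstration of the difference:

    public class ShiftWidthSketch {
      public static void main(final String[] args) {
        // int shifts use only the low 5 bits of the distance: 33 & 31 == 1
        System.out.println(1 << 33);   // 2, not 2^33
        System.out.println(1L << 33);  // 8589934592, the intended bit

        // bit 31 of an int is the sign bit; widening it to long sign-extends
        System.out.println((long) (1 << 31)); // -2147483648
        System.out.println(1L << 31);         // 2147483648
      }
    }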
diff --git a/java/src/main/java/org/rocksdb/AbstractMutableOptions.java b/java/src/main/java/org/rocksdb/AbstractMutableOptions.java
index 7189272b8..1a6251bd4 100644
--- a/java/src/main/java/org/rocksdb/AbstractMutableOptions.java
+++ b/java/src/main/java/org/rocksdb/AbstractMutableOptions.java
@@ -53,25 +53,23 @@ public abstract class AbstractMutableOptions {
return buffer.toString();
}
- public static abstract class AbstractMutableOptionsBuilder<
- T extends AbstractMutableOptions,
- U extends AbstractMutableOptionsBuilder
diff --git a/java/src/main/java/org/rocksdb/AbstractNativeReference.java b/java/src/main/java/org/rocksdb/AbstractNativeReference.java
--- a/java/src/main/java/org/rocksdb/AbstractNativeReference.java
+++ b/java/src/main/java/org/rocksdb/AbstractNativeReference.java
* Explanatory note - When or if the Garbage Collector calls {@link Object#finalize()}
* depends on the JVM implementation and system conditions, which the programmer
diff --git a/java/src/main/java/org/rocksdb/AbstractSlice.java b/java/src/main/java/org/rocksdb/AbstractSlice.java
index 5a22e2956..0681b6758 100644
--- a/java/src/main/java/org/rocksdb/AbstractSlice.java
+++ b/java/src/main/java/org/rocksdb/AbstractSlice.java
@@ -8,7 +8,7 @@ package org.rocksdb;
/**
* Slices are used by RocksDB to provide
* efficient access to keys and values.
- *
+ *
* This class is package private, implementers
* should extend either of the public abstract classes:
* @see org.rocksdb.Slice
@@ -147,7 +147,7 @@ public abstract class AbstractSlice
* Note that this function should be called only after all
* Transactions referencing the comparator are closed.
- * Otherwise an undefined behavior will occur.
+ * Otherwise, undefined behavior will occur.
*/
@Override
protected void disposeInternal() {
diff --git a/java/src/main/java/org/rocksdb/AbstractWalFilter.java b/java/src/main/java/org/rocksdb/AbstractWalFilter.java
index d525045c6..fc77eab8e 100644
--- a/java/src/main/java/org/rocksdb/AbstractWalFilter.java
+++ b/java/src/main/java/org/rocksdb/AbstractWalFilter.java
@@ -41,7 +41,7 @@ public abstract class AbstractWalFilter
private static short logRecordFoundResultToShort(
final LogRecordFoundResult logRecordFoundResult) {
- short result = (short)(logRecordFoundResult.walProcessingOption.getValue() << 8);
+ final short result = (short) (logRecordFoundResult.walProcessingOption.getValue() << 8);
return (short)(result | (logRecordFoundResult.batchChanged ? 1 : 0));
}
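For reference, the packing performed by logRecordFoundResultToShort puts the WAL processing option in the high byte and the batch-changed flag in the low bit. A toy mirror of the scheme (the pack method and its names are illustrative):

    public class PackShortSketch {
      // Mirror of logRecordFoundResultToShort: option id in the high byte,
      // batch-changed flag in the low bit.
      static short pack(final byte option, final boolean batchChanged) {
        final short result = (short) (option << 8);
        return (short) (result | (batchChanged ? 1 : 0));
      }

      public static void main(final String[] args) {
        final short packed = pack((byte) 2, true);
        System.out.println(packed >> 8); // 2 (the option)
        System.out.println(packed & 1);  // 1 (the flag)
      }
    }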
diff --git a/java/src/main/java/org/rocksdb/AbstractWriteBatch.java b/java/src/main/java/org/rocksdb/AbstractWriteBatch.java
index 9527a2fd9..41d967f53 100644
--- a/java/src/main/java/org/rocksdb/AbstractWriteBatch.java
+++ b/java/src/main/java/org/rocksdb/AbstractWriteBatch.java
@@ -20,25 +20,25 @@ public abstract class AbstractWriteBatch extends RocksObject
}
@Override
- public void put(byte[] key, byte[] value) throws RocksDBException {
+ public void put(final byte[] key, final byte[] value) throws RocksDBException {
put(nativeHandle_, key, key.length, value, value.length);
}
@Override
- public void put(ColumnFamilyHandle columnFamilyHandle, byte[] key,
- byte[] value) throws RocksDBException {
+ public void put(final ColumnFamilyHandle columnFamilyHandle, final byte[] key, final byte[] value)
+ throws RocksDBException {
put(nativeHandle_, key, key.length, value, value.length,
columnFamilyHandle.nativeHandle_);
}
@Override
- public void merge(byte[] key, byte[] value) throws RocksDBException {
+ public void merge(final byte[] key, final byte[] value) throws RocksDBException {
merge(nativeHandle_, key, key.length, value, value.length);
}
@Override
- public void merge(ColumnFamilyHandle columnFamilyHandle, byte[] key,
- byte[] value) throws RocksDBException {
+ public void merge(final ColumnFamilyHandle columnFamilyHandle, final byte[] key,
+ final byte[] value) throws RocksDBException {
merge(nativeHandle_, key, key.length, value, value.length,
columnFamilyHandle.nativeHandle_);
}
@@ -53,7 +53,7 @@ public abstract class AbstractWriteBatch extends RocksObject
}
@Override
- public void put(ColumnFamilyHandle columnFamilyHandle, final ByteBuffer key,
+ public void put(final ColumnFamilyHandle columnFamilyHandle, final ByteBuffer key,
final ByteBuffer value) throws RocksDBException {
assert key.isDirect() && value.isDirect();
putDirect(nativeHandle_, key, key.position(), key.remaining(), value, value.position(),
@@ -63,12 +63,12 @@ public abstract class AbstractWriteBatch extends RocksObject
}
@Override
- public void delete(byte[] key) throws RocksDBException {
+ public void delete(final byte[] key) throws RocksDBException {
delete(nativeHandle_, key, key.length);
}
@Override
- public void delete(ColumnFamilyHandle columnFamilyHandle, byte[] key)
+ public void delete(final ColumnFamilyHandle columnFamilyHandle, final byte[] key)
throws RocksDBException {
delete(nativeHandle_, key, key.length, columnFamilyHandle.nativeHandle_);
}
@@ -80,7 +80,7 @@ public abstract class AbstractWriteBatch extends RocksObject
}
@Override
- public void delete(ColumnFamilyHandle columnFamilyHandle, final ByteBuffer key)
+ public void delete(final ColumnFamilyHandle columnFamilyHandle, final ByteBuffer key)
throws RocksDBException {
deleteDirect(
nativeHandle_, key, key.position(), key.remaining(), columnFamilyHandle.nativeHandle_);
@@ -88,31 +88,30 @@ public abstract class AbstractWriteBatch extends RocksObject
}
@Override
- public void singleDelete(byte[] key) throws RocksDBException {
+ public void singleDelete(final byte[] key) throws RocksDBException {
singleDelete(nativeHandle_, key, key.length);
}
@Override
- public void singleDelete(ColumnFamilyHandle columnFamilyHandle, byte[] key)
+ public void singleDelete(final ColumnFamilyHandle columnFamilyHandle, final byte[] key)
throws RocksDBException {
singleDelete(nativeHandle_, key, key.length, columnFamilyHandle.nativeHandle_);
}
@Override
- public void deleteRange(byte[] beginKey, byte[] endKey)
- throws RocksDBException {
+ public void deleteRange(final byte[] beginKey, final byte[] endKey) throws RocksDBException {
deleteRange(nativeHandle_, beginKey, beginKey.length, endKey, endKey.length);
}
@Override
- public void deleteRange(ColumnFamilyHandle columnFamilyHandle,
- byte[] beginKey, byte[] endKey) throws RocksDBException {
+ public void deleteRange(final ColumnFamilyHandle columnFamilyHandle, final byte[] beginKey,
+ final byte[] endKey) throws RocksDBException {
deleteRange(nativeHandle_, beginKey, beginKey.length, endKey, endKey.length,
columnFamilyHandle.nativeHandle_);
}
@Override
- public void putLogData(byte[] blob) throws RocksDBException {
+ public void putLogData(final byte[] blob) throws RocksDBException {
putLogData(nativeHandle_, blob, blob.length);
}
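The signature changes above only add parameter finality, so call sites are unaffected. Typical batch usage for reference (path and merge operator choice are illustrative):

    import org.rocksdb.*;

    public class BatchSketch {
      public static void main(final String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (final Options options =
                 new Options().setCreateIfMissing(true).setMergeOperatorName("stringappend");
             final RocksDB db = RocksDB.open(options, "/tmp/batch-db");
             final WriteBatch batch = new WriteBatch();
             final WriteOptions writeOpts = new WriteOptions()) {
          batch.put("k1".getBytes(), "v1".getBytes());
          batch.merge("k2".getBytes(), "v2".getBytes());
          batch.delete("k0".getBytes());
          db.write(writeOpts, batch); // all three updates apply atomically
        }
      }
    }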
diff --git a/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java
index 5338bc42d..d1d1123dd 100644
--- a/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java
+++ b/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java
@@ -9,12 +9,12 @@ import java.util.List;
/**
* Advanced Column Family Options which are not
- * mutable (i.e. present in {@link AdvancedMutableColumnFamilyOptionsInterface}
- *
+ * mutable (i.e. present in {@link AdvancedMutableColumnFamilyOptionsInterface})
+ *
* Taken from include/rocksdb/advanced_options.h
*/
public interface AdvancedColumnFamilyOptionsInterface<
- T extends AdvancedColumnFamilyOptionsInterface
* When using an OptimisticTransactionDB:
* If this value is too low, some transactions may fail at commit time due
* to not being able to determine whether there were any write conflicts.
- *
+ *
* When using a TransactionDB:
* If Transaction::SetSnapshot is used, TransactionDB will read either
* in-memory write buffers or SST files to do write-conflict checking.
* Increasing this value can reduce the number of reads to SST files
* done for conflict detection.
- *
+ *
* Setting this value to 0 will cause write buffers to be freed immediately
* after they are flushed.
* If this value is set to -1,
* {@link AdvancedMutableColumnFamilyOptionsInterface#maxWriteBufferNumber()}
* will be used.
- *
+ *
* Default:
* If using a TransactionDB/OptimisticTransactionDB, the default value will
* be set to the value of
@@ -336,14 +336,13 @@ public interface AdvancedColumnFamilyOptionsInterface<
/**
* Set compaction style for DB.
- *
+ *
* Default: LEVEL.
*
* @param compactionStyle Compaction style.
* @return the reference to the current options.
*/
- ColumnFamilyOptionsInterface setCompactionStyle(
- CompactionStyle compactionStyle);
+ ColumnFamilyOptionsInterface
* Default: {@link CompactionPriority#ByCompensatedSize}
*
* @param compactionPriority The compaction priority
@@ -444,7 +443,7 @@ public interface AdvancedColumnFamilyOptionsInterface<
* By default, RocksDB runs consistency checks on the LSM every time the LSM
* changes (Flush, Compaction, AddFile). Use this option if you need to
* disable them.
- *
+ *
* Default: true
*
* @param forceConsistencyChecks false to disable consistency checks
diff --git a/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java
index 162d15d80..c8fc84173 100644
--- a/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java
+++ b/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java
@@ -7,7 +7,7 @@ package org.rocksdb;
/**
* Advanced Column Family Options which are mutable
- *
+ *
* Taken from include/rocksdb/advanced_options.h
* and MutableCFOptions in util/cf_options.h
*/
@@ -58,8 +58,8 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* if prefix_extractor is set and memtable_prefix_bloom_size_ratio is not 0,
* create prefix bloom for memtable with the size of
* write_buffer_size * memtable_prefix_bloom_size_ratio.
- * If it is larger than 0.25, it is santinized to 0.25.
- *
+ * If it is larger than 0.25, it is sanitized to 0.25.
+ *
* Default: 0 (disabled)
*
* @param memtablePrefixBloomSizeRatio the ratio of memtable used by the
@@ -73,8 +73,8 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* if prefix_extractor is set and memtable_prefix_bloom_size_ratio is not 0,
* create prefix bloom for memtable with the size of
* write_buffer_size * memtable_prefix_bloom_size_ratio.
- * If it is larger than 0.25, it is santinized to 0.25.
- *
+ * If it is larger than 0.25, it is sanitized to 0.25.
+ *
* Default: 0 (disabled)
*
* @return the ratio of memtable used by the bloom filter
@@ -85,7 +85,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* Threshold used in the MemPurge (memtable garbage collection)
* feature. A value of 0.0 corresponds to no MemPurge,
* a value of 1.0 will trigger a MemPurge as often as possible.
- *
+ *
* Default: 0.0 (disabled)
*
* @param experimentalMempurgeThreshold the threshold used by
@@ -98,7 +98,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* Threshold used in the MemPurge (memtable garbage collection)
* feature. A value of 0.0 corresponds to no MemPurge,
* a value of 1.0 will trigger a MemPurge as often as possible.
- *
+ *
* Default: 0 (disabled)
*
* @return the threshold used by the MemPurge decider
@@ -109,7 +109,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* Enable whole key bloom filter in memtable. Note this will only take effect
* if memtable_prefix_bloom_size_ratio is not 0. Enabling whole key filtering
* can potentially reduce CPU usage for point-look-ups.
- *
+ *
* Default: false (disabled)
*
* @param memtableWholeKeyFiltering true if whole key bloom filter is enabled
@@ -154,12 +154,12 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* The size of one block in arena memory allocation.
* If ≤ 0, a proper value is automatically calculated (usually 1/10 of
* writer_buffer_size).
- *
+ *
* There are two additional restriction of the specified size:
* (1) size should be in the range of [4096, 2 << 30] and
* (2) be the multiple of the CPU word (which helps with the memory
* alignment).
- *
+ *
* We'll automatically check and adjust the size number to make sure it
* conforms to the restrictions.
* Default: 0
@@ -175,12 +175,12 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* The size of one block in arena memory allocation.
* If ≤ 0, a proper value is automatically calculated (usually 1/10 of
* writer_buffer_size).
- *
+ *
* There are two additional restriction of the specified size:
* (1) size should be in the range of [4096, 2 << 30] and
* (2) be the multiple of the CPU word (which helps with the memory
* alignment).
- *
+ *
* We'll automatically check and adjust the size number to make sure it
* conforms to the restrictions.
* Default: 0
@@ -294,7 +294,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* @param multiplier the ratio between the total size of level-(L+1)
* files and the total size of level-L files for all L.
* @return the reference to the current options.
- *
+ *
* See {@link MutableColumnFamilyOptionsInterface#setMaxBytesForLevelBase(long)}
*/
T setMaxBytesForLevelMultiplier(double multiplier);
@@ -306,7 +306,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
*
* @return the ratio between the total size of level-(L+1) files and
* the total size of level-L files for all L.
- *
+ *
* See {@link MutableColumnFamilyOptionsInterface#maxBytesForLevelBase()}
*/
double maxBytesForLevelMultiplier();
@@ -315,7 +315,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* Different max-size multipliers for different levels.
* These are multiplied by max_bytes_for_level_multiplier to arrive
* at the max-size of each level.
- *
+ *
* Default: 1
*
* @param maxBytesForLevelMultiplierAdditional The max-size multipliers
@@ -329,7 +329,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* Different max-size multipliers for different levels.
* These are multiplied by max_bytes_for_level_multiplier to arrive
* at the max-size of each level.
- *
+ *
* Default: 1
*
* @return The max-size multipliers for each level
@@ -339,7 +339,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
/**
* All writes will be slowed down to at least delayed_write_rate if estimated
* bytes needed to be compaction exceed this threshold.
- *
+ *
* Default: 64GB
*
* @param softPendingCompactionBytesLimit The soft limit to impose on
@@ -352,7 +352,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
/**
* All writes will be slowed down to at least delayed_write_rate if estimated
* bytes needed to be compaction exceed this threshold.
- *
+ *
* Default: 64GB
*
* @return The soft limit to impose on compaction
@@ -362,7 +362,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
/**
* All writes are stopped if estimated bytes needed to be compaction exceed
* this threshold.
- *
+ *
* Default: 256GB
*
* @param hardPendingCompactionBytesLimit The hard limit to impose on
@@ -375,7 +375,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
/**
* All writes are stopped if estimated bytes needed to be compaction exceed
* this threshold.
- *
+ *
* Default: 256GB
*
* @return The hard limit to impose on compaction
@@ -390,7 +390,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* Default: 8
*
* @param maxSequentialSkipInIterations the number of keys could
- * be skipped in a iteration.
+ * be skipped in an iteration.
* @return the reference to the current options.
*/
T setMaxSequentialSkipInIterations(
@@ -403,19 +403,19 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* skipped before a reseek is issued.
* Default: 8
*
- * @return the number of keys could be skipped in a iteration.
+ * @return the number of keys could be skipped in an iteration.
*/
long maxSequentialSkipInIterations();
/**
* Maximum number of successive merge operations on a key in the memtable.
- *
+ *
* When a merge operation is added to the memtable and the maximum number of
* successive merges is reached, the value of the key will be calculated and
* inserted into the memtable instead of the merge operation. This will
* ensure that there are never more than max_successive_merges merge
* operations in the memtable.
- *
+ *
* Default: 0 (disabled)
*
* @param maxSuccessiveMerges the maximum number of successive merges.
@@ -428,13 +428,13 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
/**
* Maximum number of successive merge operations on a key in the memtable.
- *
+ *
* When a merge operation is added to the memtable and the maximum number of
* successive merges is reached, the value of the key will be calculated and
* inserted into the memtable instead of the merge operation. This will
* ensure that there are never more than max_successive_merges merge
* operations in the memtable.
- *
+ *
* Default: 0 (disabled)
*
* @return the maximum number of successive merges.
@@ -443,7 +443,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
/**
* After writing every SST file, reopen it and read all the keys.
- *
+ *
* Default: false
*
* @param paranoidFileChecks true to enable paranoid file checks
@@ -454,7 +454,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
/**
* After writing every SST file, reopen it and read all the keys.
- *
+ *
* Default: false
*
* @return true if paranoid file checks are enabled
@@ -463,7 +463,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
/**
* Measure IO stats in compactions and flushes, if true.
- *
+ *
* Default: false
*
* @param reportBgIoStats true to enable reporting
@@ -483,11 +483,11 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* Non-bottom-level files older than TTL will go through the compaction
* process. This needs {@link MutableDBOptionsInterface#maxOpenFiles()} to be
* set to -1.
- *
+ *
* Enabled only for level compaction for now.
- *
+ *
* Default: 0 (disabled)
- *
+ *
* Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
*
@@ -500,7 +500,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
/**
* Get the TTL for Non-bottom-level files that will go through the compaction
* process.
- *
+ *
* See {@link #setTtl(long)}.
*
* @return the time-to-live.
@@ -513,18 +513,18 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* One main use of the feature is to make sure a file goes through compaction
* filters periodically. Users can also use the feature to clear up SST
* files using old format.
- *
+ *
* A file's age is computed by looking at file_creation_time or creation_time
* table properties in order, if they have valid non-zero values; if not, the
* age is based on the file's last modified time (given by the underlying
* Env).
- *
+ *
* Supported in Level and FIFO compaction.
* In FIFO compaction, this option has the same meaning as TTL and whichever
* stricter will be used.
* Pre-req: max_open_file == -1.
* unit: seconds. Ex: 7 days = 7 * 24 * 60 * 60
- *
+ *
* Values:
* 0: Turn off Periodic compactions.
* UINT64_MAX - 1 (i.e 0xfffffffffffffffe): Let RocksDB control this feature
@@ -534,9 +534,9 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* In FIFO compaction, since the option has the same meaning as ttl,
* when this value is left default, and ttl is left to 0, 30 days will be
* used. Otherwise, min(ttl, periodic_compaction_seconds) will be used.
- *
+ *
* Default: 0xfffffffffffffffe (allow RocksDB to auto-tune)
- *
+ *
* Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
*
@@ -548,7 +548,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
/**
* Get the periodicCompactionSeconds.
- *
+ *
* See {@link #setPeriodicCompactionSeconds(long)}.
*
* @return the periodic compaction in seconds.
@@ -566,9 +566,9 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* for reads. See also the options min_blob_size, blob_file_size,
* blob_compression_type, enable_blob_garbage_collection, and
* blob_garbage_collection_age_cutoff below.
- *
+ *
* Default: false
- *
+ *
* Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
*
@@ -585,9 +585,9 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* for reads. See also the options min_blob_size, blob_file_size,
* blob_compression_type, enable_blob_garbage_collection, and
* blob_garbage_collection_age_cutoff below.
- *
+ *
* Default: false
- *
+ *
* Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
*
@@ -601,9 +601,9 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* alongside the keys in SST files in the usual fashion. A value of zero for
* this option means that all values are stored in blob files. Note that
* enable_blob_files has to be set in order for this option to have any effect.
- *
+ *
* Default: 0
- *
+ *
* Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
*
@@ -618,9 +618,9 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* alongside the keys in SST files in the usual fashion. A value of zero for
* this option means that all values are stored in blob files. Note that
* enable_blob_files has to be set in order for this option to have any effect.
- *
+ *
* Default: 0
- *
+ *
* Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
*
@@ -632,9 +632,9 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* Set the size limit for blob files. When writing blob files, a new file is opened
* once this limit is reached. Note that enable_blob_files has to be set in
* order for this option to have any effect.
- *
+ *
* Default: 256 MB
- *
+ *
* Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
*
@@ -656,9 +656,9 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* Set the compression algorithm to use for large values stored in blob files. Note
* that enable_blob_files has to be set in order for this option to have any
* effect.
- *
+ *
* Default: no compression
- *
+ *
* Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
*
@@ -683,7 +683,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* relocated to new files as they are encountered during compaction, which makes
* it possible to clean up blob files once they contain nothing but
* obsolete/garbage blobs. See also blob_garbage_collection_age_cutoff below.
- *
+ *
* Default: false
*
* @param enableBlobGarbageCollection the new enabled/disabled state of blob garbage collection
@@ -698,7 +698,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* relocated to new files as they are encountered during compaction, which makes
* it possible to clean up blob files once they contain nothing but
* obsolete/garbage blobs. See also blob_garbage_collection_age_cutoff below.
- *
+ *
* Default: false
*
* @return true if blob garbage collection is currently enabled.
@@ -711,7 +711,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* where N = garbage_collection_cutoff * number_of_blob_files. Note that
* enable_blob_garbage_collection has to be set in order for this option to have
* any effect.
- *
+ *
* Default: 0.25
*
* @param blobGarbageCollectionAgeCutoff the new age cutoff
@@ -725,7 +725,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* where N = garbage_collection_cutoff * number_of_blob_files. Note that
* enable_blob_garbage_collection has to be set in order for this option to have
* any effect.
- *
+ *
* Default: 0.25
*
* @return the current age cutoff for garbage collection
@@ -738,12 +738,12 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
* the blob files in question, assuming they are all eligible based on the
* value of {@link #blobGarbageCollectionAgeCutoff} above. This option is
* currently only supported with leveled compactions.
- *
+ *
* Note that {@link #enableBlobGarbageCollection} has to be set in order for this
* option to have any effect.
- *
+ *
* Default: 1.0
- *
+ *
* Dynamically changeable through the SetOptions() API
*
* @param blobGarbageCollectionForceThreshold new value for the threshold
@@ -752,16 +752,16 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
T setBlobGarbageCollectionForceThreshold(double blobGarbageCollectionForceThreshold);
/**
- * Get the current value for the {@link #blobGarbageCollectionForceThreshold}
+ * Get the current value for the {@code #blobGarbageCollectionForceThreshold}
* @return the current threshold at which garbage collection of blobs is forced
*/
double blobGarbageCollectionForceThreshold();
/**
* Set compaction readahead for blob files.
- *
+ *
* Default: 0
- *
+ *
* Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
*
@@ -780,9 +780,9 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
/**
* Set a certain LSM tree level to enable blob files.
- *
+ *
* Default: 0
- *
+ *
* Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
*
@@ -794,7 +794,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
/**
* Get the starting LSM tree level to enable blob files.
- *
+ *
* Default: 0
*
* @return the current LSM tree level to enable blob files.
@@ -803,13 +803,13 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
/**
* Set a certain prepopulate blob cache option.
- *
+ *
* Default: 0
- *
+ *
* Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
*
- * @param prepopulateBlobCache the prepopulate blob cache option
+ * @param prepopulateBlobCache prepopulate the blob cache option
*
* @return the reference to the current options.
*/
@@ -817,7 +817,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface<
/**
* Get the prepopulate blob cache option.
- *
+ *
* Default: 0
*
* @return the current prepopulate blob cache option.
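Many of the blob options documented in this file are, as the Javadoc notes, mutable at runtime via RocksDB#setOptions. A hedged sketch of both the static and the dynamic paths (the path and sizes are illustrative):

    import org.rocksdb.*;

    public class BlobOptionsSketch {
      public static void main(final String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (final Options options = new Options()
                 .setCreateIfMissing(true)
                 .setEnableBlobFiles(true)   // required for the other blob options to take effect
                 .setMinBlobSize(4096)       // values >= 4 KiB go to blob files
                 .setEnableBlobGarbageCollection(true);
             final RocksDB db = RocksDB.open(options, "/tmp/blob-db")) {
          // change a mutable blob option on the live database
          db.setOptions(MutableColumnFamilyOptions.builder()
                            .setBlobGarbageCollectionAgeCutoff(0.5)
                            .build());
        }
      }
    }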
diff --git a/java/src/main/java/org/rocksdb/BackupEngine.java b/java/src/main/java/org/rocksdb/BackupEngine.java
index 515824a91..3ab220683 100644
--- a/java/src/main/java/org/rocksdb/BackupEngine.java
+++ b/java/src/main/java/org/rocksdb/BackupEngine.java
@@ -9,7 +9,7 @@ import java.util.List;
/**
* BackupEngine allows you to backup
* and restore the database
- *
+ *
* Be aware, that `new BackupEngine` takes time proportional to the amount
* of backups. So if you have a slow filesystem to backup
* and you have a lot of backups then restoring can take some time.
@@ -39,12 +39,12 @@ public class BackupEngine extends RocksObject implements AutoCloseable {
/**
* Captures the state of the database in the latest backup
- *
+ *
* Just a convenience for {@link #createNewBackup(RocksDB, boolean)} with
* the flushBeforeBackup parameter set to false
*
* @param db The database to backup
- *
+ *
* Note - This method is not thread safe
*
* @throws RocksDBException thrown if a new backup could not be created
@@ -72,7 +72,7 @@ public class BackupEngine extends RocksObject implements AutoCloseable {
* always be consistent with the current state of the
* database regardless of the flushBeforeBackup
* parameter.
- *
+ *
* Note - This method is not thread safe
*
* @throws RocksDBException thrown if a new backup could not be created
@@ -105,7 +105,7 @@ public class BackupEngine extends RocksObject implements AutoCloseable {
* always be consistent with the current state of the
* database regardless of the flushBeforeBackup
* parameter.
- *
+ *
* Note - This method is not thread safe
*
* @throws RocksDBException thrown if a new backup could not be created
@@ -179,11 +179,11 @@ public class BackupEngine extends RocksObject implements AutoCloseable {
/**
* Restore the database from a backup
- *
+ *
* IMPORTANT: if options.share_table_files == true and you restore the DB
* from some backup that is not the latest, and you start creating new
* backups from the new DB, they will probably fail!
- *
+ *
* Example: Let's say you have backups 1, 2, 3, 4, 5 and you restore 3.
* If you add new data to the DB and try creating a new backup now, the
* database will diverge from backups 4 and 5 and the new backup will fail.
@@ -226,7 +226,7 @@ public class BackupEngine extends RocksObject implements AutoCloseable {
restoreOptions.nativeHandle_);
}
- private native static long open(final long env, final long backupEngineOptions)
+ private static native long open(final long env, final long backupEngineOptions)
throws RocksDBException;
private native void createNewBackup(final long handle, final long dbHandle,
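Per the Javadoc above, a backup created with flushBeforeBackup == true is consistent without the WAL. A minimal end-to-end sketch (the directories are illustrative):

    import org.rocksdb.*;

    public class BackupSketch {
      public static void main(final String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (final Options options = new Options().setCreateIfMissing(true);
             final RocksDB db = RocksDB.open(options, "/tmp/backup-src-db");
             final BackupEngineOptions backupOpts = new BackupEngineOptions("/tmp/backups");
             final BackupEngine backupEngine = BackupEngine.open(Env.getDefault(), backupOpts)) {
          db.put("k".getBytes(), "v".getBytes());
          backupEngine.createNewBackup(db, true); // flush first, so no WAL is needed
        }
      }
    }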
diff --git a/java/src/main/java/org/rocksdb/BackupEngineOptions.java b/java/src/main/java/org/rocksdb/BackupEngineOptions.java
index 6e2dacc02..2a358faac 100644
--- a/java/src/main/java/org/rocksdb/BackupEngineOptions.java
+++ b/java/src/main/java/org/rocksdb/BackupEngineOptions.java
@@ -25,7 +25,7 @@ public class BackupEngineOptions extends RocksObject {
/**
* BackupEngineOptions constructor.
* Default: null
*
* @param env The environment to use
@@ -72,9 +72,9 @@ public class BackupEngineOptions extends RocksObject {
/**
* Backup Env object. It will be used for backup file I/O. If it's
- * null, backups will be written out using DBs Env. Otherwise
+ * null, backups will be written out using the DB's Env. Otherwise,
* backup's I/O will be performed using this object.
- *
+ *
* Default: null
*
* @return The environment in use
@@ -128,7 +128,7 @@ public class BackupEngineOptions extends RocksObject {
/**
* Set the logger to use for Backup info and error messages
- *
+ *
* Default: null
*
* @return The logger in use for the backup
@@ -143,7 +143,7 @@ public class BackupEngineOptions extends RocksObject {
* @param sync If {@code sync == true}, we can guarantee you'll get consistent
* backup even on a machine crash/reboot. Backup process is slower with sync
* enabled. If {@code sync == false}, we don't guarantee anything on machine
- * reboot. However, chances are some of the backups are consistent.
+ * reboot. However, chances are some backups are consistent.
*
* Default: true Set if log files shall be persisted.
* Default: null
*
* @param backupRateLimiter The rate limiter to use for the backup
@@ -266,7 +266,7 @@ public class BackupEngineOptions extends RocksObject {
/**
* Backup rate limiter. Used to control transfer speed for backup. If this is
* not null, {@link #backupRateLimit()} is ignored.
- *
+ *
* Default: null
*
* @return The rate limiter in use for the backup
@@ -308,7 +308,7 @@ public class BackupEngineOptions extends RocksObject {
/**
* Restore rate limiter. Used to control transfer speed during restore. If
* this is not null, {@link #restoreRateLimit()} is ignored.
- *
+ *
* Default: null
*
* @param restoreRateLimiter The rate limiter to use during restore
@@ -324,7 +324,7 @@ public class BackupEngineOptions extends RocksObject {
/**
* Restore rate limiter. Used to control transfer speed during restore. If
* this is not null, {@link #restoreRateLimit()} is ignored.
- *
+ *
* Default: null
*
* @return The rate limiter in use during restore
@@ -400,7 +400,7 @@ public class BackupEngineOptions extends RocksObject {
/**
* During backup user can get callback every time next
* {@link #callbackTriggerIntervalSize()} bytes being copied.
- *
+ *
* Default: 4194304
*
* @param callbackTriggerIntervalSize The interval size for the
@@ -416,8 +416,8 @@ public class BackupEngineOptions extends RocksObject {
/**
* During backup user can get callback every time next
- * {@link #callbackTriggerIntervalSize()} bytes being copied.
- *
+ * {@code #callbackTriggerIntervalSize()} bytes being copied.
+ *
* Default: 4194304
*
* @return The interval size for the callback trigger
@@ -427,7 +427,7 @@ public class BackupEngineOptions extends RocksObject {
return callbackTriggerIntervalSize(nativeHandle_);
}
- private native static long newBackupEngineOptions(final String path);
+ private static native long newBackupEngineOptions(final String path);
private native String backupDir(long handle);
private native void setBackupEnv(final long handle, final long envHandle);
private native void setShareTableFiles(long handle, boolean flag);
diff --git a/java/src/main/java/org/rocksdb/BackupInfo.java b/java/src/main/java/org/rocksdb/BackupInfo.java
index 9244e4eb1..9581b098f 100644
--- a/java/src/main/java/org/rocksdb/BackupInfo.java
+++ b/java/src/main/java/org/rocksdb/BackupInfo.java
@@ -68,9 +68,9 @@ public class BackupInfo {
return app_metadata_;
}
- private int backupId_;
- private long timestamp_;
- private long size_;
- private int numberFiles_;
- private String app_metadata_;
+ private final int backupId_;
+ private final long timestamp_;
+ private final long size_;
+ private final int numberFiles_;
+ private final String app_metadata_;
}
diff --git a/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java b/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java
index 9300468b0..70dee3dd9 100644
--- a/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java
+++ b/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java
@@ -6,10 +6,10 @@ package org.rocksdb;
/**
* The config for plain table sst format.
- *
+ *
* BlockBasedTable is a RocksDB's default SST file format.
*/
-//TODO(AR) should be renamed BlockBasedTableOptions
+// TODO(AR) should be renamed BlockBasedTableOptions
public class BlockBasedTableConfig extends TableFormatConfig {
public BlockBasedTableConfig() {
@@ -243,7 +243,7 @@ public class BlockBasedTableConfig extends TableFormatConfig {
* Disable block cache. If this is set to true,
* then no block cache should be used, and the {@link #setBlockCache(Cache)}
* should point to a {@code null} object.
- *
+ *
* Default: false
*
* @param noBlockCache if use block cache
@@ -257,10 +257,10 @@ public class BlockBasedTableConfig extends TableFormatConfig {
/**
* Use the specified cache for blocks.
* When not null this take precedence even if the user sets a block cache size.
- *
+ *
* {@link org.rocksdb.Cache} should not be disposed before options instances
* using this cache is disposed.
- *
+ *
* {@link org.rocksdb.Cache} instance can be re-used in multiple options
* instances.
*
@@ -276,7 +276,7 @@ public class BlockBasedTableConfig extends TableFormatConfig {
/**
* Use the specified persistent cache.
- *
+ *
* If {@code !null} use the specified cache for pages read from device,
* otherwise no page cache is used.
*
@@ -327,7 +327,7 @@ public class BlockBasedTableConfig extends TableFormatConfig {
* is less than this specified number and adding a new record to the block
* will exceed the configured block size, then this block will be closed and
* the new record will be written to the next block.
- *
+ *
* Default is 10.
*
* @param blockSizeDeviation the deviation to block size allowed
@@ -414,7 +414,7 @@ public class BlockBasedTableConfig extends TableFormatConfig {
/**
* Use partitioned full filters for each SST file. This option is incompatible
* with block-based filters.
- *
+ *
* Defaults to false.
*
* @param partitionFilters use partition filters.
@@ -428,7 +428,7 @@ public class BlockBasedTableConfig extends TableFormatConfig {
/***
* Option to generate Bloom filters that minimize memory
* internal fragmentation.
- *
+ *
* See {@link #setOptimizeFiltersForMemory(boolean)}.
*
* @return true if bloom filters are used to minimize memory internal
@@ -442,7 +442,7 @@ public class BlockBasedTableConfig extends TableFormatConfig {
/**
* Option to generate Bloom filters that minimize memory
* internal fragmentation.
- *
+ *
* When false, malloc_usable_size is not available, or format_version < 5,
* filters are generated without regard to internal fragmentation when
* loaded into memory (historical behavior). When true (and
@@ -452,21 +452,21 @@ public class BlockBasedTableConfig extends TableFormatConfig {
* the reading DB has the same memory allocation characteristics as the
* generating DB. This option does not break forward or backward
* compatibility.
- *
+ *
* While individual filters will vary in bits/key and false positive rate
* when setting is true, the implementation attempts to maintain a weighted
* average FP rate for filters consistent with this option set to false.
- *
+ *
* With Jemalloc for example, this setting is expected to save about 10% of
* the memory footprint and block cache charge of filters, while increasing
* disk usage of filters by about 1-2% due to encoding efficiency losses
* with variance in bits/key.
- *
+ *
* NOTE: Because some memory counted by block cache might be unmapped pages
* within internal fragmentation, this option can increase observed RSS
* memory usage. With {@link #cacheIndexAndFilterBlocks()} == true,
* this option makes the block cache better at using space it is allowed.
- *
+ *
* NOTE: Do not set to true if you do not trust malloc_usable_size. With
* this option, RocksDB might access an allocated memory object beyond its
* original size if malloc_usable_size says it is safe to do so. While this
@@ -495,9 +495,9 @@ public class BlockBasedTableConfig extends TableFormatConfig {
/**
* Use delta encoding to compress keys in blocks.
- *
+ *
* NOTE: {@link ReadOptions#pinData()} requires this option to be disabled.
- *
+ *
* Default: true
*
* @param useDeltaEncoding true to enable delta encoding
@@ -521,10 +521,10 @@ public class BlockBasedTableConfig extends TableFormatConfig {
/**
* Use the specified filter policy to reduce disk reads.
- *
+ *
* {@link org.rocksdb.Filter} should not be closed before options instances
* using this filter are closed.
- *
+ *
* {@link org.rocksdb.Filter} instance can be re-used in multiple options
* instances.
*
@@ -576,7 +576,7 @@ public class BlockBasedTableConfig extends TableFormatConfig {
/**
* Returns true when compression verification is enabled.
- *
+ *
* See {@link #setVerifyCompression(boolean)}.
*
* @return true if compression verification is enabled.
@@ -602,7 +602,7 @@ public class BlockBasedTableConfig extends TableFormatConfig {
/**
* Get the Read amplification bytes per-bit.
- *
+ *
* See {@link #setReadAmpBytesPerBit(int)}.
*
* @return the bytes per-bit.
@@ -613,27 +613,27 @@ public class BlockBasedTableConfig extends TableFormatConfig {
/**
* Set the Read amplification bytes per-bit.
- *
+ *
* If used, For every data block we load into memory, we will create a bitmap
* of size ((block_size / `read_amp_bytes_per_bit`) / 8) bytes. This bitmap
* will be used to figure out the percentage we actually read of the blocks.
- *
+ *
* When this feature is used Tickers::READ_AMP_ESTIMATE_USEFUL_BYTES and
* Tickers::READ_AMP_TOTAL_READ_BYTES can be used to calculate the
* read amplification using this formula
* (READ_AMP_TOTAL_READ_BYTES / READ_AMP_ESTIMATE_USEFUL_BYTES)
- *
+ *
* value => memory usage (percentage of loaded blocks memory)
* 1 => 12.50 %
* 2 => 06.25 %
* 4 => 03.12 %
* 8 => 01.56 %
* 16 => 00.78 %
- *
+ *
* Note: This number must be a power of 2, if not it will be sanitized
* to be the next lowest power of 2, for example a value of 7 will be
* treated as 4, a value of 19 will be treated as 16.
- *
+ *
* Default: 0 (disabled)
*
* @param readAmpBytesPerBit the bytes per-bit
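To make the bitmap overhead concrete: with a 4 KiB block and read_amp_bytes_per_bit = 32, each block needs (4096 / 32) / 8 = 16 bytes of bitmap, roughly 0.4% of the block. A one-liner check of that arithmetic:

    public class ReadAmpOverheadSketch {
      public static void main(final String[] args) {
        final long blockSize = 4096;  // bytes per data block
        final long bytesPerBit = 32;  // read_amp_bytes_per_bit, must be a power of 2
        System.out.println((blockSize / bytesPerBit) / 8); // 16 bytes of bitmap per block
      }
    }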
@@ -699,7 +699,7 @@ public class BlockBasedTableConfig extends TableFormatConfig {
/**
* Determine if index compression is enabled.
- *
+ *
* See {@link #setEnableIndexCompression(boolean)}.
*
* @return true if index compression is enabled, false otherwise
@@ -710,7 +710,7 @@ public class BlockBasedTableConfig extends TableFormatConfig {
/**
* Store index blocks on disk in compressed format.
- *
+ *
* Changing this option to false will avoid the overhead of decompression
* if index blocks are evicted and read back.
*
diff --git a/java/src/main/java/org/rocksdb/BloomFilter.java b/java/src/main/java/org/rocksdb/BloomFilter.java
index 8aff715b7..0b4e93229 100644
--- a/java/src/main/java/org/rocksdb/BloomFilter.java
+++ b/java/src/main/java/org/rocksdb/BloomFilter.java
@@ -69,5 +69,5 @@ public class BloomFilter extends Filter {
this(bitsPerKey);
}
- private native static long createNewBloomFilter(final double bitsKeyKey);
+ private static native long createNewBloomFilter(final double bitsKeyKey);
}
diff --git a/java/src/main/java/org/rocksdb/ByteBufferGetStatus.java b/java/src/main/java/org/rocksdb/ByteBufferGetStatus.java
index 8eef95447..f918a8d03 100644
--- a/java/src/main/java/org/rocksdb/ByteBufferGetStatus.java
+++ b/java/src/main/java/org/rocksdb/ByteBufferGetStatus.java
@@ -12,7 +12,7 @@ import java.util.List;
/**
* A ByteBuffer containing fetched data, together with a result for the fetch
* and the total size of the object fetched.
- *
+ *
* Used for the individual results of
* {@link RocksDB#multiGetByteBuffers(List, List)}
* {@link RocksDB#multiGetByteBuffers(List, List, List)}
diff --git a/java/src/main/java/org/rocksdb/Cache.java b/java/src/main/java/org/rocksdb/Cache.java
index 569a1df06..04bd3fcaa 100644
--- a/java/src/main/java/org/rocksdb/Cache.java
+++ b/java/src/main/java/org/rocksdb/Cache.java
@@ -35,6 +35,6 @@ public abstract class Cache extends RocksObject {
return getPinnedUsage(this.nativeHandle_);
}
- private native static long getUsage(final long handle);
- private native static long getPinnedUsage(final long handle);
+ private static native long getUsage(final long handle);
+ private static native long getPinnedUsage(final long handle);
}
diff --git a/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java b/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java
index 6c87cc188..12854c510 100644
--- a/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java
+++ b/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java
@@ -10,10 +10,11 @@ package org.rocksdb;
*/
public class CassandraCompactionFilter
extends AbstractCompactionFilter<Slice>
diff --git a/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java b/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java
--- a/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java
+++ b/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java
* NOTE: we are producing an additional Java Object here to represent the underlying native C++
* ColumnFamilyHandle object. The underlying object is not owned by ourselves. The Java API user
* likely already had a ColumnFamilyHandle Java object which owns the underlying C++ object, as
* they will have been presented it when they opened the database or added a Column Family.
- *
+ *
*
* TODO(AR) - Potentially a better design would be to cache the active Java Column Family Objects
* in RocksDB, and return the same Java Object instead of instantiating a new one here. This could
* also help us to improve the Java API semantics for Java users. See for example
- * https://github.com/facebook/rocksdb/issues/2687.
+ * ....
*
* @param nativeHandle native handle to the column family.
*/
@@ -80,7 +80,7 @@ public class ColumnFamilyHandle extends RocksObject {
* information, this call might internally lock and release DB mutex to
* access the up-to-date CF options. In addition, all the pointer-typed
* options cannot be referenced any longer than the original options exist.
- *
+ *
* Note that this function is not supported in RocksDBLite.
*
* @return the up-to-date descriptor.
@@ -107,7 +107,7 @@ public class ColumnFamilyHandle extends RocksObject {
return rocksDB_.nativeHandle_ == that.rocksDB_.nativeHandle_ &&
getID() == that.getID() &&
Arrays.equals(getName(), that.getName());
- } catch (RocksDBException e) {
+ } catch (final RocksDBException e) {
throw new RuntimeException("Cannot compare column family handles", e);
}
}
@@ -118,7 +118,7 @@ public class ColumnFamilyHandle extends RocksObject {
int result = Objects.hash(getID(), rocksDB_.nativeHandle_);
result = 31 * result + Arrays.hashCode(getName());
return result;
- } catch (RocksDBException e) {
+ } catch (final RocksDBException e) {
throw new RuntimeException("Cannot calculate hash code of column family handle", e);
}
}
diff --git a/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java b/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java
index 65dfd328f..d8d9658fc 100644
--- a/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java
+++ b/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java
@@ -1291,7 +1291,7 @@ public class ColumnFamilyOptions extends RocksObject
* Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
*
- * @param prepopulateBlobCache the prepopulate blob cache option
+ * @param prepopulateBlobCache prepopulate the blob cache option
*
* @return the reference to the current options.
*/
diff --git a/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java
index 97357aacf..776fc7038 100644
--- a/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java
+++ b/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java
@@ -121,9 +121,9 @@ public interface ColumnFamilyOptionsInterface
* Default: BytewiseComparator.
* @param builtinComparator a {@link BuiltinComparator} type.
* @return the instance of the current object.
@@ -133,11 +133,11 @@ public interface ColumnFamilyOptionsInterface
* Comparator instance can be re-used in multiple options instances.
*
* @param comparator java instance.
@@ -176,17 +176,17 @@ public interface ColumnFamilyOptionsInterface
* The client should specify only set one of the two.
- * {@link #setCompactionFilter(AbstractCompactionFilter)} takes precedence
+ * {@code #setCompactionFilter(AbstractCompactionFilter)} takes precedence
* over {@link #setCompactionFilterFactory(AbstractCompactionFilterFactory)}
* if the client specifies both.
- *
+ *
* If multithreaded compaction is being used, the supplied CompactionFilter
* instance may be used from different threads concurrently and so should be thread-safe.
*
@@ -207,7 +207,7 @@ public interface ColumnFamilyOptionsInterface
* The map is keyed by values from {@link #inputFiles()} and
* {@link #outputFiles()}.
*
diff --git a/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java b/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java
index 4c8d6545c..92b21fc50 100644
--- a/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java
+++ b/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java
@@ -17,7 +17,7 @@ public class CompactionOptionsFIFO extends RocksObject {
/**
* Once the total sum of table files reaches this, we will delete the oldest
* table file
- *
+ *
* Default: 1GB
*
* @param maxTableFilesSize The maximum size of the table files
@@ -33,7 +33,7 @@ public class CompactionOptionsFIFO extends RocksObject {
/**
* Once the total sum of table files reaches this, we will delete the oldest
* table file
- *
+ *
* Default: 1GB
*
* @return max table file size in bytes
@@ -48,7 +48,7 @@ public class CompactionOptionsFIFO extends RocksObject {
* and compaction won't trigger if average compact bytes per del file is
* larger than options.write_buffer_size. This is to protect large files
* from being compacted again.
- *
+ *
* Default: false
*
* @param allowCompaction true to allow intra-L0 compaction
@@ -61,13 +61,12 @@ public class CompactionOptionsFIFO extends RocksObject {
return this;
}
-
/**
* Check if intra-L0 compaction is enabled.
* When enabled, we try to compact smaller files into larger ones.
- *
+ *
* See {@link #setAllowCompaction(boolean)}.
- *
+ *
* Default: false
*
* @return true if intra-L0 compaction is enabled, false otherwise.
@@ -76,8 +75,7 @@ public class CompactionOptionsFIFO extends RocksObject {
return allowCompaction(nativeHandle_);
}
-
- private native static long newCompactionOptionsFIFO();
+ private static native long newCompactionOptionsFIFO();
@Override protected final native void disposeInternal(final long handle);
private native void setMaxTableFilesSize(final long handle,
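A usage sketch for these FIFO options, assuming an illustrative 2GB cap:

    try (final CompactionOptionsFIFO fifoOpts = new CompactionOptionsFIFO()
             .setMaxTableFilesSize(2L * 1024 * 1024 * 1024) // drop oldest files past 2GB
             .setAllowCompaction(true);                     // allow intra-L0 compaction
         final Options options = new Options()
             .setCompactionStyle(CompactionStyle.FIFO)
             .setCompactionOptionsFIFO(fifoOpts)) {
      // open the DB with these options
    }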
diff --git a/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java b/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java
index d2dfa4eef..4d2ebdb1f 100644
--- a/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java
+++ b/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java
@@ -18,7 +18,7 @@ public class CompactionOptionsUniversal extends RocksObject {
* Percentage flexibility while comparing file size. If the candidate file(s)
* size is 1% smaller than the next file's size, then include the next file in
* this candidate set.
- *
+ *
* Default: 1
*
* @param sizeRatio The size ratio to use
@@ -34,7 +34,7 @@ public class CompactionOptionsUniversal extends RocksObject {
* Percentage flexibility while comparing file size. If the candidate file(s)
* size is 1% smaller than the next file's size, then include the next file in
* this candidate set.
- *
+ *
* Default: 1
*
* @return The size ratio in use
@@ -45,7 +45,7 @@ public class CompactionOptionsUniversal extends RocksObject {
/**
* The minimum number of files in a single compaction run.
- *
+ *
* Default: 2
*
* @param minMergeWidth minimum number of files in a single compaction run
@@ -59,7 +59,7 @@ public class CompactionOptionsUniversal extends RocksObject {
/**
* The minimum number of files in a single compaction run.
- *
+ *
* Default: 2
*
* @return minimum number of files in a single compaction run
@@ -70,7 +70,7 @@ public class CompactionOptionsUniversal extends RocksObject {
/**
* The maximum number of files in a single compaction run.
- *
+ *
* Default: {@link Long#MAX_VALUE}
*
* @param maxMergeWidth maximum number of files in a single compaction run
@@ -84,7 +84,7 @@ public class CompactionOptionsUniversal extends RocksObject {
/**
* The maximum number of files in a single compaction run.
- *
+ *
* Default: {@link Long#MAX_VALUE}
*
* @return maximum number of files in a single compaction run
@@ -102,7 +102,7 @@ public class CompactionOptionsUniversal extends RocksObject {
* a size amplification of 0%. RocksDB uses the following heuristic
* to calculate size amplification: it assumes that all files excluding
* the earliest file contribute to the size amplification.
- *
+ *
* Default: 200, which means that a 100 byte database could require up to
* 300 bytes of storage.
*
@@ -126,7 +126,7 @@ public class CompactionOptionsUniversal extends RocksObject {
* a size amplification of 0%. RocksDB uses the following heuristic
* to calculate size amplification: it assumes that all files excluding
* the earliest file contribute to the size amplification.
- *
+ *
* Default: 200, which means that a 100 byte database could require up to
* 300 bytes of storage.
*
@@ -140,11 +140,11 @@ public class CompactionOptionsUniversal extends RocksObject {
/**
* If this option is set to -1 (the default value), all the output files
* will follow the specified compression type.
- *
+ *
* If this option is not negative, we will try to make sure compressed
* size is just above this value. In normal cases, at least this percentage
* of data will be compressed.
- *
+ *
* When we are compacting to a new file, the criterion for whether it
* needs to be compressed is as follows. Assume the list of files, sorted
* by generation time, is:
@@ -154,7 +154,7 @@ public class CompactionOptionsUniversal extends RocksObject {
* well as the total size of C1...Ct as total_C, the compaction output file
* will be compressed iff
* total_C / total_size < this percentage
- *
+ *
* Default: -1
*
* @param compressionSizePercent percentage of size for compression
@@ -170,11 +170,11 @@ public class CompactionOptionsUniversal extends RocksObject {
/**
* If this option is set to -1 (the default value), all the output files
* will follow the specified compression type.
- *
+ *
* If this option is not negative, we will try to make sure compressed
* size is just above this value. In normal cases, at least this percentage
* of data will be compressed.
- *
+ *
* When we are compacting to a new file, the criterion for whether it
* needs to be compressed is as follows. Assume the list of files, sorted
* by generation time, is:
@@ -184,7 +184,7 @@ public class CompactionOptionsUniversal extends RocksObject {
* well as the total size of C1...Ct as total_C, the compaction output file
* will be compressed iff
* total_C / total_size < this percentage
- *
+ *
* Default: -1
*
* @return percentage of size for compression
@@ -195,7 +195,7 @@ public class CompactionOptionsUniversal extends RocksObject {
/**
* The algorithm used to stop picking files into a single compaction run
- *
+ *
* Default: {@link CompactionStopStyle#CompactionStopStyleTotalSize}
*
* @param compactionStopStyle The compaction algorithm
@@ -210,7 +210,7 @@ public class CompactionOptionsUniversal extends RocksObject {
/**
* The algorithm used to stop picking files into a single compaction run
- *
+ *
* Default: {@link CompactionStopStyle#CompactionStopStyleTotalSize}
*
* @return The compaction algorithm
@@ -222,7 +222,7 @@ public class CompactionOptionsUniversal extends RocksObject {
/**
* Option to optimize the universal multi level compaction by enabling
* trivial move for non overlapping files.
- *
+ *
* Default: false
*
* @param allowTrivialMove true if trivial move is allowed
@@ -238,7 +238,7 @@ public class CompactionOptionsUniversal extends RocksObject {
/**
* Option to optimize the universal multi level compaction by enabling
* trivial move for non overlapping files.
- *
+ *
* Default: false
*
* @return true if trivial move is allowed
@@ -247,7 +247,7 @@ public class CompactionOptionsUniversal extends RocksObject {
return allowTrivialMove(nativeHandle_);
}
- private native static long newCompactionOptionsUniversal();
+ private static native long newCompactionOptionsUniversal();
@Override protected final native void disposeInternal(final long handle);
private native void setSizeRatio(final long handle, final int sizeRatio);
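A sketch of wiring the universal compaction options above into an Options instance; the values shown mirror the documented defaults:

    try (final CompactionOptionsUniversal univOpts = new CompactionOptionsUniversal()
             .setSizeRatio(1)
             .setMinMergeWidth(2)
             .setMaxSizeAmplificationPercent(200)
             .setStopStyle(CompactionStopStyle.CompactionStopStyleTotalSize);
         final Options options = new Options()
             .setCompactionStyle(CompactionStyle.UNIVERSAL)
             .setCompactionOptionsUniversal(univOpts)) {
      // open the DB with these options
    }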
diff --git a/java/src/main/java/org/rocksdb/CompactionStyle.java b/java/src/main/java/org/rocksdb/CompactionStyle.java
index b24bbf850..794074df6 100644
--- a/java/src/main/java/org/rocksdb/CompactionStyle.java
+++ b/java/src/main/java/org/rocksdb/CompactionStyle.java
@@ -9,7 +9,7 @@ import java.util.List;
/**
* Enum CompactionStyle
- *
+ *
* RocksDB supports different styles of compaction. Available
* compaction styles can be chosen using this enumeration.
*
@@ -25,7 +25,8 @@ import java.util.List;
* the old data, so it's basically a TTL compaction style.
*
* Default: true
*
* @return true if a direct byte buffer will be used, false otherwise
@@ -62,10 +62,10 @@ public class ComparatorOptions extends RocksObject {
}
/**
- * Controls whether a direct byte buffer (i.e. outside of the normal
+ * Controls whether a direct byte buffer (i.e. outside the normal
* garbage-collected heap) is used, as opposed to a non-direct byte buffer
* which is a wrapper around an on-heap byte[].
- *
+ *
* Default: true
*
* @param useDirectBuffer true if a direct byte buffer should be used,
@@ -86,7 +86,7 @@ public class ComparatorOptions extends RocksObject {
* if it requires less than {@code maxReuseBufferSize}, then an
* existing buffer will be reused, else a new buffer will be
* allocated just for that callback.
- *
+ *
* Default: 64 bytes
*
* @return the maximum size of a buffer which is reused,
@@ -105,7 +105,7 @@ public class ComparatorOptions extends RocksObject {
* if it requires less than {@code maxReuseBufferSize}, then an
* existing buffer will be reused, else a new buffer will be
* allocated just for that callback.
- *
+ *
* Default: 64 bytes
*
* @param maxReusedBufferSize the maximum size for a buffer to reuse, or 0 to
@@ -119,7 +119,7 @@ public class ComparatorOptions extends RocksObject {
return this;
}
- private native static long newComparatorOptions();
+ private static native long newComparatorOptions();
private native byte reusedSynchronisationType(final long handle);
private native void setReusedSynchronisationType(final long handle,
final byte reusedSynchronisationType);
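A sketch of the ComparatorOptions knobs above, paired with the bundled org.rocksdb.util.BytewiseComparator:

    try (final ComparatorOptions comparatorOpts = new ComparatorOptions()
             .setUseDirectBuffer(true)      // compare via off-heap byte buffers
             .setMaxReusedBufferSize(64)) { // reuse buffers for keys up to 64 bytes
      final AbstractComparator comparator =
          new org.rocksdb.util.BytewiseComparator(comparatorOpts);
      // pass the comparator to ColumnFamilyOptions#setComparator
    }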
diff --git a/java/src/main/java/org/rocksdb/CompressionOptions.java b/java/src/main/java/org/rocksdb/CompressionOptions.java
index a9072bbb9..2e1ee5731 100644
--- a/java/src/main/java/org/rocksdb/CompressionOptions.java
+++ b/java/src/main/java/org/rocksdb/CompressionOptions.java
@@ -48,9 +48,9 @@ public class CompressionOptions extends RocksObject {
* loaded into the compression library before compressing/uncompressing each
* data block of subsequent files in the subcompaction. Effectively, this
* improves compression ratios when there are repetitions across data blocks.
- *
+ *
* A value of 0 indicates the feature is disabled.
- *
+ *
* Default: 0.
*
* @param maxDictBytes Maximum bytes to use for the dictionary
@@ -75,10 +75,10 @@ public class CompressionOptions extends RocksObject {
* Maximum size of training data passed to zstd's dictionary trainer. Using
* zstd's dictionary trainer can achieve even better compression ratio
* improvements than using {@link #setMaxDictBytes(int)} alone.
- *
+ *
* The training data will be used to generate a dictionary
* of {@link #maxDictBytes()}.
- *
+ *
* Default: 0.
*
* @param zstdMaxTrainBytes Maximum bytes to use for training ZStd.
@@ -104,10 +104,10 @@ public class CompressionOptions extends RocksObject {
* For bottommost_compression_opts, to enable it, user must set enabled=true.
* Otherwise, bottommost compression will use compression_opts as default
* compression options.
- *
+ *
* For compression_opts, if compression_opts.enabled=false, it is still
* used as compression options for compression process.
- *
+ *
* Default: false.
*
* @param enabled true to use these compression options
@@ -131,8 +131,7 @@ public class CompressionOptions extends RocksObject {
return enabled(nativeHandle_);
}
-
- private native static long newCompressionOptions();
+ private static native long newCompressionOptions();
@Override protected final native void disposeInternal(final long handle);
private native void setWindowBits(final long handle, final int windowBits);
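A sketch of enabling dictionary compression with the options above; the 16KB dictionary and 100x training multiplier are illustrative values:

    try (final CompressionOptions compressionOpts = new CompressionOptions()
             .setMaxDictBytes(16 * 1024)            // dictionary sampled per subcompaction
             .setZStdMaxTrainBytes(100 * 16 * 1024) // training data for zstd's trainer
             .setEnabled(true);
         final Options options = new Options()
             .setCompressionType(CompressionType.ZSTD_COMPRESSION)
             .setCompressionOptions(compressionOpts)) {
      // open the DB with these options
    }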
diff --git a/java/src/main/java/org/rocksdb/CompressionType.java b/java/src/main/java/org/rocksdb/CompressionType.java
index d1d73d51a..d1ecf0ac8 100644
--- a/java/src/main/java/org/rocksdb/CompressionType.java
+++ b/java/src/main/java/org/rocksdb/CompressionType.java
@@ -35,9 +35,9 @@ public enum CompressionType {
*
* @return CompressionType instance.
*/
- public static CompressionType getCompressionType(String libraryName) {
+ public static CompressionType getCompressionType(final String libraryName) {
if (libraryName != null) {
- for (CompressionType compressionType : CompressionType.values()) {
+ for (final CompressionType compressionType : CompressionType.values()) {
if (compressionType.getLibraryName() != null &&
compressionType.getLibraryName().equals(libraryName)) {
return compressionType;
@@ -58,7 +58,7 @@ public enum CompressionType {
* @throws IllegalArgumentException If CompressionType cannot be found for the
* provided byteIdentifier
*/
- public static CompressionType getCompressionType(byte byteIdentifier) {
+ public static CompressionType getCompressionType(final byte byteIdentifier) {
for (final CompressionType compressionType : CompressionType.values()) {
if (compressionType.getValue() == byteIdentifier) {
return compressionType;
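Both lookup overloads in use, assuming snappy's registered library name and byte identifier:

    final CompressionType byName = CompressionType.getCompressionType("snappy");
    final CompressionType byId = CompressionType.getCompressionType((byte) 0x1);
    assert byName == CompressionType.SNAPPY_COMPRESSION;
    assert byId == CompressionType.SNAPPY_COMPRESSION;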
diff --git a/java/src/main/java/org/rocksdb/ConfigOptions.java b/java/src/main/java/org/rocksdb/ConfigOptions.java
index 4d93f0c99..026f8b01d 100644
--- a/java/src/main/java/org/rocksdb/ConfigOptions.java
+++ b/java/src/main/java/org/rocksdb/ConfigOptions.java
@@ -44,10 +44,10 @@ public class ConfigOptions extends RocksObject {
@Override protected final native void disposeInternal(final long handle);
- private native static long newConfigOptions();
- private native static void setEnv(final long handle, final long envHandle);
- private native static void setDelimiter(final long handle, final String delimiter);
- private native static void setIgnoreUnknownOptions(final long handle, final boolean ignore);
- private native static void setInputStringsEscaped(final long handle, final boolean escaped);
- private native static void setSanityLevel(final long handle, final byte level);
+ private static native long newConfigOptions();
+ private static native void setEnv(final long handle, final long envHandle);
+ private static native void setDelimiter(final long handle, final String delimiter);
+ private static native void setIgnoreUnknownOptions(final long handle, final boolean ignore);
+ private static native void setInputStringsEscaped(final long handle, final boolean escaped);
+ private static native void setSanityLevel(final long handle, final byte level);
}
diff --git a/java/src/main/java/org/rocksdb/DBOptions.java b/java/src/main/java/org/rocksdb/DBOptions.java
index 9eb5ca873..655d900c3 100644
--- a/java/src/main/java/org/rocksdb/DBOptions.java
+++ b/java/src/main/java/org/rocksdb/DBOptions.java
@@ -11,7 +11,7 @@ import java.util.*;
/**
* DBOptions to control the behavior of a database. It will be used
* during the creation of a {@link org.rocksdb.RocksDB} (i.e., RocksDB.open()).
- *
+ *
* As a descendant of {@link AbstractNativeReference}, this class is {@link AutoCloseable}
* and will be automatically released if opened in the preamble of a try-with-resources block.
*/
@@ -24,7 +24,7 @@ public class DBOptions extends RocksObject
/**
* Construct DBOptions.
- *
+ *
* This constructor will create (by allocating a block of memory)
* an {@code rocksdb::DBOptions} on the C++ side.
*/
@@ -36,13 +36,13 @@ public class DBOptions extends RocksObject
/**
* Copy constructor for DBOptions.
- *
+ *
* NOTE: This does a shallow copy, which means env, rate_limiter, sst_file_manager,
* info_log and other pointers will be cloned!
*
* @param other The DBOptions to copy.
*/
- public DBOptions(DBOptions other) {
+ public DBOptions(final DBOptions other) {
super(copyDBOptions(other.nativeHandle_));
this.env_ = other.env_;
this.numShardBits_ = other.numShardBits_;
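A sketch of the shallow-copy behaviour noted above:

    try (final DBOptions original = new DBOptions().setCreateIfMissing(true);
         final DBOptions copy = new DBOptions(original)) {
      // env, rate_limiter, sst_file_manager and info_log pointers are
      // shared, so resources owned by `original` must outlive `copy`
      assert copy.createIfMissing();
    }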
diff --git a/java/src/main/java/org/rocksdb/DirectSlice.java b/java/src/main/java/org/rocksdb/DirectSlice.java
index 02fa3511f..5aa0866ff 100644
--- a/java/src/main/java/org/rocksdb/DirectSlice.java
+++ b/java/src/main/java/org/rocksdb/DirectSlice.java
@@ -10,13 +10,13 @@ import java.nio.ByteBuffer;
/**
* Base class for slices which will receive direct
* ByteBuffer based access to the underlying data.
- *
+ *
* ByteBuffer backed slices typically perform better with
* larger keys and values. When using smaller keys and
* values, consider using {@link org.rocksdb.Slice}.
*/
public class DirectSlice extends AbstractSlice
* Note: You should be aware that it is intentionally marked as
* package-private. This is so that developers cannot construct their own
* default DirectSlice objects (at present). As developers cannot construct
@@ -123,9 +123,8 @@ public class DirectSlice extends AbstractSlice
* Default: false
*
* @param useMmapReads true to enable memory mapped reads, false to disable.
@@ -55,7 +55,7 @@ public class EnvOptions extends RocksObject {
/**
* Enable/Disable memory mapped Writes.
- *
+ *
* Default: true
*
* @param useMmapWrites true to enable memory mapped writes, false to disable.
@@ -79,7 +79,7 @@ public class EnvOptions extends RocksObject {
/**
* Enable/Disable direct reads, i.e. {@code O_DIRECT}.
- *
+ *
* Default: false
*
* @param useDirectReads true to enable direct reads, false to disable.
@@ -103,7 +103,7 @@ public class EnvOptions extends RocksObject {
/**
* Enable/Disable direct writes, i.e. {@code O_DIRECT}.
- *
+ *
* Default: false
*
* @param useDirectWrites true to enable direct writes, false to disable.
@@ -127,9 +127,9 @@ public class EnvOptions extends RocksObject {
/**
* Enable/Disable fallocate calls.
- *
+ *
* Default: true
- *
+ *
* If false, {@code fallocate()} calls are bypassed.
*
* @param allowFallocate true to enable fallocate calls, false to disable.
@@ -153,7 +153,7 @@ public class EnvOptions extends RocksObject {
/**
* Enable/Disable the {@code FD_CLOEXEC} bit when opening file descriptors.
- *
+ *
* Default: true
*
* @param setFdCloexec true to enable the {@code FB_CLOEXEC} bit,
@@ -181,7 +181,7 @@ public class EnvOptions extends RocksObject {
* Allows OS to incrementally sync files to disk while they are being
* written, in the background. Issue one request for every
* {@code bytesPerSync} written.
- *
+ *
* Default: 0
*
* @param bytesPerSync 0 to disable, otherwise the number of bytes.
@@ -323,8 +323,8 @@ public class EnvOptions extends RocksObject {
return rateLimiter;
}
- private native static long newEnvOptions();
- private native static long newEnvOptions(final long dboptions_handle);
+ private static native long newEnvOptions();
+ private static native long newEnvOptions(final long dboptions_handle);
@Override protected final native void disposeInternal(final long handle);
private native void setUseMmapReads(final long handle,
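A sketch of these EnvOptions feeding an SstFileWriter; the 1MB sync interval is illustrative:

    try (final EnvOptions envOpts = new EnvOptions()
             .setUseDirectReads(true)       // O_DIRECT reads
             .setUseMmapWrites(false)       // no memory-mapped writes
             .setBytesPerSync(1024 * 1024); // background-sync every 1MB written
         final Options options = new Options();
         final SstFileWriter writer = new SstFileWriter(envOpts, options)) {
      // writer.open(path); writer.put(key, value); writer.finish();
    }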
diff --git a/java/src/main/java/org/rocksdb/EventListener.java b/java/src/main/java/org/rocksdb/EventListener.java
index a12ab92ba..27652eaf8 100644
--- a/java/src/main/java/org/rocksdb/EventListener.java
+++ b/java/src/main/java/org/rocksdb/EventListener.java
@@ -12,7 +12,7 @@ import java.util.List;
* be called when a specific RocksDB event happens, such as a flush. It can
* be used as a building block for developing custom features such as
* stats-collector or external compaction algorithm.
- *
+ *
* Note that callback functions should not run for an extended period of
* time before the function returns, otherwise RocksDB may be blocked.
* For example, it is not suggested to do
@@ -21,17 +21,17 @@ import java.util.List;
* {@link RocksDB#put(ColumnFamilyHandle, WriteOptions, byte[], byte[])}
* (as Put may be blocked in certain cases) in the same thread in the
* EventListener callback.
- *
+ *
* However, doing
* {@link RocksDB#compactFiles(CompactionOptions, ColumnFamilyHandle, List, int, int,
* CompactionJobInfo)} and {@link RocksDB#put(ColumnFamilyHandle, WriteOptions, byte[], byte[])} in
* another thread is considered safe.
- *
+ *
* [Threading] All EventListener callbacks will be called on the
* actual thread involved in that specific event. For example, it
* is the RocksDB background flush thread that does the actual flush to
* call {@link #onFlushCompleted(RocksDB, FlushJobInfo)}.
- *
+ *
* [Locking] All EventListener callbacks are designed to be called without
* the current thread holding any DB mutex. This is to prevent potential
* deadlock and performance issue when using EventListener callback
@@ -41,7 +41,7 @@ public interface EventListener {
/**
* A callback function to RocksDB which will be called before a
* RocksDB starts to flush memtables.
- *
+ *
* Note that this function must be implemented in a way such that
* it should not run for an extended period of time before the function
* returns. Otherwise, RocksDB may be blocked.
@@ -55,7 +55,7 @@ public interface EventListener {
/**
* A callback function to RocksDB which will be called whenever a
* registered RocksDB flushes a file.
- *
+ *
* Note that this function must be implemented in a way such that
* it should not run for an extended period of time before the function
* returns. Otherwise, RocksDB may be blocked.
@@ -77,7 +77,7 @@ public interface EventListener {
* on file creations and deletions are suggested to implement
* {@link #onFlushCompleted(RocksDB, FlushJobInfo)} and
* {@link #onCompactionCompleted(RocksDB, CompactionJobInfo)}.
- *
+ *
* Note that if applications would like to use the passed reference
* outside this function call, they should make copies of the
* returned value.
@@ -91,7 +91,7 @@ public interface EventListener {
* A callback function to RocksDB which will be called before a
* RocksDB starts to compact. The default implementation is
* no-op.
- *
+ *
* Note that this function must be implemented in a way such that
* it should not run for an extended period of time before the function
* returns. Otherwise, RocksDB may be blocked.
@@ -108,7 +108,7 @@ public interface EventListener {
* A callback function for RocksDB which will be called whenever
* a registered RocksDB compacts a file. The default implementation
* is a no-op.
- *
+ *
* Note that this function must be implemented in a way such that
* it should not run for an extended period of time before the function
* returns. Otherwise, RocksDB may be blocked.
@@ -129,11 +129,11 @@ public interface EventListener {
* of a pointer to DB. Applications that build logic based
* on file creations and deletions are suggested to implement
* OnFlushCompleted and OnCompactionCompleted.
- *
+ *
* Historically it was only called if the file was successfully created;
* now it is also called in the failure case. Users can check info.status
* to see if it succeeded or not.
- *
+ *
* Note that if applications would like to use the passed reference
* outside this function call, they should make copies of the
* returned values.
@@ -147,7 +147,7 @@ public interface EventListener {
* A callback function for RocksDB which will be called before
* a SST file is being created. It will follow by OnTableFileCreated after
* the creation finishes.
- *
+ *
* Note that if applications would like to use the passed reference
* outside this function call, they should make copies of the
* returned values.
@@ -160,11 +160,11 @@ public interface EventListener {
/**
* A callback function for RocksDB which will be called before
* a memtable is made immutable.
- *
+ *
* Note that this function must be implemented in a way such that
* it should not run for an extended period of time before the function
* returns. Otherwise, RocksDB may be blocked.
- *
+ *
* Note that if applications would like to use the passed reference
* outside this function call, they should make copies of the
* returned values.
@@ -177,7 +177,7 @@ public interface EventListener {
/**
* A callback function for RocksDB which will be called before
* a column family handle is deleted.
- *
+ *
* Note that this function must be implemented in a way such that
* it should not run for an extended period of time before the function
* returns. Otherwise, RocksDB may be blocked.
@@ -190,7 +190,7 @@ public interface EventListener {
/**
* A callback function for RocksDB which will be called after an external
* file is ingested using IngestExternalFile.
- *
+ *
* Note that this function will run on the same thread as
* IngestExternalFile(), if this function is blocked, IngestExternalFile()
* will be blocked from finishing.
@@ -210,7 +210,7 @@ public interface EventListener {
* preventing the database from entering read-only mode. We do not provide any
* guarantee as to when failed flushes/compactions will be rescheduled if the user
* suppresses an error.
- *
+ *
* Note that this function can run on the same threads as flush, compaction,
* and user writes. So, it is extremely important not to perform heavy
* computations or blocking calls in this function.
@@ -224,7 +224,7 @@ public interface EventListener {
/**
* A callback function for RocksDB which will be called whenever a change
* of superversion triggers a change of the stall conditions.
- *
+ *
* Note that this function must be implemented in a way such that
* it should not run for an extended period of time before the function
* returns. Otherwise, RocksDB may be blocked.
@@ -301,7 +301,7 @@ public interface EventListener {
* If true, the {@link #onFileReadFinish(FileOperationInfo)}
* and {@link #onFileWriteFinish(FileOperationInfo)} will be called. If
* false, then they won't be called.
- *
+ *
* Default: false
*
* @return whether to callback when file read/write is finished
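A minimal listener sketch following the threading and locking notes above, built on the concrete AbstractEventListener base class:

    public class FlushLogger extends AbstractEventListener {
      @Override
      public void onFlushCompleted(final RocksDB db, final FlushJobInfo info) {
        // runs on the background flush thread: return quickly and avoid
        // blocking calls such as compactFiles() or put() from here
        System.out.println("flushed " + info.getFilePath()
            + (info.getTriggeredWritesSlowdown() ? " (writes slowed)" : ""));
      }
    }
    // register before opening the DB:
    // options.setListeners(Collections.singletonList(new FlushLogger()));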
diff --git a/java/src/main/java/org/rocksdb/ExternalFileIngestionInfo.java b/java/src/main/java/org/rocksdb/ExternalFileIngestionInfo.java
index 6b14a8024..7a99dd6bf 100644
--- a/java/src/main/java/org/rocksdb/ExternalFileIngestionInfo.java
+++ b/java/src/main/java/org/rocksdb/ExternalFileIngestionInfo.java
@@ -74,12 +74,12 @@ public class ExternalFileIngestionInfo {
}
@Override
- public boolean equals(Object o) {
+ public boolean equals(final Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
- ExternalFileIngestionInfo that = (ExternalFileIngestionInfo) o;
+ final ExternalFileIngestionInfo that = (ExternalFileIngestionInfo) o;
return globalSeqno == that.globalSeqno
&& Objects.equals(columnFamilyName, that.columnFamilyName)
&& Objects.equals(externalFilePath, that.externalFilePath)
diff --git a/java/src/main/java/org/rocksdb/FileOperationInfo.java b/java/src/main/java/org/rocksdb/FileOperationInfo.java
index aa5743ed3..fae9cd5de 100644
--- a/java/src/main/java/org/rocksdb/FileOperationInfo.java
+++ b/java/src/main/java/org/rocksdb/FileOperationInfo.java
@@ -87,7 +87,7 @@ public class FileOperationInfo {
}
@Override
- public boolean equals(Object o) {
+ public boolean equals(final Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
diff --git a/java/src/main/java/org/rocksdb/FlushJobInfo.java b/java/src/main/java/org/rocksdb/FlushJobInfo.java
index ca9aa0523..414d3a2f3 100644
--- a/java/src/main/java/org/rocksdb/FlushJobInfo.java
+++ b/java/src/main/java/org/rocksdb/FlushJobInfo.java
@@ -90,7 +90,7 @@ public class FlushJobInfo {
* Determine if RocksDB is currently slowing-down all writes to prevent
* creating too many Level 0 files, as compaction seems unable to
* keep up with the write request speed.
- *
+ *
* This indicates that there are too many files in Level 0.
*
* @return true if rocksdb is currently slowing-down all writes,
@@ -103,7 +103,7 @@ public class FlushJobInfo {
/**
* Determine if RocksDB is currently blocking any writes to prevent
* creating more L0 files.
- *
+ *
* This indicates that there are too many files in level 0.
* Compactions should try to compact L0 files down to lower levels as soon
* as possible.
@@ -151,12 +151,12 @@ public class FlushJobInfo {
}
@Override
- public boolean equals(Object o) {
+ public boolean equals(final Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
- FlushJobInfo that = (FlushJobInfo) o;
+ final FlushJobInfo that = (FlushJobInfo) o;
return columnFamilyId == that.columnFamilyId && threadId == that.threadId && jobId == that.jobId
&& triggeredWritesSlowdown == that.triggeredWritesSlowdown
&& triggeredWritesStop == that.triggeredWritesStop && smallestSeqno == that.smallestSeqno
diff --git a/java/src/main/java/org/rocksdb/FlushOptions.java b/java/src/main/java/org/rocksdb/FlushOptions.java
index 760b515fd..0ec835089 100644
--- a/java/src/main/java/org/rocksdb/FlushOptions.java
+++ b/java/src/main/java/org/rocksdb/FlushOptions.java
@@ -47,13 +47,13 @@ public class FlushOptions extends RocksObject {
}
/**
- * Set to true so that flush would proceeds immediately even it it means
+ * Set to true so that flush would proceed immediately even if it means
* writes will stall for the duration of the flush.
- *
+ *
* Set to false so that the operation will wait until it's possible to do
* the flush without causing a stall, or until the required flush is performed by
* someone else (foreground call or background thread).
- *
+ *
* Default: false
*
* @param allowWriteStall true to allow writes to stall for flush, false
@@ -78,7 +78,7 @@ public class FlushOptions extends RocksObject {
return allowWriteStall(nativeHandle_);
}
- private native static long newFlushOptions();
+ private static native long newFlushOptions();
@Override protected final native void disposeInternal(final long handle);
private native void setWaitForFlush(final long handle,
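A usage sketch, assuming an already-open RocksDB instance `db`:

    try (final FlushOptions flushOpts = new FlushOptions()
             .setWaitForFlush(true)        // block until the flush completes
             .setAllowWriteStall(false)) { // wait rather than stall writers
      db.flush(flushOpts);
    }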
diff --git a/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java b/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java
index 05cc2bb90..4bc860d1c 100644
--- a/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java
+++ b/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java
@@ -6,7 +6,7 @@ package org.rocksdb;
* Such a memtable contains a fixed-size array of buckets, where
* each bucket points to a sorted singly-linked
* list (or null if the bucket is empty).
- *
+ *
* Note that since this mem-table representation relies on the
* key prefix, it is required to invoke one of the usePrefixExtractor
* functions to specify how to extract the key prefix from a given key.
diff --git a/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java b/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java
index efc78b14e..7cfa1c0df 100644
--- a/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java
+++ b/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java
@@ -6,7 +6,7 @@ package org.rocksdb;
* Such a mem-table representation contains a fixed-size array of
* buckets, where each bucket points to a skiplist (or null if the
* bucket is empty).
- *
+ *
* Note that since this mem-table representation relies on the
* key prefix, it is required to invoke one of the usePrefixExtractor
* functions to specify how to extract the key prefix from a given key.
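Since both hash-based memtables require a prefix extractor, a sketch of the wiring, with an assumed 8-byte fixed prefix:

    try (final Options options = new Options()
             .setCreateIfMissing(true)
             .useFixedLengthPrefixExtractor(8) // buckets are keyed by this prefix
             .setMemTableConfig(new HashSkipListMemTableConfig())) {
      // open the DB with these options
    }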
diff --git a/java/src/main/java/org/rocksdb/HistogramType.java b/java/src/main/java/org/rocksdb/HistogramType.java
index c5da68d16..35724a108 100644
--- a/java/src/main/java/org/rocksdb/HistogramType.java
+++ b/java/src/main/java/org/rocksdb/HistogramType.java
@@ -63,7 +63,7 @@ public enum HistogramType {
/**
* number of bytes decompressed.
- *
+ *
* The byte count is the uncompressed size, i.e. the size after decompression.
*/
BYTES_DECOMPRESSED((byte) 0x1B),
diff --git a/java/src/main/java/org/rocksdb/IndexType.java b/java/src/main/java/org/rocksdb/IndexType.java
index 162edad1b..5615e929b 100644
--- a/java/src/main/java/org/rocksdb/IndexType.java
+++ b/java/src/main/java/org/rocksdb/IndexType.java
@@ -47,7 +47,7 @@ public enum IndexType {
return value_;
}
- IndexType(byte value) {
+ IndexType(final byte value) {
value_ = value;
}
diff --git a/java/src/main/java/org/rocksdb/InfoLogLevel.java b/java/src/main/java/org/rocksdb/InfoLogLevel.java
index b7c0f0700..197bd89da 100644
--- a/java/src/main/java/org/rocksdb/InfoLogLevel.java
+++ b/java/src/main/java/org/rocksdb/InfoLogLevel.java
@@ -15,7 +15,7 @@ public enum InfoLogLevel {
private final byte value_;
- private InfoLogLevel(final byte value) {
+ InfoLogLevel(final byte value) {
value_ = value;
}
diff --git a/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java b/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java
index a6a308daa..1a6a5fccd 100644
--- a/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java
+++ b/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java
@@ -136,15 +136,15 @@ public class IngestExternalFileOptions extends RocksObject {
/**
* Set to true if you would like duplicate keys in the file being ingested
* to be skipped rather than overwriting existing data under that key.
- *
+ *
* Use case: back-filling historical data into the database without
* overwriting the existing newer version of that data.
- *
+ *
* This option can only be used if the DB has been running
* with DBOptions#allowIngestBehind() == true since the dawn of time.
- *
+ *
* All files will be ingested at the bottommost level with seqno=0.
- *
+ *
* Default: false
*
* @param ingestBehind true if you would like duplicate keys in the file being
@@ -160,7 +160,7 @@ public class IngestExternalFileOptions extends RocksObject {
/**
* Returns true if the global_seqno is written to a given offset
* in the external SST file for backward compatibility.
- *
+ *
* See {@link #setWriteGlobalSeqno(boolean)}.
*
* @return true if the global_seqno is written to a given offset,
@@ -173,21 +173,21 @@ public class IngestExternalFileOptions extends RocksObject {
/**
* Set to true if you would like to write the global_seqno to a given offset
* in the external SST file for backward compatibility.
- *
+ *
* Older versions of RocksDB write the global_seqno to a given offset within
* the ingested SST files, and new versions of RocksDB do not.
- *
+ *
* If you ingest an external SST using a new version of RocksDB and would like
* to be able to downgrade to an older version of RocksDB, you should set
* {@link #writeGlobalSeqno()} to true.
- *
+ *
* If your service is just starting to use the new RocksDB, we recommend that
* you set this option to false, which brings two benefits:
* 1. No extra random write for global_seqno during ingestion.
* 2. Without writing to the external SST file, its checksum can still be verified.
- *
+ *
* We plan to set this option to false by default in the future.
- *
+ *
* Default: true
*
* @param writeGlobalSeqno true to write the global_seqno to a given offset,
@@ -201,10 +201,10 @@ public class IngestExternalFileOptions extends RocksObject {
return this;
}
- private native static long newIngestExternalFileOptions();
- private native static long newIngestExternalFileOptions(
- final boolean moveFiles, final boolean snapshotConsistency,
- final boolean allowGlobalSeqNo, final boolean allowBlockingFlush);
+ private static native long newIngestExternalFileOptions();
+ private static native long newIngestExternalFileOptions(final boolean moveFiles,
+ final boolean snapshotConsistency, final boolean allowGlobalSeqNo,
+ final boolean allowBlockingFlush);
@Override protected final native void disposeInternal(final long handle);
private native boolean moveFiles(final long handle);
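An ingestion sketch using the options above; the SST path and the open RocksDB instance `db` are assumptions:

    try (final IngestExternalFileOptions ingestOpts = new IngestExternalFileOptions()
             .setMoveFiles(true)            // move rather than copy the file
             .setWriteGlobalSeqno(false)) { // new-format ingestion, no seqno rewrite
      db.ingestExternalFile(Collections.singletonList("/path/to/file.sst"), ingestOpts);
    }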
diff --git a/java/src/main/java/org/rocksdb/KeyMayExist.java b/java/src/main/java/org/rocksdb/KeyMayExist.java
index 36185d8c9..6149b8529 100644
--- a/java/src/main/java/org/rocksdb/KeyMayExist.java
+++ b/java/src/main/java/org/rocksdb/KeyMayExist.java
@@ -24,7 +24,6 @@ public class KeyMayExist {
}
public enum KeyMayExistEnum { kNotExist, kExistsWithoutValue, kExistsWithValue }
- ;
public KeyMayExist(final KeyMayExistEnum exists, final int valueLength) {
this.exists = exists;
diff --git a/java/src/main/java/org/rocksdb/LRUCache.java b/java/src/main/java/org/rocksdb/LRUCache.java
index db90b17c5..0a9d02e87 100644
--- a/java/src/main/java/org/rocksdb/LRUCache.java
+++ b/java/src/main/java/org/rocksdb/LRUCache.java
@@ -99,7 +99,7 @@ public class LRUCache extends Cache {
capacity, numShardBits, strictCapacityLimit, highPriPoolRatio, lowPriPoolRatio));
}
- private native static long newLRUCache(final long capacity, final int numShardBits,
+ private static native long newLRUCache(final long capacity, final int numShardBits,
final boolean strictCapacityLimit, final double highPriPoolRatio,
final double lowPriPoolRatio);
@Override protected final native void disposeInternal(final long handle);
diff --git a/java/src/main/java/org/rocksdb/Logger.java b/java/src/main/java/org/rocksdb/Logger.java
index 00a5d5674..614a7fa50 100644
--- a/java/src/main/java/org/rocksdb/Logger.java
+++ b/java/src/main/java/org/rocksdb/Logger.java
@@ -36,9 +36,8 @@ package org.rocksdb;
* AbstractLogger constructor.
* To make RocksDB use a specific MemTable format, its associated
* MemTableConfig should be properly set and passed to Options
* via Options.setMemTableFactory(), and the DB opened using that Options.
@@ -25,5 +25,5 @@ public abstract class MemTableConfig {
*
* @return native handle address to native memory table instance.
*/
- abstract protected long newMemTableFactoryHandle();
+ protected abstract long newMemTableFactoryHandle();
}
diff --git a/java/src/main/java/org/rocksdb/MemTableInfo.java b/java/src/main/java/org/rocksdb/MemTableInfo.java
index f4fb577c3..3d429035a 100644
--- a/java/src/main/java/org/rocksdb/MemTableInfo.java
+++ b/java/src/main/java/org/rocksdb/MemTableInfo.java
@@ -77,12 +77,12 @@ public class MemTableInfo {
}
@Override
- public boolean equals(Object o) {
+ public boolean equals(final Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
- MemTableInfo that = (MemTableInfo) o;
+ final MemTableInfo that = (MemTableInfo) o;
return firstSeqno == that.firstSeqno && earliestSeqno == that.earliestSeqno
&& numEntries == that.numEntries && numDeletes == that.numDeletes
&& Objects.equals(columnFamilyName, that.columnFamilyName);
diff --git a/java/src/main/java/org/rocksdb/MemoryUsageType.java b/java/src/main/java/org/rocksdb/MemoryUsageType.java
index 6010ce7af..40e6d1716 100644
--- a/java/src/main/java/org/rocksdb/MemoryUsageType.java
+++ b/java/src/main/java/org/rocksdb/MemoryUsageType.java
@@ -64,7 +64,7 @@ public enum MemoryUsageType {
"Illegal value provided for MemoryUsageType.");
}
- MemoryUsageType(byte value) {
+ MemoryUsageType(final byte value) {
value_ = value;
}
diff --git a/java/src/main/java/org/rocksdb/MemoryUtil.java b/java/src/main/java/org/rocksdb/MemoryUtil.java
index 52b2175e6..15b9f001a 100644
--- a/java/src/main/java/org/rocksdb/MemoryUtil.java
+++ b/java/src/main/java/org/rocksdb/MemoryUtil.java
@@ -28,12 +28,12 @@ public class MemoryUtil {
* @return Map from {@link MemoryUsageType} to memory usage as a {@link Long}.
*/
public static Map
* See {@link #builder()} and {@link #parse(String)}.
*/
private MutableColumnFamilyOptions(final String[] keys,
@@ -36,11 +34,11 @@ public class MutableColumnFamilyOptions
/**
* Parses a String representation of MutableColumnFamilyOptions
- *
+ *
* The format is: key1=value1;key2=value2;key3=value3 etc
- *
+ *
* For int[] values, each int should be separated by a colon, e.g.
- *
+ *
* key1=value1;intArrayKey1=1:2:3
*
* @param str The string representation of the mutable column family options
@@ -157,8 +155,8 @@ public class MutableColumnFamilyOptions
public static class MutableColumnFamilyOptionsBuilder
extends AbstractMutableOptionsBuilder
* Larger values increase performance, especially during bulk loads.
* Up to {@code max_write_buffer_number} write buffers may be held in memory
* at the same time, so you may wish to adjust this parameter
* to control memory usage.
- *
+ *
* Also, a larger write buffer will result in a longer recovery time
* the next time the database is opened.
- *
+ *
* Default: 64MB
* @param writeBufferSize the size of write buffer.
* @return the instance of the current object.
@@ -56,7 +56,7 @@ public interface MutableColumnFamilyOptionsInterface<
/**
* Number of files to trigger level-0 compaction. A value < 0 means that
* level-0 compaction will not be triggered by number of files at all.
- *
+ *
* Default: 4
*
* @param level0FileNumCompactionTrigger The number of files to trigger
@@ -68,7 +68,7 @@ public interface MutableColumnFamilyOptionsInterface<
/**
* Number of files to trigger level-0 compaction. A value < 0 means that
* level-0 compaction will not be triggered by number of files at all.
- *
+ *
* Default: 4
*
* @return The number of files to trigger
@@ -109,7 +109,7 @@ public interface MutableColumnFamilyOptionsInterface<
* @param maxBytesForLevelBase maximum bytes for level base.
*
* @return the reference to the current option.
- *
+ *
* See {@link AdvancedMutableColumnFamilyOptionsInterface#setMaxBytesForLevelMultiplier(double)}
*/
T setMaxBytesForLevelBase(
@@ -127,7 +127,7 @@ public interface MutableColumnFamilyOptionsInterface<
*
* @return the upper-bound of the total size of level-1 files
* in bytes.
- *
+ *
* See {@link AdvancedMutableColumnFamilyOptionsInterface#maxBytesForLevelMultiplier()}
*/
long maxBytesForLevelBase();
@@ -135,7 +135,7 @@ public interface MutableColumnFamilyOptionsInterface<
/**
* Compress blocks using the specified compression algorithm. This
* parameter can be changed dynamically.
- *
+ *
* Default: SNAPPY_COMPRESSION, which gives lightweight but fast compression.
*
* @param compressionType Compression Type.
@@ -147,7 +147,7 @@ public interface MutableColumnFamilyOptionsInterface<
/**
* Compress blocks using the specified compression algorithm. This
* parameter can be changed dynamically.
- *
+ *
* Default: SNAPPY_COMPRESSION, which gives lightweight but fast compression.
*
* @return Compression type.
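A sketch of the parse format described above, applied to a live column family via setOptions; the option values and the open `db`/`columnFamilyHandle` are assumptions:

    final MutableColumnFamilyOptions mutableCfOpts =
        MutableColumnFamilyOptions
            .parse("write_buffer_size=67108864;level0_file_num_compaction_trigger=4")
            .build();
    db.setOptions(columnFamilyHandle, mutableCfOpts);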
diff --git a/java/src/main/java/org/rocksdb/MutableDBOptions.java b/java/src/main/java/org/rocksdb/MutableDBOptions.java
index bfba1dab3..927e80522 100644
--- a/java/src/main/java/org/rocksdb/MutableDBOptions.java
+++ b/java/src/main/java/org/rocksdb/MutableDBOptions.java
@@ -11,13 +11,12 @@ import java.util.Map;
import java.util.Objects;
public class MutableDBOptions extends AbstractMutableOptions {
-
/**
* Users must use the builder pattern or the parser.
*
* @param keys the keys
* @param values the values
- *
+ *
* See {@link #builder()} and {@link #parse(String)}.
*/
private MutableDBOptions(final String[] keys, final String[] values) {
@@ -37,11 +36,11 @@ public class MutableDBOptions extends AbstractMutableOptions {
/**
* Parses a String representation of MutableDBOptions
- *
+ *
* The format is: key1=value1;key2=value2;key3=value3 etc
- *
+ *
* For int[] values, each int should be separated by a colon, e.g.
- *
+ *
* key1=value1;intArrayKey1=1:2:3
*
* @param str The string representation of the mutable db options
@@ -49,7 +48,7 @@ public class MutableDBOptions extends AbstractMutableOptions {
*
* @return A builder for the mutable db options
*/
- public static MutableDBOptionsBuilder parse(final String str, boolean ignoreUnknown) {
+ public static MutableDBOptionsBuilder parse(final String str, final boolean ignoreUnknown) {
Objects.requireNonNull(str);
final List
* Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}
* API.
@@ -90,9 +90,9 @@ public interface MutableDBOptionsInterface
* Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}
* API.
@@ -105,7 +105,7 @@ public interface MutableDBOptionsInterface
* Default: 0
- *
+ *
* Dynamically changeable through {@link RocksDB#setDBOptions(MutableDBOptions)}.
*
* @param delayedWriteRate the rate in bytes per second
@@ -162,11 +162,11 @@ public interface MutableDBOptionsInterface
* Default: 0
- *
+ *
* Dynamically changeable through {@link RocksDB#setDBOptions(MutableDBOptions)}.
*
* @return the rate in bytes per second
@@ -358,7 +358,7 @@ public interface MutableDBOptionsInterface
* Default: false
*
* @param strictBytesPerSync the bytes per sync
@@ -405,7 +405,7 @@ public interface MutableDBOptionsInterface
* Default: 0
*
* @param compactionReadaheadSize The compaction read-ahead size
@@ -429,9 +429,9 @@ public interface MutableDBOptionsInterface
* Default: 0
*
* @return The compaction read-ahead size
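The DB-wide counterpart, a sketch assuming an open RocksDB instance `db` and illustrative values:

    final MutableDBOptions mutableDbOpts = MutableDBOptions.builder()
        .setDelayedWriteRate(16L * 1024 * 1024) // bytes/second while write-stalled
        .setBytesPerSync(1024 * 1024)
        .build();
    db.setDBOptions(mutableDbOpts);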
diff --git a/java/src/main/java/org/rocksdb/MutableOptionValue.java b/java/src/main/java/org/rocksdb/MutableOptionValue.java
index 7f69eeb9e..fe689b5d0 100644
--- a/java/src/main/java/org/rocksdb/MutableOptionValue.java
+++ b/java/src/main/java/org/rocksdb/MutableOptionValue.java
@@ -13,8 +13,7 @@ public abstract class MutableOptionValue
* The native comparator must directly extend rocksdb::Comparator.
*/
public abstract class NativeComparatorWrapper
diff --git a/java/src/main/java/org/rocksdb/OperationType.java b/java/src/main/java/org/rocksdb/OperationType.java
index 301caea32..bf7353468 100644
--- a/java/src/main/java/org/rocksdb/OperationType.java
+++ b/java/src/main/java/org/rocksdb/OperationType.java
@@ -7,7 +7,7 @@ package org.rocksdb;
/**
* The type used to refer to a thread operation.
- *
+ *
* A thread operation describes a high-level action of a thread;
* examples include compaction and flush.
*/
diff --git a/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java b/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java
index 5a2e1f3ed..ac3cdc210 100644
--- a/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java
+++ b/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java
@@ -94,16 +94,15 @@ public class OptimisticTransactionDB extends RocksDB
return otdb;
}
-
/**
* This is similar to {@link #close()} except that it
* throws an exception if any error occurs.
- *
+ *
* This will not fsync the WAL files.
* If syncing is required, the caller must first call {@link #syncWal()}
* or {@link #write(WriteOptions, WriteBatch)} using an empty write batch
* with {@link WriteOptions#setSync(boolean)} set to true.
- *
+ *
* See also {@link #close()}.
*
* @throws RocksDBException if an error occurs whilst closing.
@@ -121,12 +120,12 @@ public class OptimisticTransactionDB extends RocksDB
/**
* This is similar to {@link #closeE()} except that it
* silently ignores any errors.
- *
+ *
* This will not fsync the WAL files.
* If syncing is required, the caller must first call {@link #syncWal()}
* or {@link #write(WriteOptions, WriteBatch)} using an empty write batch
* with {@link WriteOptions#setSync(boolean)} set to true.
- *
+ *
* See also {@link #close()}.
*/
@Override
@@ -209,8 +208,7 @@ public class OptimisticTransactionDB extends RocksDB
final String path) throws RocksDBException;
protected static native long[] open(final long handle, final String path,
final byte[][] columnFamilyNames, final long[] columnFamilyOptions);
- private native static void closeDatabase(final long handle)
- throws RocksDBException;
+ private static native void closeDatabase(final long handle) throws RocksDBException;
private native long beginTransaction(final long handle,
final long writeOptionsHandle);
private native long beginTransaction(final long handle,
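A shutdown sketch following the WAL note above, assuming an open OptimisticTransactionDB `txnDb`:

    txnDb.syncWal(); // closeE() will not fsync the WAL itself
    txnDb.closeE();  // unlike close(), surfaces failures as RocksDBException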
diff --git a/java/src/main/java/org/rocksdb/OptimisticTransactionOptions.java b/java/src/main/java/org/rocksdb/OptimisticTransactionOptions.java
index 250edf806..a2f5d85ab 100644
--- a/java/src/main/java/org/rocksdb/OptimisticTransactionOptions.java
+++ b/java/src/main/java/org/rocksdb/OptimisticTransactionOptions.java
@@ -43,7 +43,7 @@ public class OptimisticTransactionOptions extends RocksObject
return this;
}
- private native static long newOptimisticTransactionOptions();
+ private static native long newOptimisticTransactionOptions();
private native boolean isSetSnapshot(final long handle);
private native void setSetSnapshot(final long handle,
final boolean setSnapshot);
diff --git a/java/src/main/java/org/rocksdb/OptionString.java b/java/src/main/java/org/rocksdb/OptionString.java
index a89b3313d..61d2a94fe 100644
--- a/java/src/main/java/org/rocksdb/OptionString.java
+++ b/java/src/main/java/org/rocksdb/OptionString.java
@@ -10,13 +10,13 @@ import java.util.List;
import java.util.Objects;
public class OptionString {
- private final static char kvPairSeparator = ';';
- private final static char kvSeparator = '=';
- private final static char complexValueBegin = '{';
- private final static char complexValueEnd = '}';
- private final static char wrappedValueBegin = '{';
- private final static char wrappedValueEnd = '}';
- private final static char arrayValueSeparator = ':';
+ private static final char kvPairSeparator = ';';
+ private static final char kvSeparator = '=';
+ private static final char complexValueBegin = '{';
+ private static final char complexValueEnd = '}';
+ private static final char wrappedValueBegin = '{';
+ private static final char wrappedValueEnd = '}';
+ private static final char arrayValueSeparator = ':';
static class Value {
final List
* As a descendant of {@link AbstractNativeReference}, this class is {@link AutoCloseable}
* and will be automatically released if opened in the preamble of a try-with-resources block.
*/
@@ -33,7 +33,7 @@ public class Options extends RocksObject
if (properties == null || properties.size() == 0) {
throw new IllegalArgumentException("Properties value must contain at least one value.");
}
- StringBuilder stringBuilder = new StringBuilder();
+ final StringBuilder stringBuilder = new StringBuilder();
for (final String name : properties.stringPropertyNames()) {
stringBuilder.append(name);
stringBuilder.append("=");
@@ -45,7 +45,7 @@ public class Options extends RocksObject
/**
* Construct options for opening a RocksDB.
- *
+ *
* This constructor will create (by allocating a block of memory)
* an {@code rocksdb::Options} on the C++ side.
*/
@@ -71,13 +71,13 @@ public class Options extends RocksObject
/**
* Copy constructor for Options.
- *
+ *
* NOTE: This does a shallow copy, which means comparator, merge_operator
* and other pointers will be cloned!
*
* @param other The Options to copy.
*/
- public Options(Options other) {
+ public Options(final Options other) {
super(copyOptions(other.nativeHandle_));
this.env_ = other.env_;
this.memTableConfig_ = other.memTableConfig_;
@@ -179,8 +179,7 @@ public class Options extends RocksObject
}
@Override
- public Options optimizeForPointLookup(
- long blockCacheSizeMb) {
+ public Options optimizeForPointLookup(final long blockCacheSizeMb) {
optimizeForPointLookup(nativeHandle_,
blockCacheSizeMb);
return this;
@@ -194,8 +193,7 @@ public class Options extends RocksObject
}
@Override
- public Options optimizeLevelStyleCompaction(
- long memtableMemoryBudget) {
+ public Options optimizeLevelStyleCompaction(final long memtableMemoryBudget) {
optimizeLevelStyleCompaction(nativeHandle_,
memtableMemoryBudget);
return this;
@@ -388,8 +386,8 @@ public class Options extends RocksObject
assert(isOwningHandle());
final int len = dbPaths.size();
- final String paths[] = new String[len];
- final long targetSizes[] = new long[len];
+ final String[] paths = new String[len];
+ final long[] targetSizes = new long[len];
int i = 0;
for(final DbPath dbPath : dbPaths) {
@@ -407,8 +405,8 @@ public class Options extends RocksObject
if(len == 0) {
return Collections.emptyList();
} else {
- final String paths[] = new String[len];
- final long targetSizes[] = new long[len];
+ final String[] paths = new String[len];
+ final long[] targetSizes = new long[len];
dbPaths(nativeHandle_, paths, targetSizes);
@@ -651,7 +649,7 @@ public class Options extends RocksObject
}
@Override
- public Options setMaxWriteBatchGroupSizeBytes(long maxWriteBatchGroupSizeBytes) {
+ public Options setMaxWriteBatchGroupSizeBytes(final long maxWriteBatchGroupSizeBytes) {
setMaxWriteBatchGroupSizeBytes(nativeHandle_, maxWriteBatchGroupSizeBytes);
return this;
}
@@ -1066,7 +1064,8 @@ public class Options extends RocksObject
}
@Override
- public Options setSkipCheckingSstFileSizesOnDbOpen(boolean skipCheckingSstFileSizesOnDbOpen) {
+ public Options setSkipCheckingSstFileSizesOnDbOpen(
+ final boolean skipCheckingSstFileSizesOnDbOpen) {
setSkipCheckingSstFileSizesOnDbOpen(nativeHandle_, skipCheckingSstFileSizesOnDbOpen);
return this;
}
@@ -1377,12 +1376,11 @@ public class Options extends RocksObject
}
@Override
- public Options setCompressionType(CompressionType compressionType) {
+ public Options setCompressionType(final CompressionType compressionType) {
setCompressionType(nativeHandle_, compressionType.getValue());
return this;
}
-
@Override
public Options setBottommostCompressionType(
final CompressionType bottommostCompressionType) {
@@ -1442,7 +1440,7 @@ public class Options extends RocksObject
}
@Override
- public Options setNumLevels(int numLevels) {
+ public Options setNumLevels(final int numLevels) {
setNumLevels(nativeHandle_, numLevels);
return this;
}
@@ -1490,7 +1488,7 @@ public class Options extends RocksObject
}
@Override
- public Options setTargetFileSizeBase(long targetFileSizeBase) {
+ public Options setTargetFileSizeBase(final long targetFileSizeBase) {
setTargetFileSizeBase(nativeHandle_, targetFileSizeBase);
return this;
}
@@ -1501,7 +1499,7 @@ public class Options extends RocksObject
}
@Override
- public Options setTargetFileSizeMultiplier(int multiplier) {
+ public Options setTargetFileSizeMultiplier(final int multiplier) {
setTargetFileSizeMultiplier(nativeHandle_, multiplier);
return this;
}
@@ -1662,7 +1660,7 @@ public class Options extends RocksObject
}
@Override
- public Options setMaxSuccessiveMerges(long maxSuccessiveMerges) {
+ public Options setMaxSuccessiveMerges(final long maxSuccessiveMerges) {
setMaxSuccessiveMerges(nativeHandle_, maxSuccessiveMerges);
return this;
}
@@ -1692,9 +1690,7 @@ public class Options extends RocksObject
}
@Override
- public Options
- setMemtableHugePageSize(
- long memtableHugePageSize) {
+ public Options setMemtableHugePageSize(final long memtableHugePageSize) {
setMemtableHugePageSize(nativeHandle_,
memtableHugePageSize);
return this;
@@ -1706,7 +1702,7 @@ public class Options extends RocksObject
}
@Override
- public Options setSoftPendingCompactionBytesLimit(long softPendingCompactionBytesLimit) {
+ public Options setSoftPendingCompactionBytesLimit(final long softPendingCompactionBytesLimit) {
setSoftPendingCompactionBytesLimit(nativeHandle_,
softPendingCompactionBytesLimit);
return this;
@@ -1718,7 +1714,7 @@ public class Options extends RocksObject
}
@Override
- public Options setHardPendingCompactionBytesLimit(long hardPendingCompactionBytesLimit) {
+ public Options setHardPendingCompactionBytesLimit(final long hardPendingCompactionBytesLimit) {
setHardPendingCompactionBytesLimit(nativeHandle_, hardPendingCompactionBytesLimit);
return this;
}
@@ -1729,7 +1725,7 @@ public class Options extends RocksObject
}
@Override
- public Options setLevel0FileNumCompactionTrigger(int level0FileNumCompactionTrigger) {
+ public Options setLevel0FileNumCompactionTrigger(final int level0FileNumCompactionTrigger) {
setLevel0FileNumCompactionTrigger(nativeHandle_, level0FileNumCompactionTrigger);
return this;
}
@@ -1740,7 +1736,7 @@ public class Options extends RocksObject
}
@Override
- public Options setLevel0SlowdownWritesTrigger(int level0SlowdownWritesTrigger) {
+ public Options setLevel0SlowdownWritesTrigger(final int level0SlowdownWritesTrigger) {
setLevel0SlowdownWritesTrigger(nativeHandle_, level0SlowdownWritesTrigger);
return this;
}
@@ -1751,7 +1747,7 @@ public class Options extends RocksObject
}
@Override
- public Options setLevel0StopWritesTrigger(int level0StopWritesTrigger) {
+ public Options setLevel0StopWritesTrigger(final int level0StopWritesTrigger) {
setLevel0StopWritesTrigger(nativeHandle_, level0StopWritesTrigger);
return this;
}
@@ -1762,7 +1758,8 @@ public class Options extends RocksObject
}
@Override
- public Options setMaxBytesForLevelMultiplierAdditional(int[] maxBytesForLevelMultiplierAdditional) {
+ public Options setMaxBytesForLevelMultiplierAdditional(
+ final int[] maxBytesForLevelMultiplierAdditional) {
setMaxBytesForLevelMultiplierAdditional(nativeHandle_, maxBytesForLevelMultiplierAdditional);
return this;
}
@@ -1773,7 +1770,7 @@ public class Options extends RocksObject
}
@Override
- public Options setParanoidFileChecks(boolean paranoidFileChecks) {
+ public Options setParanoidFileChecks(final boolean paranoidFileChecks) {
setParanoidFileChecks(nativeHandle_, paranoidFileChecks);
return this;
}
@@ -1892,7 +1889,7 @@ public class Options extends RocksObject
}
@Override
- public Options setAvoidUnnecessaryBlockingIO(boolean avoidUnnecessaryBlockingIO) {
+ public Options setAvoidUnnecessaryBlockingIO(final boolean avoidUnnecessaryBlockingIO) {
setAvoidUnnecessaryBlockingIO(nativeHandle_, avoidUnnecessaryBlockingIO);
return this;
}
@@ -1904,7 +1901,7 @@ public class Options extends RocksObject
}
@Override
- public Options setPersistStatsToDisk(boolean persistStatsToDisk) {
+ public Options setPersistStatsToDisk(final boolean persistStatsToDisk) {
setPersistStatsToDisk(nativeHandle_, persistStatsToDisk);
return this;
}
@@ -1916,7 +1913,7 @@ public class Options extends RocksObject
}
@Override
- public Options setWriteDbidToManifest(boolean writeDbidToManifest) {
+ public Options setWriteDbidToManifest(final boolean writeDbidToManifest) {
setWriteDbidToManifest(nativeHandle_, writeDbidToManifest);
return this;
}
@@ -1928,7 +1925,7 @@ public class Options extends RocksObject
}
@Override
- public Options setLogReadaheadSize(long logReadaheadSize) {
+ public Options setLogReadaheadSize(final long logReadaheadSize) {
setLogReadaheadSize(nativeHandle_, logReadaheadSize);
return this;
}
@@ -1940,7 +1937,7 @@ public class Options extends RocksObject
}
@Override
- public Options setBestEffortsRecovery(boolean bestEffortsRecovery) {
+ public Options setBestEffortsRecovery(final boolean bestEffortsRecovery) {
setBestEffortsRecovery(nativeHandle_, bestEffortsRecovery);
return this;
}
@@ -1952,7 +1949,7 @@ public class Options extends RocksObject
}
@Override
- public Options setMaxBgErrorResumeCount(int maxBgerrorResumeCount) {
+ public Options setMaxBgErrorResumeCount(final int maxBgerrorResumeCount) {
setMaxBgErrorResumeCount(nativeHandle_, maxBgerrorResumeCount);
return this;
}
@@ -1964,7 +1961,7 @@ public class Options extends RocksObject
}
@Override
- public Options setBgerrorResumeRetryInterval(long bgerrorResumeRetryInterval) {
+ public Options setBgerrorResumeRetryInterval(final long bgerrorResumeRetryInterval) {
setBgerrorResumeRetryInterval(nativeHandle_, bgerrorResumeRetryInterval);
return this;
}
@@ -1976,7 +1973,7 @@ public class Options extends RocksObject
}
@Override
- public Options setSstPartitionerFactory(SstPartitionerFactory sstPartitionerFactory) {
+ public Options setSstPartitionerFactory(final SstPartitionerFactory sstPartitionerFactory) {
setSstPartitionerFactory(nativeHandle_, sstPartitionerFactory.nativeHandle_);
this.sstPartitionerFactory_ = sstPartitionerFactory;
return this;
@@ -2038,7 +2035,7 @@ public class Options extends RocksObject
}
@Override
- public Options setBlobCompressionType(CompressionType compressionType) {
+ public Options setBlobCompressionType(final CompressionType compressionType) {
setBlobCompressionType(nativeHandle_, compressionType.getValue());
return this;
}
@@ -2119,10 +2116,9 @@ public class Options extends RocksObject
// END options for blobs (integrated BlobDB)
//
- private native static long newOptions();
- private native static long newOptions(long dbOptHandle,
- long cfOptHandle);
- private native static long copyOptions(long handle);
+ private static native long newOptions();
+ private static native long newOptions(long dbOptHandle, long cfOptHandle);
+ private static native long copyOptions(long handle);
@Override protected final native void disposeInternal(final long handle);
private native void setEnv(long optHandle, long envHandle);
private native void prepareForBulkLoad(long handle);
diff --git a/java/src/main/java/org/rocksdb/OptionsUtil.java b/java/src/main/java/org/rocksdb/OptionsUtil.java
index e21121a2b..612023d8e 100644
--- a/java/src/main/java/org/rocksdb/OptionsUtil.java
+++ b/java/src/main/java/org/rocksdb/OptionsUtil.java
@@ -12,12 +12,12 @@ public class OptionsUtil {
* A static method to construct the DBOptions and ColumnFamilyDescriptors by
* loading the latest RocksDB options file stored in the specified rocksdb
* database.
- *
+ *
* Note that all the pointer options (except table_factory, which is
* described in more detail below) will be initialized with the default
* values. Developers can further initialize them after this function call.
* Below is an example list of pointer options which will be initialized.
- *
+ *
* - env
* - memtable_factory
* - compaction_filter_factory
@@ -25,7 +25,7 @@ public class OptionsUtil {
* - comparator
* - merge_operator
* - compaction_filter
- *
+ *
* For table_factory, this function further supports deserializing
* BlockBasedTableFactory and its BlockBasedTableOptions except the
* pointer options of BlockBasedTableOptions (flush_block_policy_factory,
@@ -43,8 +43,9 @@ public class OptionsUtil {
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
- public static void loadLatestOptions(ConfigOptions configOptions, String dbPath,
+ final DBOptions dbOptions, final List<ColumnFamilyDescriptor> cfDescs)
diff --git a/java/src/main/java/org/rocksdb/ReadOptions.java b/java/src/main/java/org/rocksdb/ReadOptions.java
--- a/java/src/main/java/org/rocksdb/ReadOptions.java
+++ b/java/src/main/java/org/rocksdb/ReadOptions.java
* Note that dispose() must be called before an Options instance
* becomes out-of-scope to release the allocated memory in c++.
*/
@@ -27,13 +27,13 @@ public class ReadOptions extends RocksObject {
/**
* Copy constructor.
- *
+ *
* NOTE: This does a shallow copy, which means snapshot, iterate_upper_bound
* and other pointers will be cloned!
*
* @param other The ReadOptions to copy.
*/
- public ReadOptions(ReadOptions other) {
+ public ReadOptions(final ReadOptions other) {
super(copyReadOptions(other.nativeHandle_));
this.iterateLowerBoundSlice_ = other.iterateLowerBoundSlice_;
this.iterateUpperBoundSlice_ = other.iterateUpperBoundSlice_;
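Since the copy shares rather than re-allocates the underlying native pointers, the original's bound slices must stay alive while the copy is in use. A minimal sketch (the slice value is illustrative; UTF_8 is assumed statically imported from java.nio.charset.StandardCharsets):

try (final Slice upperBound = new Slice("z".getBytes(UTF_8));
     final ReadOptions original = new ReadOptions().setIterateUpperBound(upperBound);
     final ReadOptions copy = new ReadOptions(original)) {
  // both instances reference the same native upper-bound slice, so closing
  // upperBound while either ReadOptions is still in use would be unsafe
}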
@@ -106,7 +106,7 @@ public class ReadOptions extends RocksObject {
*/
public Snapshot snapshot() {
assert(isOwningHandle());
- long snapshotHandle = snapshot(nativeHandle_);
+ final long snapshotHandle = snapshot(nativeHandle_);
if (snapshotHandle != 0) {
return new Snapshot(snapshotHandle);
}
@@ -128,7 +128,7 @@ public class ReadOptions extends RocksObject {
if (snapshot != null) {
setSnapshot(nativeHandle_, snapshot.nativeHandle_);
} else {
- setSnapshot(nativeHandle_, 0l);
+ setSnapshot(nativeHandle_, 0L);
}
return this;
}
@@ -256,7 +256,7 @@ public class ReadOptions extends RocksObject {
* Enforce that the iterator only iterates over the same prefix as the seek.
* This option is effective only for prefix seeks, i.e. prefix_extractor is
* non-null for the column family and {@link #totalOrderSeek()} is false.
- * Unlike iterate_upper_bound, {@link #setPrefixSameAsStart(boolean)} only
+ * Unlike iterate_upper_bound, {@code #setPrefixSameAsStart(boolean)} only
* works within a prefix but in both directions.
*
* @param prefixSameAsStart if true, then the iterator only iterates over the
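A sketch of a scan under this option (the 4-byte prefix length, the path and the key are illustrative assumptions; error handling elided):

try (final Options options = new Options()
         .setCreateIfMissing(true)
         .useFixedLengthPrefixExtractor(4);
     final RocksDB db = RocksDB.open(options, "/tmp/prefix-db");
     final ReadOptions readOptions = new ReadOptions().setPrefixSameAsStart(true);
     final RocksIterator it = db.newIterator(readOptions)) {
  for (it.seek("usr1-a".getBytes(UTF_8)); it.isValid(); it.next()) {
    // visits only keys sharing the 4-byte prefix "usr1" of the seek key
  }
}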
@@ -300,7 +300,7 @@ public class ReadOptions extends RocksObject {
* If true, when PurgeObsoleteFile is called in CleanupIteratorState, we
* schedule a background job in the flush job queue and delete obsolete files
* in background.
- *
+ *
* Default: false
*
* @return true when PurgeObsoleteFile is called in CleanupIteratorState
@@ -314,7 +314,7 @@ public class ReadOptions extends RocksObject {
* If true, when PurgeObsoleteFile is called in CleanupIteratorState, we
* schedule a background job in the flush job queue and delete obsolete files
* in background.
- *
+ *
* Default: false
*
* @param backgroundPurgeOnIteratorCleanup true when PurgeObsoleteFile is
@@ -333,7 +333,7 @@ public class ReadOptions extends RocksObject {
* If non-zero, NewIterator will create a new table reader which
* performs reads of the given size. Using a large size (> 2MB) can
* improve the performance of forward iteration on spinning disks.
- *
+ *
* Default: 0
*
* @return The readahead size in bytes
@@ -347,7 +347,7 @@ public class ReadOptions extends RocksObject {
* If non-zero, NewIterator will create a new table reader which
* performs reads of the given size. Using a large size (> 2MB) can
* improve the performance of forward iteration on spinning disks.
- *
+ *
* Default: 0
*
* @param readaheadSize The readahead size in bytes
@@ -375,7 +375,7 @@ public class ReadOptions extends RocksObject {
* A threshold for the number of keys that can be skipped before failing an
* iterator seek as incomplete. The default value of 0 should be used to
* never fail a request as incomplete, even on skipping too many keys.
- *
+ *
* Default: 0
*
* @param maxSkippableInternalKeys the number of keys that can be skipped
@@ -394,7 +394,7 @@ public class ReadOptions extends RocksObject {
* If true, keys deleted using the DeleteRange() API will be visible to
* readers until they are naturally deleted during compaction. This improves
* read performance in DBs with many range deletions.
- *
+ *
* Default: false
*
* @return true if keys deleted using the DeleteRange() API will be visible
@@ -408,7 +408,7 @@ public class ReadOptions extends RocksObject {
* If true, keys deleted using the DeleteRange() API will be visible to
* readers until they are naturally deleted during compaction. This improves
* read performance in DBs with many range deletions.
- *
+ *
* Default: false
*
* @param ignoreRangeDeletions true if keys deleted using the DeleteRange()
@@ -425,14 +425,14 @@ public class ReadOptions extends RocksObject {
* Defines the smallest key at which the backward
* iterator can return an entry. Once the bound is passed,
* {@link RocksIterator#isValid()} will be false.
- *
+ *
* The lower bound is inclusive i.e. the bound value is a valid
* entry.
- *
+ *
* If prefix_extractor is not null, the Seek target and `iterate_lower_bound`
* need to have the same prefix. This is because ordering is not guaranteed
* outside of prefix domain.
- *
+ *
* Default: null
*
* @param iterateLowerBound Slice representing the lower bound
@@ -450,7 +450,7 @@ public class ReadOptions extends RocksObject {
/**
* Returns the smallest key at which the backward
* iterator can return an entry.
- *
+ *
* The lower bound is inclusive i.e. the bound value is a valid entry.
*
* @return the smallest key, or null if there is no lower bound defined.
@@ -468,15 +468,15 @@ public class ReadOptions extends RocksObject {
/**
* Defines the extent up to which the forward iterator
- * can returns entries. Once the bound is reached,
+ * can return entries. Once the bound is reached,
* {@link RocksIterator#isValid()} will be false.
- *
+ *
* The upper bound is exclusive i.e. the bound value is not a valid entry.
- *
+ *
* If prefix_extractor is not null, the Seek target and iterate_upper_bound
* need to have the same prefix. This is because ordering is not guaranteed
* outside of prefix domain.
- *
+ *
* Default: null
*
* @param iterateUpperBound Slice representing the upper bound
@@ -494,7 +494,7 @@ public class ReadOptions extends RocksObject {
/**
* Returns the largest key at which the forward
* iterator can return an entry.
- *
+ *
* The upper bound is exclusive i.e. the bound value is not a valid entry.
*
* @return the largest key, or null if there is no upper bound defined.
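A sketch combining both bounds (key names illustrative; db assumed open): the iteration below is confined to ["k1", "k5").

try (final Slice lowerBound = new Slice("k1".getBytes(UTF_8));
     final Slice upperBound = new Slice("k5".getBytes(UTF_8));
     final ReadOptions readOptions = new ReadOptions()
         .setIterateLowerBound(lowerBound)
         .setIterateUpperBound(upperBound);
     final RocksIterator it = db.newIterator(readOptions)) {
  for (it.seekToFirst(); it.isValid(); it.next()) {
    // "k1" (inclusive) up to, but excluding, "k5"
  }
}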
@@ -516,7 +516,7 @@ public class ReadOptions extends RocksObject {
* properties of each table during iteration. If the callback returns false,
* the table will not be scanned. This option only affects Iterators and has
* no impact on point lookups.
- *
+ *
* Default: null (every table will be scanned)
*
* @param tableFilter the table filter for the callback.
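A sketch of such a callback (the num-entries criterion is an arbitrary illustration):

try (final ReadOptions readOptions = new ReadOptions().setTableFilter(
         new AbstractTableFilter() {
           @Override
           public boolean filter(final TableProperties tableProperties) {
             // scan a table only if it contains at least one entry
             return tableProperties.getNumEntries() > 0;
           }
         })) {
  // iterators created with readOptions skip the filtered tables;
  // point lookups are unaffected
}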
@@ -568,7 +568,7 @@ public class ReadOptions extends RocksObject {
* only the most recent version visible to timestamp is returned.
* The user-specified timestamp feature is still under active development,
* and the API is subject to change.
- *
+ *
* Default: null
* @see #iterStartTs()
* @return Reference to timestamp or null if there is no timestamp defined.
@@ -594,7 +594,7 @@ public class ReadOptions extends RocksObject {
* only the most recent version visible to timestamp is returned.
* The user-specified timestamp feature is still under active development,
* and the API is subject to change.
- *
+ *
* Default: null
* @see #setIterStartTs(AbstractSlice)
* @param timestamp Slice representing the timestamp
@@ -618,7 +618,7 @@ public class ReadOptions extends RocksObject {
* only the most recent version visible to timestamp is returned.
* The user-specified timestamp feature is still under active development,
* and the API is subject to change.
- *
+ *
* Default: null
* @return Reference to lower bound timestamp or null if there is no lower bound timestamp
* defined.
@@ -644,7 +644,7 @@ public class ReadOptions extends RocksObject {
* only the most recent version visible to timestamp is returned.
* The user-specified timestamp feature is still under active development,
* and the API is subject to change.
- *
+ *
* Default: null
*
* @param iterStartTs Reference to lower bound timestamp or null if there is no lower bound
@@ -727,7 +727,7 @@ public class ReadOptions extends RocksObject {
* It limits the maximum cumulative value size of the keys in batch while
* reading through MultiGet. Once the cumulative value size exceeds this
* soft limit then all the remaining keys are returned with status Aborted.
- *
+ *
* Default: {@code std::numeric_limits<uint64_t>::max()}
* Default: {@code std::numeric_limits<uint64_t>::max()}
diff --git a/java/src/main/java/org/rocksdb/RestoreOptions.java b/java/src/main/java/org/rocksdb/RestoreOptions.java
--- a/java/src/main/java/org/rocksdb/RestoreOptions.java
+++ b/java/src/main/java/org/rocksdb/RestoreOptions.java
* Note that dispose() must be called before this instance becomes out-of-scope
* to release the allocated memory in c++.
*
@@ -27,6 +27,6 @@ public class RestoreOptions extends RocksObject {
super(newRestoreOptions(keepLogFiles));
}
- private native static long newRestoreOptions(boolean keepLogFiles);
+ private static native long newRestoreOptions(boolean keepLogFiles);
@Override protected final native void disposeInternal(final long handle);
}
diff --git a/java/src/main/java/org/rocksdb/RocksCallbackObject.java b/java/src/main/java/org/rocksdb/RocksCallbackObject.java
index 8d7a867ee..2b9de4b8e 100644
--- a/java/src/main/java/org/rocksdb/RocksCallbackObject.java
+++ b/java/src/main/java/org/rocksdb/RocksCallbackObject.java
@@ -11,10 +11,10 @@ import java.util.List;
* RocksCallbackObject is similar to {@link RocksObject} but varies
* in its construction as it is designed for Java objects which have functions
* which are called from C++ via JNI.
- *
+ *
* RocksCallbackObject is the base class for any RocksDB class that acts as a
* callback from some underlying native C++ {@code rocksdb} object.
- *
+ *
* The use of {@code RocksObject} should always be preferred over
* {@link RocksCallbackObject} if callbacks are not required.
*/
diff --git a/java/src/main/java/org/rocksdb/RocksDB.java b/java/src/main/java/org/rocksdb/RocksDB.java
index 77484288f..fb35208bc 100644
--- a/java/src/main/java/org/rocksdb/RocksDB.java
+++ b/java/src/main/java/org/rocksdb/RocksDB.java
@@ -9,10 +9,7 @@ import static java.nio.charset.StandardCharsets.UTF_8;
import java.io.IOException;
import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Map;
+import java.util.*;
import java.util.concurrent.atomic.AtomicReference;
import org.rocksdb.util.Environment;
@@ -343,7 +340,7 @@ public class RocksDB extends RocksObject {
* The factory constructor of RocksDB that opens a RocksDB instance in
* Read-Only mode given the path to the database using the specified
* options and db path.
- *
+ *
* Options instance *should* not be disposed before all DBs using this options
* instance have been closed. If the user doesn't dispose the options
* explicitly, then this options instance will be GC'd automatically.
@@ -365,7 +362,7 @@ public class RocksDB extends RocksObject {
* The factory constructor of RocksDB that opens a RocksDB instance in
* Read-Only mode given the path to the database using the specified
* options and db path.
- *
+ *
* Options instance *should* not be disposed before all DBs using this options
* instance have been closed. If the user doesn't dispose the options
* explicitly, then this options instance will be GC'd automatically.
@@ -501,7 +498,7 @@ public class RocksDB extends RocksObject {
/**
* Open DB as secondary instance with only the default column family.
- *
+ *
* The secondary instance can dynamically tail the MANIFEST of
* a primary that must have already been created. The user can call
* {@link #tryCatchUpWithPrimary()} to make the secondary instance catch up
@@ -538,7 +535,7 @@ public class RocksDB extends RocksObject {
/**
* Open DB as secondary instance with column families.
* You can open a subset of column families in secondary mode.
- *
+ *
* The secondary instance can dynamically tail the MANIFEST of
* a primary that must have already been created. The user can call
* {@link #tryCatchUpWithPrimary()} to make the secondary instance catch up
@@ -598,12 +595,12 @@ public class RocksDB extends RocksObject {
/**
* This is similar to {@link #close()} except that it
* throws an exception if any error occurs.
- *
+ *
* This will not fsync the WAL files.
* If syncing is required, the caller must first call {@link #syncWal()}
* or {@link #write(WriteOptions, WriteBatch)} using an empty write batch
* with {@link WriteOptions#setSync(boolean)} set to true.
- *
+ *
* See also {@link #close()}.
*
* @throws RocksDBException if an error occurs whilst closing.
@@ -626,12 +623,12 @@ public class RocksDB extends RocksObject {
/**
* This is similar to {@link #closeE()} except that it
* silently ignores any errors.
- *
+ *
* This will not fsync the WAL files.
* If syncing is required, the caller must first call {@link #syncWal()}
* or {@link #write(WriteOptions, WriteBatch)} using an empty write batch
* with {@link WriteOptions#setSync(boolean)} set to true.
- *
+ *
* See also {@link #close()}.
*/
@Override
@@ -711,8 +708,8 @@ public class RocksDB extends RocksObject {
columnFamilyOptions.nativeHandle_, cfNames);
final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>(cfHandles.length);
* throws IllegalArgumentException if column family is not present
*
* @throws RocksDBException thrown if error happens in underlying
@@ -943,7 +940,7 @@ public class RocksDB extends RocksObject {
* @param writeOpts {@link org.rocksdb.WriteOptions} instance.
* @param key the specified key to be inserted.
* @param value the value associated with the specified key.
- *
+ *
* throws IllegalArgumentException if column family is not present
*
* @throws RocksDBException thrown if error happens in underlying
@@ -968,7 +965,7 @@ public class RocksDB extends RocksObject {
* Supports direct buffer only.
* @param value the value associated with the specified key. Position and limit is used.
* Supports direct buffer only.
- *
+ *
* throws IllegalArgumentException if column family is not present
*
* @throws RocksDBException thrown if error happens in underlying
@@ -992,7 +989,7 @@ public class RocksDB extends RocksObject {
* Supports direct buffer only.
* @param value the value associated with the specified key. Position and limit is used.
* Supports direct buffer only.
- *
+ *
* throws IllegalArgumentException if column family is not present
*
* @throws RocksDBException thrown if error happens in underlying
@@ -1215,8 +1212,8 @@ public class RocksDB extends RocksObject {
public int get(final ReadOptions opt, final ByteBuffer key, final ByteBuffer value)
throws RocksDBException {
assert key.isDirect() && value.isDirect();
- int result = getDirect(nativeHandle_, opt.nativeHandle_, key, key.position(), key.remaining(),
- value, value.position(), value.remaining(), 0);
+ final int result = getDirect(nativeHandle_, opt.nativeHandle_, key, key.position(),
+ key.remaining(), value, value.position(), value.remaining(), 0);
if (result != NOT_FOUND) {
value.limit(Math.min(value.limit(), value.position() + result));
}
@@ -1248,8 +1245,9 @@ public class RocksDB extends RocksObject {
public int get(final ColumnFamilyHandle columnFamilyHandle, final ReadOptions opt,
final ByteBuffer key, final ByteBuffer value) throws RocksDBException {
assert key.isDirect() && value.isDirect();
- int result = getDirect(nativeHandle_, opt.nativeHandle_, key, key.position(), key.remaining(),
- value, value.position(), value.remaining(), columnFamilyHandle.nativeHandle_);
+ final int result =
+ getDirect(nativeHandle_, opt.nativeHandle_, key, key.position(), key.remaining(), value,
+ value.position(), value.remaining(), columnFamilyHandle.nativeHandle_);
if (result != NOT_FOUND) {
value.limit(Math.min(value.limit(), value.position() + result));
}
@@ -1261,12 +1259,12 @@ public class RocksDB extends RocksObject {
* Remove the database entry for {@code key}. Requires that the key exists
* and was not overwritten. It is not an error if the key did not exist
* in the database.
- *
+ *
* If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple
* times), then the result of calling SingleDelete() on this key is undefined.
* SingleDelete() only behaves correctly if there has been only one Put()
* for this key since the previous call to SingleDelete() for this key.
- *
+ *
* This feature is currently an experimental performance optimization
* for a very specific workload. It is up to the caller to ensure that
* SingleDelete is only used for a key that is not deleted using Delete() or
@@ -1287,12 +1285,12 @@ public class RocksDB extends RocksObject {
* Remove the database entry for {@code key}. Requires that the key exists
* and was not overwritten. It is not an error if the key did not exist
* in the database.
- *
+ *
* If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple
* times), then the result of calling SingleDelete() on this key is undefined.
* SingleDelete() only behaves correctly if there has been only one Put()
* for this key since the previous call to SingleDelete() for this key.
- *
+ *
* This feature is currently an experimental performance optimization
* for a very specific workload. It is up to the caller to ensure that
* SingleDelete is only used for a key that is not deleted using Delete() or
@@ -1316,18 +1314,18 @@ public class RocksDB extends RocksObject {
* Remove the database entry for {@code key}. Requires that the key exists
* and was not overwritten. It is not an error if the key did not exist
* in the database.
- *
+ *
* If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple
* times), then the result of calling SingleDelete() on this key is undefined.
* SingleDelete() only behaves correctly if there has been only one Put()
* for this key since the previous call to SingleDelete() for this key.
- *
+ *
* This feature is currently an experimental performance optimization
* for a very specific workload. It is up to the caller to ensure that
* SingleDelete is only used for a key that is not deleted using Delete() or
* written using Merge(). Mixing SingleDelete operations with Deletes and
* Merges can result in undefined behavior.
- *
+ *
* Note: consider setting {@link WriteOptions#setSync(boolean)} true.
*
* @param writeOpt Write options for the delete
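A sketch of the only supported pattern (key and value are illustrative; db assumed open): exactly one put() for the key, no delete() or merge() on it, then one singleDelete().

db.put("session:42".getBytes(UTF_8), "open".getBytes(UTF_8));
// ... the key is read but never overwritten, deleted or merged ...
db.singleDelete("session:42".getBytes(UTF_8));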
@@ -1346,18 +1344,18 @@ public class RocksDB extends RocksObject {
* Remove the database entry for {@code key}. Requires that the key exists
* and was not overwritten. It is not an error if the key did not exist
* in the database.
- *
+ *
* If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple
* times), then the result of calling SingleDelete() on this key is undefined.
* SingleDelete() only behaves correctly if there has been only one Put()
* for this key since the previous call to SingleDelete() for this key.
- *
+ *
* This feature is currently an experimental performance optimization
* for a very specific workload. It is up to the caller to ensure that
* SingleDelete is only used for a key that is not deleted using Delete() or
* written using Merge(). Mixing SingleDelete operations with Deletes and
* Merges can result in undefined behavior.
- *
+ *
* Note: consider setting {@link WriteOptions#setSync(boolean)} true.
*
* @param columnFamilyHandle The column family to delete the key from
@@ -1374,12 +1372,11 @@ public class RocksDB extends RocksObject {
columnFamilyHandle.nativeHandle_);
}
-
/**
* Removes the database entries in the range ["beginKey", "endKey"), i.e.,
* including "beginKey" and excluding "endKey". Returns a non-OK status on error. It
* is not an error if no keys exist in the range ["beginKey", "endKey").
- *
+ *
* Delete the database entry (if any) for "key". Returns OK on success, and a
* non-OK status on error. It is not an error if "key" did not exist in the
* database.
@@ -1400,7 +1397,7 @@ public class RocksDB extends RocksObject {
* Removes the database entries in the range ["beginKey", "endKey"), i.e.,
* including "beginKey" and excluding "endKey". Returns a non-OK status on error. It
* is not an error if no keys exist in the range ["beginKey", "endKey").
- *
+ *
* Delete the database entry (if any) for "key". Returns OK on success, and a
* non-OK status on error. It is not an error if "key" did not exist in the
* database.
@@ -1422,7 +1419,7 @@ public class RocksDB extends RocksObject {
* Removes the database entries in the range ["beginKey", "endKey"), i.e.,
* including "beginKey" and excluding "endKey". Returns a non-OK status on error. It
* is not an error if no keys exist in the range ["beginKey", "endKey").
- *
+ *
* Delete the database entry (if any) for "key". Returns OK on success, and a
* non-OK status on error. It is not an error if "key" did not exist in the
* database.
@@ -1444,7 +1441,7 @@ public class RocksDB extends RocksObject {
* Removes the database entries in the range ["beginKey", "endKey"), i.e.,
* including "beginKey" and excluding "endKey". Returns a non-OK status on error. It
* is not an error if no keys exist in the range ["beginKey", "endKey").
- *
+ *
* Delete the database entry (if any) for "key". Returns OK on success, and a
* non-OK status on error. It is not an error if "key" did not exist in the
* database.
@@ -1501,7 +1498,7 @@ public class RocksDB extends RocksObject {
* native library.
* @throws IndexOutOfBoundsException if an offset or length is out of bounds
*/
- public void merge(final byte[] key, int offset, int len, final byte[] value,
+ public void merge(final byte[] key, final int offset, final int len, final byte[] value,
final int vOffset, final int vLen) throws RocksDBException {
checkBounds(offset, len, key.length);
checkBounds(vOffset, vLen, value.length);
@@ -2425,10 +2422,10 @@ public class RocksDB extends RocksObject {
* returns false, otherwise it returns true if the key might exist.
* That is to say that this method is probabilistic and may return false
* positives, but never a false negative.
- *
+ *
* If the caller wants to obtain value when the key
* is found in memory, then {@code valueHolder} must be set.
- *
+ *
* This check is potentially lighter-weight than invoking
* {@link #get(byte[])}. One way to make this lighter weight is to avoid
* doing any IOs.
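A sketch of the valueHolder protocol (key illustrative; db assumed open):

final Holder<byte[]> valueHolder = new Holder<>();
if (db.keyMayExist("user:1".getBytes(UTF_8), valueHolder)) {
  if (valueHolder.getValue() != null) {
    // the value was found in memory and is already available
  } else {
    // the key may exist on disk; only a full get() can confirm
    final byte[] value = db.get("user:1".getBytes(UTF_8));
  }
}
// a false return means the key definitely does not exist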
@@ -2451,10 +2448,10 @@ public class RocksDB extends RocksObject {
* returns false, otherwise it returns true if the key might exist.
* That is to say that this method is probabilistic and may return false
* positives, but never a false negative.
- *
+ *
* If the caller wants to obtain value when the key
* is found in memory, then {@code valueHolder} must be set.
- *
+ *
* This check is potentially lighter-weight than invoking
* {@link #get(byte[], int, int)}. One way to make this lighter weight is to
* avoid doing any IOs.
@@ -2482,10 +2479,10 @@ public class RocksDB extends RocksObject {
* returns false, otherwise it returns true if the key might exist.
* That is to say that this method is probabilistic and may return false
* positives, but never a false negative.
- *
+ *
* If the caller wants to obtain value when the key
* is found in memory, then {@code valueHolder} must be set.
- *
+ *
* This check is potentially lighter-weight than invoking
* {@link #get(ColumnFamilyHandle,byte[])}. One way to make this lighter
* weight is to avoid doing any IOs.
@@ -2511,10 +2508,10 @@ public class RocksDB extends RocksObject {
* returns false, otherwise it returns true if the key might exist.
* That is to say that this method is probabilistic and may return false
* positives, but never a false negative.
- *
+ *
* If the caller wants to obtain value when the key
* is found in memory, then {@code valueHolder} must be set.
- *
+ *
* This check is potentially lighter-weight than invoking
* {@link #get(ColumnFamilyHandle, byte[], int, int)}. One way to make this
* lighter weight is to avoid doing any IOs.
@@ -2532,9 +2529,8 @@ public class RocksDB extends RocksObject {
* @return false if the key definitely does not exist in the database,
* otherwise true.
*/
- public boolean keyMayExist(
- final ColumnFamilyHandle columnFamilyHandle,
- final byte[] key, int offset, int len,
+ public boolean keyMayExist(final ColumnFamilyHandle columnFamilyHandle, final byte[] key,
+ final int offset, final int len,
/* @Nullable */ final Holder<byte[]> valueHolder) {
* If the caller wants to obtain value when the key
* is found in memory, then {@code valueHolder} must be set.
- *
+ *
* This check is potentially lighter-weight than invoking
* {@link #get(ReadOptions, byte[])}. One way to make this
* lighter weight is to avoid doing any IOs.
@@ -2574,10 +2570,10 @@ public class RocksDB extends RocksObject {
* returns false, otherwise it returns true if the key might exist.
* That is to say that this method is probabilistic and may return false
* positives, but never a false negative.
- *
+ *
* If the caller wants to obtain value when the key
* is found in memory, then {@code valueHolder} must be set.
- *
+ *
* This check is potentially lighter-weight than invoking
* {@link #get(ReadOptions, byte[], int, int)}. One way to make this
* lighter weight is to avoid doing any IOs.
@@ -2608,10 +2604,10 @@ public class RocksDB extends RocksObject {
* returns false, otherwise it returns true if the key might exist.
* That is to say that this method is probabilistic and may return false
* positives, but never a false negative.
- *
+ *
* If the caller wants to obtain value when the key
* is found in memory, then {@code valueHolder} must be set.
- *
+ *
* This check is potentially lighter-weight than invoking
* {@link #get(ColumnFamilyHandle, ReadOptions, byte[])}. One way to make this
* lighter weight is to avoid doing any IOs.
@@ -2639,10 +2635,10 @@ public class RocksDB extends RocksObject {
* returns false, otherwise it returns true if the key might exist.
* That is to say that this method is probabilistic and may return false
* positives, but never a false negative.
- *
+ *
* If the caller wants to obtain value when the key
* is found in memory, then {@code valueHolder} must be set.
- *
+ *
* This check is potentially lighter-weight than invoking
* {@link #get(ColumnFamilyHandle, ReadOptions, byte[], int, int)}.
* One way to make this lighter weight is to avoid doing any IOs.
@@ -2985,7 +2981,7 @@ public class RocksDB extends RocksObject {
* @return Snapshot {@link Snapshot} instance
*/
public Snapshot getSnapshot() {
- long snapshotHandle = getSnapshot(nativeHandle_);
+ final long snapshotHandle = getSnapshot(nativeHandle_);
if (snapshotHandle != 0) {
return new Snapshot(snapshotHandle);
}
@@ -2994,7 +2990,7 @@ public class RocksDB extends RocksObject {
/**
* Release a previously acquired snapshot.
- *
+ *
* The caller must not use "snapshot" after this call.
*
* @param snapshot {@link Snapshot} instance
@@ -3161,7 +3157,7 @@ public class RocksDB extends RocksObject {
/**
* Reset internal stats for DB and all column families.
- *
+ *
* Note this doesn't reset {@link Options#statistics()} as it is not
* owned by DB.
*
@@ -3200,11 +3196,11 @@ public class RocksDB extends RocksObject {
/**
* Get the approximate file system space used by keys in each range.
- *
+ *
* Note that the returned sizes measure file system space usage, so
* if the user data compresses by a factor of ten, the returned
* sizes will be one-tenth the size of the corresponding user data size.
- *
+ *
* {@code sizeApproximationFlags} defines whether the returned size
* should include the recently written data in the mem-tables (if
* the mem-table type supports it), data serialized to disk, or both.
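A sketch (range endpoints illustrative; db assumed open) measuring one key range against both mem-tables and SST files:

final List<Range> ranges = Arrays.asList(
    new Range(new Slice("a".getBytes(UTF_8)), new Slice("m".getBytes(UTF_8))));
final long[] sizes = db.getApproximateSizes(ranges,
    SizeApproximationFlag.INCLUDE_FILES, SizeApproximationFlag.INCLUDE_MEMTABLES);
// sizes[0] approximates the file-system space used by keys in ["a", "m")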
@@ -3236,11 +3232,11 @@ public class RocksDB extends RocksObject {
/**
* Get the approximate file system space used by keys in each range for
* the default column family.
- *
+ *
* Note that the returned sizes measure file system space usage, so
* if the user data compresses by a factor of ten, the returned
* sizes will be one-tenth the size of the corresponding user data size.
- *
+ *
* {@code sizeApproximationFlags} defines whether the returned size
* should include the recently written data in the mem-tables (if
* the mem-table type supports it), data serialized to disk, or both.
@@ -3450,7 +3446,7 @@ public class RocksDB extends RocksObject {
*/
public MutableColumnFamilyOptions.MutableColumnFamilyOptionsBuilder getOptions(
/* @Nullable */ final ColumnFamilyHandle columnFamilyHandle) throws RocksDBException {
- String optionsString = getOptions(
+ final String optionsString = getOptions(
nativeHandle_, columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_);
return MutableColumnFamilyOptions.parse(optionsString, true);
}
@@ -3477,7 +3473,7 @@ public class RocksDB extends RocksObject {
* resulting options string into options
*/
public MutableDBOptions.MutableDBOptionsBuilder getDBOptions() throws RocksDBException {
- String optionsString = getDBOptions(nativeHandle_);
+ final String optionsString = getDBOptions(nativeHandle_);
return MutableDBOptions.parse(optionsString, true);
}
@@ -3511,7 +3507,7 @@ public class RocksDB extends RocksObject {
/**
* Takes a list of files specified by file names and
* compacts them to the specified level.
- *
+ *
* Note that the behavior is different from
* {@link #compactRange(ColumnFamilyHandle, byte[], byte[])}
* in that CompactFiles() performs the compaction job using the CURRENT
@@ -3543,7 +3539,7 @@ public class RocksDB extends RocksObject {
/**
* Takes a list of files specified by file names and
* compacts them to the specified level.
- *
+ *
* Note that the behavior is different from
* {@link #compactRange(ColumnFamilyHandle, byte[], byte[])}
* in that CompactFiles() performs the compaction job using the CURRENT
@@ -3586,7 +3582,7 @@ public class RocksDB extends RocksObject {
* returning.
*
*/
- public void cancelAllBackgroundWork(boolean wait) {
+ public void cancelAllBackgroundWork(final boolean wait) {
cancelAllBackgroundWork(nativeHandle_, wait);
}
@@ -3614,11 +3610,11 @@ public class RocksDB extends RocksObject {
/**
* Enable automatic compactions for the given column
* families if they were previously disabled.
- *
+ *
* The function will first set the
* {@link ColumnFamilyOptions#disableAutoCompactions()} option for each
* column family to false, after which it will schedule a flush/compaction.
- *
+ *
* NOTE: Setting disableAutoCompactions to 'false' through
* {@link #setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}
* does NOT schedule a flush/compaction afterwards, and only changes the
@@ -3761,15 +3757,15 @@ public class RocksDB extends RocksObject {
/* @Nullable */ final ColumnFamilyHandle columnFamilyHandle)
throws RocksDBException {
flush(flushOptions,
- columnFamilyHandle == null ? null : Arrays.asList(columnFamilyHandle));
+ columnFamilyHandle == null ? null : Collections.singletonList(columnFamilyHandle));
}
/**
* Flushes multiple column families.
- *
+ *
* If atomic flush is not enabled, this is equivalent to calling
* {@link #flush(FlushOptions, ColumnFamilyHandle)} multiple times.
- *
+ *
* If atomic flush is enabled, this will flush all column families
* specified up to the latest sequence number at the time when flush is
* requested.
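A sketch of flushing two column families together (cfHandleA and cfHandleB are assumed, pre-existing handles; the atomic behaviour additionally requires the DB to have been opened with atomic flush enabled):

try (final FlushOptions flushOptions = new FlushOptions().setWaitForFlush(true)) {
  db.flush(flushOptions, Arrays.asList(cfHandleA, cfHandleB));
}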
@@ -3800,13 +3796,13 @@ public class RocksDB extends RocksObject {
/**
* Sync the WAL.
- *
+ *
* Note that {@link #write(WriteOptions, WriteBatch)} followed by
- * {@link #syncWal()} is not exactly the same as
+ * {@code #syncWal()} is not exactly the same as
* {@link #write(WriteOptions, WriteBatch)} with
* {@link WriteOptions#sync()} set to true; In the latter case the changes
* won't be visible until the sync is done.
- *
+ *
* Currently only works if {@link Options#allowMmapWrites()} is set to false.
*
* @throws RocksDBException if an error occurs whilst syncing
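A sketch contrasting the two (key and value illustrative; db assumed open):

db.put("k".getBytes(UTF_8), "v".getBytes(UTF_8));
db.syncWal(); // the write above was already visible before this sync

try (final WriteOptions writeOptions = new WriteOptions().setSync(true);
     final WriteBatch batch = new WriteBatch()) {
  batch.put("k".getBytes(UTF_8), "v".getBytes(UTF_8));
  db.write(writeOptions, batch); // here the change is visible only after the sync
}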
@@ -3884,7 +3880,7 @@ public class RocksDB extends RocksObject {
/**
* Retrieve the list of all files in the database after flushing the memtable.
- *
+ *
* See {@link #getLiveFiles(boolean)}.
*
* @return the live files
@@ -3898,14 +3894,14 @@ public class RocksDB extends RocksObject {
/**
* Retrieve the list of all files in the database.
- *
+ *
* In case you have multiple column families, even if {@code flushMemtable}
* is true, you still need to call {@link #getSortedWalFiles()}
- * after {@link #getLiveFiles(boolean)} to compensate for new data that
+ * after {@code #getLiveFiles(boolean)} to compensate for new data that
* arrived to already-flushed column families while other column families
* were flushing.
- *
- * NOTE: Calling {@link #getLiveFiles(boolean)} followed by
+ *
+ * NOTE: Calling {@code #getLiveFiles(boolean)} followed by
* {@link #getSortedWalFiles()} can generate a lossless backup.
*
* @param flushMemtable set to true to flush before recording the live
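A sketch of the lossless-backup recipe described above (copy destinations elided; db assumed open):

final RocksDB.LiveFiles liveFiles = db.getLiveFiles(true); // flush, then list
final List<LogFile> walFiles = db.getSortedWalFiles();     // WALs second
// copying liveFiles.files (paths relative to the DB directory, with the
// MANIFEST truncated to liveFiles.manifestFileSize) plus walFiles yields a
// consistent backup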
@@ -4016,7 +4012,7 @@ public class RocksDB extends RocksObject {
* ingest the file into this level (2). A file that has a key range
* overlapping the memtable key range will require us to flush the memtable
* first before ingesting the file.
- *
+ *
* (1) External SST files can be created using {@link SstFileWriter}
* (2) We will try to ingest the files to the lowest possible level
* even if the file compression doesn't match the level compression
@@ -4041,7 +4037,7 @@ public class RocksDB extends RocksObject {
* ingest the file into this level (2). A file that has a key range
* overlapping the memtable key range will require us to flush the memtable
* first before ingesting the file.
- *
+ *
* (1) External SST files can be created using {@link SstFileWriter}
* (2) We will try to ingest the files to the lowest possible level
* even if the file compression doesn't match the level compression
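A sketch of the (1) and (2) flow (path, key and value illustrative): build a file with SstFileWriter, then ingest it into an open db.

try (final EnvOptions envOptions = new EnvOptions();
     final Options options = new Options();
     final SstFileWriter writer = new SstFileWriter(envOptions, options)) {
  writer.open("/tmp/bulk.sst");
  writer.put("k1".getBytes(UTF_8), "v1".getBytes(UTF_8)); // keys in sorted order
  writer.finish();
}
db.ingestExternalFile(Arrays.asList("/tmp/bulk.sst"),
    new IngestExternalFileOptions());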
@@ -4207,7 +4203,7 @@ public class RocksDB extends RocksObject {
/**
* Trace DB operations.
- *
+ *
* Use {@link #endTrace()} to stop tracing.
*
* @param traceOptions the options
@@ -4219,7 +4215,7 @@ public class RocksDB extends RocksObject {
final AbstractTraceWriter traceWriter) throws RocksDBException {
startTrace(nativeHandle_, traceOptions.getMaxTraceFileSize(),
traceWriter.nativeHandle_);
- /**
+ /*
* NOTE: {@link #startTrace(long, long, long)} transfers the ownership
* from Java to C++, so we must disown the native handle here.
*/
@@ -4228,7 +4224,7 @@ public class RocksDB extends RocksObject {
/**
* Stop tracing DB operations.
- *
+ *
* See {@link #startTrace(TraceOptions, AbstractTraceWriter)}
*
* @throws RocksDBException if an error occurs whilst ending the trace
@@ -4314,7 +4310,7 @@ public class RocksDB extends RocksObject {
}
private static long[] toRangeSliceHandles(final List<Range> ranges) {
diff --git a/java/src/main/java/org/rocksdb/SstFileManager.java b/java/src/main/java/org/rocksdb/SstFileManager.java
--- a/java/src/main/java/org/rocksdb/SstFileManager.java
+++ b/java/src/main/java/org/rocksdb/SstFileManager.java
* All SstFileManager public functions are thread-safe.
- *
+ *
* SstFileManager is not extensible.
*/
//@ThreadSafe
@@ -55,7 +55,7 @@ public final class SstFileManager extends RocksObject {
*
* @param env the environment.
* @param logger if not null, the logger will be used to log errors.
- *
+ *
* == Deletion rate limiting specific arguments ==
* @param rateBytesPerSec how many bytes should be deleted per second. If
* this value is set to 1024 (1 KB / sec) and we deleted a file of size
@@ -75,7 +75,7 @@ public final class SstFileManager extends RocksObject {
*
* @param env the environment.
* @param logger if not null, the logger will be used to log errors.
- *
+ *
* == Deletion rate limiting specific arguments ==
* @param rateBytesPerSec how many bytes should be deleted per second. If
* this value is set to 1024 (1 KB / sec) and we deleted a file of size
@@ -100,7 +100,7 @@ public final class SstFileManager extends RocksObject {
*
* @param env the environment.
* @param logger if not null, the logger will be used to log errors.
- *
+ *
* == Deletion rate limiting specific arguments ==
* @param rateBytesPerSec how many bytes should be deleted per second. If
* this value is set to 1024 (1 KB / sec) and we deleted a file of size
@@ -123,12 +123,11 @@ public final class SstFileManager extends RocksObject {
rateBytesPerSec, maxTrashDbRatio, bytesMaxDeleteChunk));
}
-
/**
* Update the maximum allowed space that should be used by RocksDB, if
* the total size of the SST files exceeds {@code maxAllowedSpace}, writes to
* RocksDB will fail.
- *
+ *
* Setting {@code maxAllowedSpace} to 0 will disable this feature;
* maximum allowed space will be infinite (Default value).
*
@@ -202,7 +201,7 @@ public final class SstFileManager extends RocksObject {
/**
* Set the delete rate limit.
- *
+ *
* Zero means disable delete rate limiting and delete files immediately.
*
* @param deleteRate the delete rate limit (in bytes per second).
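A sketch (the rate is an illustrative value) of attaching a rate-limited SstFileManager to a DB's options:

try (final SstFileManager sstFileManager = new SstFileManager(Env.getDefault());
     final Options options = new Options().setSstFileManager(sstFileManager)) {
  sstFileManager.setDeleteRateBytesPerSecond(64L * 1024 * 1024); // 64 MB/s
  // open the DB with these options; obsolete file deletion is now rate limited
}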
@@ -229,9 +228,8 @@ public final class SstFileManager extends RocksObject {
setMaxTrashDBRatio(nativeHandle_, ratio);
}
- private native static long newSstFileManager(final long handle,
- final long logger_handle, final long rateBytesPerSec,
- final double maxTrashDbRatio, final long bytesMaxDeleteChunk)
+ private static native long newSstFileManager(final long handle, final long logger_handle,
+ final long rateBytesPerSec, final double maxTrashDbRatio, final long bytesMaxDeleteChunk)
throws RocksDBException;
private native void setMaxAllowedSpaceUsage(final long handle,
final long maxAllowedSpace);
@@ -247,5 +245,5 @@ public final class SstFileManager extends RocksObject {
final long deleteRate);
private native double getMaxTrashDBRatio(final long handle);
private native void setMaxTrashDBRatio(final long handle, final double ratio);
- @Override protected final native void disposeInternal(final long handle);
+ @Override protected native void disposeInternal(final long handle);
}
diff --git a/java/src/main/java/org/rocksdb/SstFileReader.java b/java/src/main/java/org/rocksdb/SstFileReader.java
index bb1e94ee0..678c3519c 100644
--- a/java/src/main/java/org/rocksdb/SstFileReader.java
+++ b/java/src/main/java/org/rocksdb/SstFileReader.java
@@ -18,12 +18,12 @@ public class SstFileReader extends RocksObject {
* Returns an iterator that will iterate on all keys in the default
* column family including both keys in the DB and uncommitted keys in this
* transaction.
- *
+ *
* Setting {@link ReadOptions#setSnapshot(Snapshot)} will affect what is read
* from the DB but will NOT change which keys are read from this transaction
* (the keys in this transaction do not yet belong to any snapshot and will be
* fetched regardless).
- *
+ *
* Caller is responsible for deleting the returned Iterator.
*
* @param readOptions Read options.
@@ -32,7 +32,7 @@ public class SstFileReader extends RocksObject {
*/
public SstFileReaderIterator newIterator(final ReadOptions readOptions) {
assert (isOwningHandle());
- long iter = newIterator(nativeHandle_, readOptions.nativeHandle_);
+ final long iter = newIterator(nativeHandle_, readOptions.nativeHandle_);
return new SstFileReaderIterator(this, iter);
}
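A sketch of reading back a standalone SST file (path illustrative; error handling elided):

try (final Options options = new Options();
     final SstFileReader reader = new SstFileReader(options)) {
  reader.open("/tmp/bulk.sst");
  try (final ReadOptions readOptions = new ReadOptions();
       final SstFileReaderIterator it = reader.newIterator(readOptions)) {
    for (it.seekToFirst(); it.isValid(); it.next()) {
      // inspect it.key() / it.value()
    }
  }
}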
@@ -75,7 +75,7 @@ public class SstFileReader extends RocksObject {
private native void open(final long handle, final String filePath)
throws RocksDBException;
- private native static long newSstFileReader(final long optionsHandle);
+ private static native long newSstFileReader(final long optionsHandle);
private native void verifyChecksum(final long handle) throws RocksDBException;
private native TableProperties getTableProperties(final long handle)
throws RocksDBException;
diff --git a/java/src/main/java/org/rocksdb/SstFileWriter.java b/java/src/main/java/org/rocksdb/SstFileWriter.java
index fe00c1a12..5dd0b6dd5 100644
--- a/java/src/main/java/org/rocksdb/SstFileWriter.java
+++ b/java/src/main/java/org/rocksdb/SstFileWriter.java
@@ -199,12 +199,11 @@ public class SstFileWriter extends RocksObject {
return fileSize(nativeHandle_);
}
- private native static long newSstFileWriter(
- final long envOptionsHandle, final long optionsHandle,
+ private static native long newSstFileWriter(final long envOptionsHandle, final long optionsHandle,
final long userComparatorHandle, final byte comparatorType);
- private native static long newSstFileWriter(final long envOptionsHandle,
- final long optionsHandle);
+ private static native long newSstFileWriter(
+ final long envOptionsHandle, final long optionsHandle);
private native void open(final long handle, final String filePath)
throws RocksDBException;
diff --git a/java/src/main/java/org/rocksdb/SstPartitionerFixedPrefixFactory.java b/java/src/main/java/org/rocksdb/SstPartitionerFixedPrefixFactory.java
index d513c5f15..b1ccf08c1 100644
--- a/java/src/main/java/org/rocksdb/SstPartitionerFixedPrefixFactory.java
+++ b/java/src/main/java/org/rocksdb/SstPartitionerFixedPrefixFactory.java
@@ -9,11 +9,11 @@ package org.rocksdb;
* Fixed prefix factory. It partitions SST files using fixed prefix of the key.
*/
public class SstPartitionerFixedPrefixFactory extends SstPartitionerFactory {
- public SstPartitionerFixedPrefixFactory(long prefixLength) {
+ public SstPartitionerFixedPrefixFactory(final long prefixLength) {
super(newSstPartitionerFixedPrefixFactory0(prefixLength));
}
- private native static long newSstPartitionerFixedPrefixFactory0(long prefixLength);
+ private static native long newSstPartitionerFixedPrefixFactory0(long prefixLength);
@Override protected final native void disposeInternal(final long handle);
}
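A sketch (the 8-byte prefix length is an illustrative choice): SST files are cut wherever the fixed key prefix changes.

try (final SstPartitionerFixedPrefixFactory partitioner =
         new SstPartitionerFixedPrefixFactory(8);
     final Options options = new Options()
         .setCreateIfMissing(true)
         .setSstPartitionerFactory(partitioner)) {
  // open the DB with these options
}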
diff --git a/java/src/main/java/org/rocksdb/StateType.java b/java/src/main/java/org/rocksdb/StateType.java
index 803456bb2..803fa37d9 100644
--- a/java/src/main/java/org/rocksdb/StateType.java
+++ b/java/src/main/java/org/rocksdb/StateType.java
@@ -7,7 +7,7 @@ package org.rocksdb;
/**
* The type used to refer to a thread state.
- *
+ *
* A state describes lower-level action of a thread
* such as reading / writing a file or waiting for a mutex.
*/
diff --git a/java/src/main/java/org/rocksdb/Statistics.java b/java/src/main/java/org/rocksdb/Statistics.java
index 0938a6d58..9f3c9a62c 100644
--- a/java/src/main/java/org/rocksdb/Statistics.java
+++ b/java/src/main/java/org/rocksdb/Statistics.java
@@ -31,7 +31,7 @@ public class Statistics extends RocksObject {
/**
* Intentionally package-private.
- *
+ *
* Used from {@link DBOptions#statistics()}
*
* @param existingStatisticsHandle The C++ pointer to an existing statistics object
@@ -134,10 +134,11 @@ public class Statistics extends RocksObject {
return toString(nativeHandle_);
}
- private native static long newStatistics();
- private native static long newStatistics(final long otherStatisticsHandle);
- private native static long newStatistics(final byte[] ignoreHistograms);
- private native static long newStatistics(final byte[] ignoreHistograms, final long otherStatisticsHandle);
+ private static native long newStatistics();
+ private static native long newStatistics(final long otherStatisticsHandle);
+ private static native long newStatistics(final byte[] ignoreHistograms);
+ private static native long newStatistics(
+ final byte[] ignoreHistograms, final long otherStatisticsHandle);
@Override protected final native void disposeInternal(final long handle);
diff --git a/java/src/main/java/org/rocksdb/StatisticsCollector.java b/java/src/main/java/org/rocksdb/StatisticsCollector.java
index fb3f57150..fd00f85b2 100644
--- a/java/src/main/java/org/rocksdb/StatisticsCollector.java
+++ b/java/src/main/java/org/rocksdb/StatisticsCollector.java
@@ -62,48 +62,39 @@ public class StatisticsCollector {
}
private Runnable collectStatistics() {
- return new Runnable() {
-
- @Override
- public void run() {
- while (_isRunning) {
- try {
- if(Thread.currentThread().isInterrupted()) {
- break;
- }
- for(final StatsCollectorInput statsCollectorInput :
- _statsCollectorInputList) {
- Statistics statistics = statsCollectorInput.getStatistics();
- StatisticsCollectorCallback statsCallback =
- statsCollectorInput.getCallback();
+ return () -> {
+ while (_isRunning) {
+ try {
+ if (Thread.currentThread().isInterrupted()) {
+ break;
+ }
+ for (final StatsCollectorInput statsCollectorInput : _statsCollectorInputList) {
+ final Statistics statistics = statsCollectorInput.getStatistics();
+ final StatisticsCollectorCallback statsCallback = statsCollectorInput.getCallback();
- // Collect ticker data
- for(final TickerType ticker : TickerType.values()) {
- if(ticker != TickerType.TICKER_ENUM_MAX) {
- final long tickerValue = statistics.getTickerCount(ticker);
- statsCallback.tickerCallback(ticker, tickerValue);
- }
+ // Collect ticker data
+ for (final TickerType ticker : TickerType.values()) {
+ if (ticker != TickerType.TICKER_ENUM_MAX) {
+ final long tickerValue = statistics.getTickerCount(ticker);
+ statsCallback.tickerCallback(ticker, tickerValue);
}
+ }
- // Collect histogram data
- for(final HistogramType histogramType : HistogramType.values()) {
- if(histogramType != HistogramType.HISTOGRAM_ENUM_MAX) {
- final HistogramData histogramData =
- statistics.getHistogramData(histogramType);
- statsCallback.histogramCallback(histogramType, histogramData);
- }
+ // Collect histogram data
+ for (final HistogramType histogramType : HistogramType.values()) {
+ if (histogramType != HistogramType.HISTOGRAM_ENUM_MAX) {
+ final HistogramData histogramData = statistics.getHistogramData(histogramType);
+ statsCallback.histogramCallback(histogramType, histogramData);
}
}
-
- Thread.sleep(_statsCollectionInterval);
- }
- catch (final InterruptedException e) {
- Thread.currentThread().interrupt();
- break;
- }
- catch (final Exception e) {
- throw new RuntimeException("Error while calculating statistics", e);
}
+
+ Thread.sleep(_statsCollectionInterval);
+ } catch (final InterruptedException e) {
+ Thread.currentThread().interrupt();
+ break;
+ } catch (final Exception e) {
+ throw new RuntimeException("Error while calculating statistics", e);
}
}
};
diff --git a/java/src/main/java/org/rocksdb/StatisticsCollectorCallback.java b/java/src/main/java/org/rocksdb/StatisticsCollectorCallback.java
index f3785b15f..bed7828e0 100644
--- a/java/src/main/java/org/rocksdb/StatisticsCollectorCallback.java
+++ b/java/src/main/java/org/rocksdb/StatisticsCollectorCallback.java
@@ -7,7 +7,7 @@ package org.rocksdb;
/**
* Callback interface provided to StatisticsCollector.
- *
+ *
* Thread safety:
* StatisticsCollector doesn't make any guarantees about thread safety.
* If the same reference of StatisticsCollectorCallback is passed to multiple
diff --git a/java/src/main/java/org/rocksdb/StatsLevel.java b/java/src/main/java/org/rocksdb/StatsLevel.java
index 58504b84a..8190e503a 100644
--- a/java/src/main/java/org/rocksdb/StatsLevel.java
+++ b/java/src/main/java/org/rocksdb/StatsLevel.java
@@ -23,7 +23,7 @@ public enum StatsLevel {
/**
* Collect all stats, including measuring duration of mutex operations.
- *
+ *
* If getting time is expensive on the platform to run, it can
* reduce scalability to more threads, especially for writes.
*/
diff --git a/java/src/main/java/org/rocksdb/Status.java b/java/src/main/java/org/rocksdb/Status.java
index 033ed3ea1..5c50e700f 100644
--- a/java/src/main/java/org/rocksdb/Status.java
+++ b/java/src/main/java/org/rocksdb/Status.java
@@ -9,7 +9,7 @@ import java.util.Objects;
/**
* Represents the status returned by a function call in RocksDB.
- *
+ *
* Currently only used with {@link RocksDBException} when the
* status is not {@link Code#Ok}
*/
@@ -139,12 +139,12 @@ public class Status {
}
@Override
- public boolean equals(Object o) {
+ public boolean equals(final Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
- Status status = (Status) o;
+ final Status status = (Status) o;
return code == status.code && subCode == status.subCode && Objects.equals(state, status.state);
}
diff --git a/java/src/main/java/org/rocksdb/StringAppendOperator.java b/java/src/main/java/org/rocksdb/StringAppendOperator.java
index ddbccff46..547371e7c 100644
--- a/java/src/main/java/org/rocksdb/StringAppendOperator.java
+++ b/java/src/main/java/org/rocksdb/StringAppendOperator.java
@@ -11,19 +11,19 @@ package org.rocksdb;
* two strings.
*/
public class StringAppendOperator extends MergeOperator {
- public StringAppendOperator() {
- this(',');
- }
+ public StringAppendOperator() {
+ this(',');
+ }
- public StringAppendOperator(char delim) {
- super(newSharedStringAppendOperator(delim));
- }
+ public StringAppendOperator(final char delim) {
+ super(newSharedStringAppendOperator(delim));
+ }
- public StringAppendOperator(String delim) {
- super(newSharedStringAppendOperator(delim));
- }
+ public StringAppendOperator(final String delim) {
+ super(newSharedStringAppendOperator(delim));
+ }
- private native static long newSharedStringAppendOperator(final char delim);
- private native static long newSharedStringAppendOperator(final String delim);
- @Override protected final native void disposeInternal(final long handle);
+ private static native long newSharedStringAppendOperator(final char delim);
+ private static native long newSharedStringAppendOperator(final String delim);
+ @Override protected final native void disposeInternal(final long handle);
}
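A sketch (path, key and values illustrative) of merge-based appends with a ',' delimiter:

try (final StringAppendOperator stringAppend = new StringAppendOperator(',');
     final Options options = new Options()
         .setCreateIfMissing(true)
         .setMergeOperator(stringAppend);
     final RocksDB db = RocksDB.open(options, "/tmp/merge-db")) {
  db.merge("colors".getBytes(UTF_8), "red".getBytes(UTF_8));
  db.merge("colors".getBytes(UTF_8), "blue".getBytes(UTF_8));
  // db.get("colors".getBytes(UTF_8)) now yields "red,blue"
}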
diff --git a/java/src/main/java/org/rocksdb/TableFileCreationBriefInfo.java b/java/src/main/java/org/rocksdb/TableFileCreationBriefInfo.java
index 5a383ade4..8dc56796a 100644
--- a/java/src/main/java/org/rocksdb/TableFileCreationBriefInfo.java
+++ b/java/src/main/java/org/rocksdb/TableFileCreationBriefInfo.java
@@ -82,12 +82,12 @@ public class TableFileCreationBriefInfo {
}
@Override
- public boolean equals(Object o) {
+ public boolean equals(final Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
- TableFileCreationBriefInfo that = (TableFileCreationBriefInfo) o;
+ final TableFileCreationBriefInfo that = (TableFileCreationBriefInfo) o;
return jobId == that.jobId && Objects.equals(dbName, that.dbName)
&& Objects.equals(columnFamilyName, that.columnFamilyName)
&& Objects.equals(filePath, that.filePath) && reason == that.reason;
diff --git a/java/src/main/java/org/rocksdb/TableFileCreationInfo.java b/java/src/main/java/org/rocksdb/TableFileCreationInfo.java
index 7742f32f1..5654603c3 100644
--- a/java/src/main/java/org/rocksdb/TableFileCreationInfo.java
+++ b/java/src/main/java/org/rocksdb/TableFileCreationInfo.java
@@ -62,12 +62,12 @@ public class TableFileCreationInfo extends TableFileCreationBriefInfo {
}
@Override
- public boolean equals(Object o) {
+ public boolean equals(final Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
- TableFileCreationInfo that = (TableFileCreationInfo) o;
+ final TableFileCreationInfo that = (TableFileCreationInfo) o;
return fileSize == that.fileSize && Objects.equals(tableProperties, that.tableProperties)
&& Objects.equals(status, that.status);
}
diff --git a/java/src/main/java/org/rocksdb/TableFileDeletionInfo.java b/java/src/main/java/org/rocksdb/TableFileDeletionInfo.java
index 8aad03ae8..9a777e333 100644
--- a/java/src/main/java/org/rocksdb/TableFileDeletionInfo.java
+++ b/java/src/main/java/org/rocksdb/TableFileDeletionInfo.java
@@ -62,12 +62,12 @@ public class TableFileDeletionInfo {
}
@Override
- public boolean equals(Object o) {
+ public boolean equals(final Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
- TableFileDeletionInfo that = (TableFileDeletionInfo) o;
+ final TableFileDeletionInfo that = (TableFileDeletionInfo) o;
return jobId == that.jobId && Objects.equals(dbName, that.dbName)
&& Objects.equals(filePath, that.filePath) && Objects.equals(status, that.status);
}
diff --git a/java/src/main/java/org/rocksdb/TableFormatConfig.java b/java/src/main/java/org/rocksdb/TableFormatConfig.java
index dbe524c42..726c6f122 100644
--- a/java/src/main/java/org/rocksdb/TableFormatConfig.java
+++ b/java/src/main/java/org/rocksdb/TableFormatConfig.java
@@ -18,5 +18,5 @@ public abstract class TableFormatConfig {
*
* @return native handle address to native table instance.
*/
- abstract protected long newTableFactoryHandle();
+ protected abstract long newTableFactoryHandle();
}
diff --git a/java/src/main/java/org/rocksdb/TableProperties.java b/java/src/main/java/org/rocksdb/TableProperties.java
index 096341a4c..02b95608e 100644
--- a/java/src/main/java/org/rocksdb/TableProperties.java
+++ b/java/src/main/java/org/rocksdb/TableProperties.java
@@ -380,12 +380,12 @@ public class TableProperties {
}
@Override
- public boolean equals(Object o) {
+ public boolean equals(final Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
- TableProperties that = (TableProperties) o;
+ final TableProperties that = (TableProperties) o;
return dataSize == that.dataSize && indexSize == that.indexSize
&& indexPartitions == that.indexPartitions && topLevelIndexSize == that.topLevelIndexSize
&& indexKeyIsUserKey == that.indexKeyIsUserKey
diff --git a/java/src/main/java/org/rocksdb/ThreadStatus.java b/java/src/main/java/org/rocksdb/ThreadStatus.java
index 062df5889..38e7fad9c 100644
--- a/java/src/main/java/org/rocksdb/ThreadStatus.java
+++ b/java/src/main/java/org/rocksdb/ThreadStatus.java
@@ -15,7 +15,7 @@ public class ThreadStatus {
private final OperationType operationType;
private final long operationElapsedTime; // microseconds
private final OperationStage operationStage;
- private final long operationProperties[];
+ private final long[] operationProperties;
private final StateType stateType;
/**
@@ -113,7 +113,7 @@ public class ThreadStatus {
/**
* Get the list of properties that describe some details about the current
* operation.
- *
+ *
* Each field might have different meanings for different operations.
*
* @return the properties
diff --git a/java/src/main/java/org/rocksdb/TickerType.java b/java/src/main/java/org/rocksdb/TickerType.java
index f100bb277..c167f74c4 100644
--- a/java/src/main/java/org/rocksdb/TickerType.java
+++ b/java/src/main/java/org/rocksdb/TickerType.java
@@ -7,7 +7,7 @@ package org.rocksdb;
/**
* The logical mapping of tickers defined in rocksdb::Tickers.
- *
+ *
* Java byte value mappings don't align 1:1 to the c++ values. The c++
* rocksdb::Tickers enumeration type is uint32_t, while java org.rocksdb.TickerType
* is byte; this causes mapping issues when the rocksdb::Tickers value is greater
* than 127 (0x7F), as that range is not representable via the jbyte jni interface.
diff --git a/java/src/main/java/org/rocksdb/Transaction.java b/java/src/main/java/org/rocksdb/Transaction.java
index b2cc8a932..7d61a208e 100644
--- a/java/src/main/java/org/rocksdb/Transaction.java
+++ b/java/src/main/java/org/rocksdb/Transaction.java
@@ -11,7 +11,7 @@ import java.util.List;
/**
* Provides BEGIN/COMMIT/ROLLBACK transactions.
- *
+ *
* To use transactions, you must first create either an
* {@link OptimisticTransactionDB} or a {@link TransactionDB}
*
@@ -20,7 +20,7 @@ import java.util.List;
* {@link TransactionDB#beginTransaction(org.rocksdb.WriteOptions)}
*
* It is up to the caller to synchronize access to this object.
- *
+ *
* See samples/src/main/java/OptimisticTransactionSample.java and
* samples/src/main/java/TransactionSample.java for some simple
* examples.
@@ -50,22 +50,22 @@ public class Transaction extends RocksObject {
* any keys successfully written (or fetched via {@link #getForUpdate}) have
* not been modified outside of this transaction since the time the snapshot
* was set.
- *
+ *
* If a snapshot has not been set, the transaction guarantees that keys have
* not been modified since the time each key was first written (or fetched via
* {@link #getForUpdate}).
- *
- * Using {@link #setSnapshot()} will provide stricter isolation guarantees
+ *
+ * Using {@code #setSnapshot()} will provide stricter isolation guarantees
* at the expense of potentially more transaction failures due to conflicts
* with other writes.
- *
- * Calling {@link #setSnapshot()} has no effect on keys written before this
+ *
+ * Calling {@code #setSnapshot()} has no effect on keys written before this
* function has been called.
- *
- * {@link #setSnapshot()} may be called multiple times if you would like to
+ *
+ * {@code #setSnapshot()} may be called multiple times if you would like to
* change the snapshot used for different operations in this transaction.
- *
- * Calling {@link #setSnapshot()} will not affect the version of Data returned
+ *
+ * Calling {@code #setSnapshot()} will not affect the version of data returned
* by get(...) methods. See {@link #get} for more details.
*/
public void setSnapshot() {
@@ -79,19 +79,19 @@ public class Transaction extends RocksObject {
* By calling this function, the transaction will essentially call
* {@link #setSnapshot()} for you right before performing the next
* write/getForUpdate.
- *
- * Calling {@link #setSnapshotOnNextOperation()} will not affect what
+ *
+ * Calling {@code #setSnapshotOnNextOperation()} will not affect what
* snapshot is returned by {@link #getSnapshot} until the next
* write/getForUpdate is executed.
- *
+ *
 * When the snapshot is created, the notifier's snapshotCreated method will
* be called so that the caller can get access to the snapshot.
- *
+ *
* This is an optimization to reduce the likelihood of conflicts that
* could occur in between the time {@link #setSnapshot()} is called and the
 * first write/getForUpdate operation, i.e. it prevents the following
 * race condition:
- *
+ *
* txn1->setSnapshot();
* txn2->put("A", ...);
* txn2->commit();
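(A minimal runnable sketch of the deferred-snapshot pattern described above; the DB path, keys, and options are illustrative placeholders, not part of this patch.)

    try (final Options options = new Options().setCreateIfMissing(true);
         final TransactionDBOptions txnDbOptions = new TransactionDBOptions();
         final TransactionDB db =
             TransactionDB.open(options, txnDbOptions, "/tmp/txn_example");
         final WriteOptions writeOptions = new WriteOptions();
         final Transaction txn1 = db.beginTransaction(writeOptions)) {
      // No snapshot exists yet, so a concurrent txn2.put("A", ...) that
      // commits here does not conflict with txn1.
      txn1.setSnapshotOnNextOperation();
      // The snapshot is taken immediately before this first write.
      txn1.put("A".getBytes(), "1".getBytes());
      txn1.commit();
    }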
@@ -108,20 +108,20 @@ public class Transaction extends RocksObject {
* By calling this function, the transaction will essentially call
* {@link #setSnapshot()} for you right before performing the next
* write/getForUpdate.
- *
+ *
* Calling {@link #setSnapshotOnNextOperation()} will not affect what
* snapshot is returned by {@link #getSnapshot} until the next
* write/getForUpdate is executed.
- *
+ *
 * When the snapshot is created, the
* {@link AbstractTransactionNotifier#snapshotCreated(Snapshot)} method will
* be called so that the caller can get access to the snapshot.
- *
+ *
* This is an optimization to reduce the likelihood of conflicts that
* could occur in between the time {@link #setSnapshot()} is called and the
 * first write/getForUpdate operation, i.e. it prevents the following
 * race condition:
- *
+ *
* txn1->setSnapshot();
* txn2->put("A", ...);
* txn2->commit();
@@ -137,38 +137,37 @@ public class Transaction extends RocksObject {
setSnapshotOnNextOperation(nativeHandle_, transactionNotifier.nativeHandle_);
}
- /**
- * Returns the Snapshot created by the last call to {@link #setSnapshot()}.
- *
- * REQUIRED: The returned Snapshot is only valid up until the next time
- * {@link #setSnapshot()}/{@link #setSnapshotOnNextOperation()} is called,
- * {@link #clearSnapshot()} is called, or the Transaction is deleted.
- *
- * @return The snapshot or null if there is no snapshot
- */
+ /**
+ * Returns the Snapshot created by the last call to {@link #setSnapshot()}.
+ *
+ * REQUIRED: The returned Snapshot is only valid up until the next time
+ * {@link #setSnapshot()}/{@link #setSnapshotOnNextOperation()} is called,
+ * {@link #clearSnapshot()} is called, or the Transaction is deleted.
+ *
+ * @return The snapshot or null if there is no snapshot
+ */
public Snapshot getSnapshot() {
assert(isOwningHandle());
final long snapshotNativeHandle = getSnapshot(nativeHandle_);
if(snapshotNativeHandle == 0) {
return null;
} else {
- final Snapshot snapshot = new Snapshot(snapshotNativeHandle);
- return snapshot;
+ return new Snapshot(snapshotNativeHandle);
}
}
/**
* Clears the current snapshot (i.e. no snapshot will be 'set')
- *
+ *
* This removes any snapshot that currently exists or is set to be created
* on the next update operation ({@link #setSnapshotOnNextOperation()}).
- *
- * Calling {@link #clearSnapshot()} has no effect on keys written before this
+ *
+ * Calling {@code #clearSnapshot()} has no effect on keys written before this
* function has been called.
- *
+ *
* If a reference to a snapshot was retrieved via {@link #getSnapshot()}, it
* will no longer be valid and should be discarded after a call to
- * {@link #clearSnapshot()}.
+ * {@code #clearSnapshot()}.
*/
public void clearSnapshot() {
assert(isOwningHandle());
@@ -186,17 +185,17 @@ public class Transaction extends RocksObject {
/**
* Write all batched keys to the db atomically.
- *
+ *
* Returns OK on success.
- *
+ *
 * May return any error status that could be returned by DB::Write().
- *
+ *
* If this transaction was created by an {@link OptimisticTransactionDB}
* Status::Busy() may be returned if the transaction could not guarantee
* that there are no write conflicts. Status::TryAgain() may be returned
* if the memtable history size is not large enough
* (See max_write_buffer_number_to_maintain).
- *
+ *
* If this transaction was created by a {@link TransactionDB},
* Status::Expired() may be returned if this transaction has lived for
* longer than {@link TransactionOptions#getExpiration()}.
@@ -221,7 +220,7 @@ public class Transaction extends RocksObject {
/**
* Records the state of the transaction for future calls to
* {@link #rollbackToSavePoint()}.
- *
+ *
* May be called multiple times to set multiple save points.
*
* @throws RocksDBException if an error occurs whilst setting a save point
@@ -235,7 +234,7 @@ public class Transaction extends RocksObject {
 * Undoes all operations in this transaction (put, merge, delete, putLogData)
* since the most recent call to {@link #setSavePoint()} and removes the most
* recent {@link #setSavePoint()}.
- *
+ *
* If there is no previous call to {@link #setSavePoint()},
* returns Status::NotFound()
*
@@ -252,11 +251,11 @@ public class Transaction extends RocksObject {
* also read pending changes in this transaction.
* Currently, this function will return Status::MergeInProgress if the most
* recent write to the queried key in this batch is a Merge.
- *
+ *
* If {@link ReadOptions#snapshot()} is not set, the current version of the
* key will be read. Calling {@link #setSnapshot()} does not affect the
* version of the data returned.
- *
+ *
* Note that setting {@link ReadOptions#setSnapshot(Snapshot)} will affect
* what is read from the DB but will NOT change which keys are read from this
* transaction (the keys in this transaction do not yet belong to any snapshot
@@ -285,11 +284,11 @@ public class Transaction extends RocksObject {
* also read pending changes in this transaction.
* Currently, this function will return Status::MergeInProgress if the most
* recent write to the queried key in this batch is a Merge.
- *
+ *
* If {@link ReadOptions#snapshot()} is not set, the current version of the
* key will be read. Calling {@link #setSnapshot()} does not affect the
* version of the data returned.
- *
+ *
* Note that setting {@link ReadOptions#setSnapshot(Snapshot)} will affect
* what is read from the DB but will NOT change which keys are read from this
* transaction (the keys in this transaction do not yet belong to any snapshot
@@ -316,11 +315,11 @@ public class Transaction extends RocksObject {
* also read pending changes in this transaction.
* Currently, this function will return Status::MergeInProgress if the most
* recent write to the queried key in this batch is a Merge.
- *
+ *
* If {@link ReadOptions#snapshot()} is not set, the current version of the
* key will be read. Calling {@link #setSnapshot()} does not affect the
* version of the data returned.
- *
+ *
* Note that setting {@link ReadOptions#setSnapshot(Snapshot)} will affect
* what is read from the DB but will NOT change which keys are read from this
* transaction (the keys in this transaction do not yet belong to any snapshot
@@ -367,11 +366,11 @@ public class Transaction extends RocksObject {
* also read pending changes in this transaction.
* Currently, this function will return Status::MergeInProgress if the most
* recent write to the queried key in this batch is a Merge.
- *
+ *
* If {@link ReadOptions#snapshot()} is not set, the current version of the
* key will be read. Calling {@link #setSnapshot()} does not affect the
* version of the data returned.
- *
+ *
* Note that setting {@link ReadOptions#setSnapshot(Snapshot)} will affect
* what is read from the DB but will NOT change which keys are read from this
* transaction (the keys in this transaction do not yet belong to any snapshot
@@ -417,11 +416,11 @@ public class Transaction extends RocksObject {
* also read pending changes in this transaction.
* Currently, this function will return Status::MergeInProgress if the most
* recent write to the queried key in this batch is a Merge.
- *
+ *
* If {@link ReadOptions#snapshot()} is not set, the current version of the
* key will be read. Calling {@link #setSnapshot()} does not affect the
* version of the data returned.
- *
+ *
* Note that setting {@link ReadOptions#setSnapshot(Snapshot)} will affect
* what is read from the DB but will NOT change which keys are read from this
* transaction (the keys in this transaction do not yet belong to any snapshot
@@ -454,11 +453,11 @@ public class Transaction extends RocksObject {
* also read pending changes in this transaction.
* Currently, this function will return Status::MergeInProgress if the most
* recent write to the queried key in this batch is a Merge.
- *
+ *
* If {@link ReadOptions#snapshot()} is not set, the current version of the
* key will be read. Calling {@link #setSnapshot()} does not affect the
* version of the data returned.
- *
+ *
* Note that setting {@link ReadOptions#setSnapshot(Snapshot)} will affect
* what is read from the DB but will NOT change which keys are read from this
* transaction (the keys in this transaction do not yet belong to any snapshot
@@ -489,22 +488,22 @@ public class Transaction extends RocksObject {
* transaction after it has first been read (or after the snapshot if a
* snapshot is set in this transaction). The transaction behavior is the
* same regardless of whether the key exists or not.
- *
+ *
* Note: Currently, this function will return Status::MergeInProgress
* if the most recent write to the queried key in this batch is a Merge.
- *
+ *
* The values returned by this function are similar to
* {@link RocksDB#get(ColumnFamilyHandle, ReadOptions, byte[])}.
* If value==nullptr, then this function will not read any data, but will
 * still ensure that this key cannot be written to from outside of this
* transaction.
- *
+ *
* If this transaction was created by an {@link OptimisticTransactionDB},
* {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)}
* could cause {@link #commit()} to fail. Otherwise, it could return any error
* that could be returned by
* {@link RocksDB#get(ColumnFamilyHandle, ReadOptions, byte[])}.
- *
+ *
* If this transaction was created on a {@link TransactionDB}, an
* {@link RocksDBException} may be thrown with an accompanying {@link Status}
* when:
@@ -570,22 +569,22 @@ public class Transaction extends RocksObject {
* transaction after it has first been read (or after the snapshot if a
* snapshot is set in this transaction). The transaction behavior is the
* same regardless of whether the key exists or not.
- *
+ *
* Note: Currently, this function will return Status::MergeInProgress
* if the most recent write to the queried key in this batch is a Merge.
- *
+ *
* The values returned by this function are similar to
* {@link RocksDB#get(ReadOptions, byte[])}.
* If value==nullptr, then this function will not read any data, but will
 * still ensure that this key cannot be written to from outside of this
* transaction.
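(As a hedged sketch, assuming a TransactionDB "db" and WriteOptions "writeOptions" opened elsewhere, the read-modify-write pattern this method supports looks like the following; the key name is a placeholder.)

    try (final ReadOptions readOptions = new ReadOptions();
         final Transaction txn = db.beginTransaction(writeOptions)) {
      final byte[] key = "counter".getBytes();
      // Lock the key (TransactionDB) or track it for commit-time validation
      // (OptimisticTransactionDB) before modifying it.
      final byte[] value = txn.getForUpdate(readOptions, key, true /* exclusive */);
      final long next = value == null ? 1L : Long.parseLong(new String(value)) + 1L;
      txn.put(key, Long.toString(next).getBytes());
      txn.commit(); // fails if another writer modified the key in the meantime
    }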
- *
+ *
* If this transaction was created on an {@link OptimisticTransactionDB},
* {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)}
* could cause {@link #commit()} to fail. Otherwise, it could return any error
* that could be returned by
* {@link RocksDB#get(ReadOptions, byte[])}.
- *
+ *
* If this transaction was created on a {@link TransactionDB}, an
* {@link RocksDBException} may be thrown with an accompanying {@link Status}
* when:
@@ -618,7 +617,7 @@ public class Transaction extends RocksObject {
/**
* A multi-key version of
* {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)}.
- *
+ *
*
* @param readOptions Read options.
* @param columnFamilyHandles {@link org.rocksdb.ColumnFamilyHandle}
@@ -655,7 +654,7 @@ public class Transaction extends RocksObject {
/**
* A multi-key version of
* {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)}.
- *
+ *
*
* @param readOptions Read options.
* @param columnFamilyHandles {@link org.rocksdb.ColumnFamilyHandle}
@@ -691,7 +690,7 @@ public class Transaction extends RocksObject {
/**
* A multi-key version of {@link #getForUpdate(ReadOptions, byte[], boolean)}.
- *
+ *
*
* @param readOptions Read options.
* @param keys the keys to retrieve the values for.
@@ -715,7 +714,7 @@ public class Transaction extends RocksObject {
/**
* A multi-key version of {@link #getForUpdate(ReadOptions, byte[], boolean)}.
- *
+ *
*
* @param readOptions Read options.
* @param keys the keys to retrieve the values for.
@@ -741,14 +740,14 @@ public class Transaction extends RocksObject {
* Returns an iterator that will iterate on all keys in the default
* column family including both keys in the DB and uncommitted keys in this
* transaction.
- *
+ *
* Setting {@link ReadOptions#setSnapshot(Snapshot)} will affect what is read
* from the DB but will NOT change which keys are read from this transaction
* (the keys in this transaction do not yet belong to any snapshot and will be
* fetched regardless).
- *
+ *
 * Caller is responsible for closing the returned Iterator.
- *
+ *
* The returned iterator is only valid until {@link #commit()},
* {@link #rollback()}, or {@link #rollbackToSavePoint()} is called.
*
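(A short usage sketch, assuming a Transaction "txn" as in the examples above; note the iterator is closed before the transaction completes.)

    try (final ReadOptions readOptions = new ReadOptions();
         final RocksIterator it = txn.getIterator(readOptions)) {
      for (it.seekToFirst(); it.isValid(); it.next()) {
        // Sees committed keys from the DB plus this transaction's
        // uncommitted writes.
        System.out.println(new String(it.key()) + " = " + new String(it.value()));
      }
    } // closed before commit()/rollback()/rollbackToSavePoint()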
@@ -766,15 +765,15 @@ public class Transaction extends RocksObject {
* Returns an iterator that will iterate on all keys in the column family
* specified by {@code columnFamilyHandle} including both keys in the DB
* and uncommitted keys in this transaction.
- *
+ *
* Setting {@link ReadOptions#setSnapshot(Snapshot)} will affect what is read
* from the DB but will NOT change which keys are read from this transaction
* (the keys in this transaction do not yet belong to any snapshot and will be
* fetched regardless).
- *
+ *
* Caller is responsible for calling {@link RocksIterator#close()} on
* the returned Iterator.
- *
+ *
* The returned iterator is only valid until {@link #commit()},
* {@link #rollback()}, or {@link #rollbackToSavePoint()} is called.
*
@@ -794,10 +793,10 @@ public class Transaction extends RocksObject {
/**
* Similar to {@link RocksDB#put(ColumnFamilyHandle, byte[], byte[])}, but
 * will also perform conflict checking on the keys being written.
- *
+ *
* If this Transaction was created on an {@link OptimisticTransactionDB},
* these functions should always succeed.
- *
+ *
* If this Transaction was created on a {@link TransactionDB}, an
* {@link RocksDBException} may be thrown with an accompanying {@link Status}
* when:
@@ -829,12 +828,12 @@ public class Transaction extends RocksObject {
/**
* Similar to {@link #put(ColumnFamilyHandle, byte[], byte[], boolean)}
* but with {@code assumeTracked = false}.
- *
+ *
 * Will also perform conflict checking on the keys being written.
- *
+ *
* If this Transaction was created on an {@link OptimisticTransactionDB},
* these functions should always succeed.
- *
+ *
* If this Transaction was created on a {@link TransactionDB}, an
* {@link RocksDBException} may be thrown with an accompanying {@link Status}
* when:
@@ -861,10 +860,10 @@ public class Transaction extends RocksObject {
/**
* Similar to {@link RocksDB#put(byte[], byte[])}, but
 * will also perform conflict checking on the keys being written.
- *
+ *
* If this Transaction was created on an {@link OptimisticTransactionDB},
* these functions should always succeed.
- *
+ *
* If this Transaction was created on a {@link TransactionDB}, an
* {@link RocksDBException} may be thrown with an accompanying {@link Status}
* when:
@@ -915,7 +914,7 @@ public class Transaction extends RocksObject {
/**
* Similar to {@link #put(ColumnFamilyHandle, byte[][], byte[][], boolean)}
 * but with {@code assumeTracked = false}.
- *
+ *
* Allows you to specify the key and value in several parts that will be
* concatenated together.
*
@@ -956,10 +955,10 @@ public class Transaction extends RocksObject {
/**
* Similar to {@link RocksDB#merge(ColumnFamilyHandle, byte[], byte[])}, but
 * will also perform conflict checking on the keys being written.
- *
+ *
* If this Transaction was created on an {@link OptimisticTransactionDB},
* these functions should always succeed.
- *
+ *
* If this Transaction was created on a {@link TransactionDB}, an
* {@link RocksDBException} may be thrown with an accompanying {@link Status}
* when:
@@ -992,12 +991,12 @@ public class Transaction extends RocksObject {
/**
* Similar to {@link #merge(ColumnFamilyHandle, byte[], byte[], boolean)}
* but with {@code assumeTracked = false}.
- *
+ *
 * Will also perform conflict checking on the keys being written.
- *
+ *
* If this Transaction was created on an {@link OptimisticTransactionDB},
* these functions should always succeed.
- *
+ *
* If this Transaction was created on a {@link TransactionDB}, an
* {@link RocksDBException} may be thrown with an accompanying {@link Status}
* when:
@@ -1024,10 +1023,10 @@ public class Transaction extends RocksObject {
/**
* Similar to {@link RocksDB#merge(byte[], byte[])}, but
 * will also perform conflict checking on the keys being written.
- *
+ *
* If this Transaction was created on an {@link OptimisticTransactionDB},
* these functions should always succeed.
- *
+ *
* If this Transaction was created on a {@link TransactionDB}, an
* {@link RocksDBException} may be thrown with an accompanying {@link Status}
* when:
@@ -1052,10 +1051,10 @@ public class Transaction extends RocksObject {
/**
* Similar to {@link RocksDB#delete(ColumnFamilyHandle, byte[])}, but
 * will also perform conflict checking on the keys being written.
- *
+ *
* If this Transaction was created on an {@link OptimisticTransactionDB},
* these functions should always succeed.
- *
+ *
* If this Transaction was created on a {@link TransactionDB}, an
* {@link RocksDBException} may be thrown with an accompanying {@link Status}
* when:
@@ -1086,12 +1085,12 @@ public class Transaction extends RocksObject {
/**
* Similar to {@link #delete(ColumnFamilyHandle, byte[], boolean)}
* but with {@code assumeTracked = false}.
- *
+ *
 * Will also perform conflict checking on the keys being written.
- *
+ *
* If this Transaction was created on an {@link OptimisticTransactionDB},
* these functions should always succeed.
- *
+ *
* If this Transaction was created on a {@link TransactionDB}, an
* {@link RocksDBException} may be thrown with an accompanying {@link Status}
* when:
@@ -1117,10 +1116,10 @@ public class Transaction extends RocksObject {
/**
* Similar to {@link RocksDB#delete(byte[])}, but
 * will also perform conflict checking on the keys being written.
- *
+ *
* If this Transaction was created on an {@link OptimisticTransactionDB},
* these functions should always succeed.
- *
+ *
* If this Transaction was created on a {@link TransactionDB}, an
* {@link RocksDBException} may be thrown with an accompanying {@link Status}
* when:
@@ -1168,7 +1167,7 @@ public class Transaction extends RocksObject {
/**
 * Similar to {@link #delete(ColumnFamilyHandle, byte[][], boolean)}
* but with {@code assumeTracked = false}.
- *
+ *
* Allows you to specify the key in several parts that will be
* concatenated together.
*
@@ -1204,10 +1203,10 @@ public class Transaction extends RocksObject {
/**
* Similar to {@link RocksDB#singleDelete(ColumnFamilyHandle, byte[])}, but
 * will also perform conflict checking on the keys being written.
- *
+ *
* If this Transaction was created on an {@link OptimisticTransactionDB},
* these functions should always succeed.
- *
+ *
* If this Transaction was created on a {@link TransactionDB}, an
* {@link RocksDBException} may be thrown with an accompanying {@link Status}
* when:
@@ -1239,12 +1238,12 @@ public class Transaction extends RocksObject {
/**
* Similar to {@link #singleDelete(ColumnFamilyHandle, byte[], boolean)}
* but with {@code assumeTracked = false}.
- *
+ *
 * Will also perform conflict checking on the keys being written.
- *
+ *
* If this Transaction was created on an {@link OptimisticTransactionDB},
* these functions should always succeed.
- *
+ *
* If this Transaction was created on a {@link TransactionDB}, an
* {@link RocksDBException} may be thrown with an accompanying {@link Status}
* when:
@@ -1271,10 +1270,10 @@ public class Transaction extends RocksObject {
/**
* Similar to {@link RocksDB#singleDelete(byte[])}, but
 * will also perform conflict checking on the keys being written.
- *
+ *
* If this Transaction was created on an {@link OptimisticTransactionDB},
* these functions should always succeed.
- *
+ *
* If this Transaction was created on a {@link TransactionDB}, an
* {@link RocksDBException} may be thrown with an accompanying {@link Status}
* when:
@@ -1324,7 +1323,7 @@ public class Transaction extends RocksObject {
/**
 * Similar to {@link #singleDelete(ColumnFamilyHandle, byte[][], boolean)}
* but with {@code assumeTracked = false}.
- *
+ *
* Allows you to specify the key in several parts that will be
* concatenated together.
*
@@ -1363,10 +1362,10 @@ public class Transaction extends RocksObject {
 * Similar to {@link RocksDB#put(ColumnFamilyHandle, byte[], byte[])},
 * but operates on the transaction's write batch. This write will only happen
 * if this transaction gets committed successfully.
- *
+ *
 * Unlike {@link #put(ColumnFamilyHandle, byte[], byte[])}, no conflict
 * checking will be performed for this key.
- *
+ *
 * If this Transaction was created on a {@link TransactionDB}, this function
 * will still acquire locks necessary to make sure this write doesn't cause
 * conflicts in other transactions; this may cause a {@link RocksDBException}
@@ -1390,10 +1389,10 @@ public class Transaction extends RocksObject {
 * Similar to {@link RocksDB#put(byte[], byte[])},
 * but operates on the transaction's write batch. This write will only happen
 * if this transaction gets committed successfully.
- *
+ *
 * Unlike {@link #put(byte[], byte[])}, no conflict
 * checking will be performed for this key.
- *
+ *
 * If this Transaction was created on a {@link TransactionDB}, this function
 * will still acquire locks necessary to make sure this write doesn't cause
 * conflicts in other transactions; this may cause a {@link RocksDBException}
@@ -1455,10 +1454,10 @@ public class Transaction extends RocksObject {
 * Similar to {@link RocksDB#merge(ColumnFamilyHandle, byte[], byte[])},
 * but operates on the transaction's write batch. This write will only happen
 * if this transaction gets committed successfully.
- *
+ *
 * Unlike {@link #merge(ColumnFamilyHandle, byte[], byte[])}, no conflict
 * checking will be performed for this key.
- *
+ *
 * If this Transaction was created on a {@link TransactionDB}, this function
 * will still acquire locks necessary to make sure this write doesn't cause
 * conflicts in other transactions; this may cause a {@link RocksDBException}
@@ -1481,10 +1480,10 @@ public class Transaction extends RocksObject {
 * Similar to {@link RocksDB#merge(byte[], byte[])},
 * but operates on the transaction's write batch. This write will only happen
 * if this transaction gets committed successfully.
- *
+ *
 * Unlike {@link #merge(byte[], byte[])}, no conflict
 * checking will be performed for this key.
- *
+ *
 * If this Transaction was created on a {@link TransactionDB}, this function
 * will still acquire locks necessary to make sure this write doesn't cause
 * conflicts in other transactions; this may cause a {@link RocksDBException}
@@ -1506,10 +1505,10 @@ public class Transaction extends RocksObject {
 * Similar to {@link RocksDB#delete(ColumnFamilyHandle, byte[])},
 * but operates on the transaction's write batch. This write will only happen
 * if this transaction gets committed successfully.
- *
+ *
 * Unlike {@link #delete(ColumnFamilyHandle, byte[])}, no conflict
 * checking will be performed for this key.
- *
+ *
 * If this Transaction was created on a {@link TransactionDB}, this function
 * will still acquire locks necessary to make sure this write doesn't cause
 * conflicts in other transactions; this may cause a {@link RocksDBException}
@@ -1532,10 +1531,10 @@ public class Transaction extends RocksObject {
 * Similar to {@link RocksDB#delete(byte[])},
 * but operates on the transaction's write batch. This write will only happen
 * if this transaction gets committed successfully.
- *
+ *
 * Unlike {@link #delete(byte[])}, no conflict
 * checking will be performed for this key.
- *
+ *
 * If this Transaction was created on a {@link TransactionDB}, this function
 * will still acquire locks necessary to make sure this write doesn't cause
 * conflicts in other transactions; this may cause a {@link RocksDBException}
@@ -1600,13 +1599,13 @@ public class Transaction extends RocksObject {
* By default, all put/merge/delete operations will be indexed in the
* transaction so that get/getForUpdate/getIterator can search for these
* keys.
- *
+ *
* If the caller does not want to fetch the keys about to be written,
* they may want to avoid indexing as a performance optimization.
- * Calling {@link #disableIndexing()} will turn off indexing for all future
+ * Calling {@code #disableIndexing()} will turn off indexing for all future
* put/merge/delete operations until {@link #enableIndexing()} is called.
- *
- * If a key is put/merge/deleted after {@link #disableIndexing()} is called
+ *
+ * If a key is put/merge/deleted after {@code #disableIndexing()} is called
* and then is fetched via get/getForUpdate/getIterator, the result of the
* fetch is undefined.
*/
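(Illustrative only, with placeholder keys: a bulk load that never reads its own writes inside the transaction can skip the index.)

    final byte[] payload = new byte[128]; // placeholder value
    txn.disableIndexing();
    for (int i = 0; i < 100_000; i++) {
      // Per the contract above, fetching these keys via
      // get/getForUpdate/getIterator is now undefined.
      txn.put(("bulk:" + i).getBytes(), payload);
    }
    txn.enableIndexing(); // writes from here on are indexed again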
@@ -1684,7 +1683,7 @@ public class Transaction extends RocksObject {
/**
* Fetch the underlying write batch that contains all pending changes to be
* committed.
- *
+ *
* Note: You should not write or delete anything from the batch directly and
* should only use the functions in the {@link Transaction} class to
* write to this transaction.
@@ -1693,15 +1692,13 @@ public class Transaction extends RocksObject {
*/
public WriteBatchWithIndex getWriteBatch() {
assert(isOwningHandle());
- final WriteBatchWithIndex writeBatchWithIndex =
- new WriteBatchWithIndex(getWriteBatch(nativeHandle_));
- return writeBatchWithIndex;
+ return new WriteBatchWithIndex(getWriteBatch(nativeHandle_));
}
/**
* Change the value of {@link TransactionOptions#getLockTimeout()}
* (in milliseconds) for this transaction.
- *
+ *
* Has no effect on OptimisticTransactions.
*
* @param lockTimeout the timeout (in milliseconds) for locks used by this
@@ -1719,9 +1716,7 @@ public class Transaction extends RocksObject {
*/
public WriteOptions getWriteOptions() {
assert(isOwningHandle());
- final WriteOptions writeOptions =
- new WriteOptions(getWriteOptions(nativeHandle_));
- return writeOptions;
+ return new WriteOptions(getWriteOptions(nativeHandle_));
}
/**
@@ -1738,28 +1733,28 @@ public class Transaction extends RocksObject {
* If this key was previously fetched in this transaction using
* {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)}/
* {@link #multiGetForUpdate(ReadOptions, List, byte[][])}, calling
- * {@link #undoGetForUpdate(ColumnFamilyHandle, byte[])} will tell
+ * {@code #undoGetForUpdate(ColumnFamilyHandle, byte[])} will tell
* the transaction that it no longer needs to do any conflict checking
* for this key.
- *
+ *
* If a key has been fetched N times via
* {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)}/
* {@link #multiGetForUpdate(ReadOptions, List, byte[][])}, then
- * {@link #undoGetForUpdate(ColumnFamilyHandle, byte[])} will only have an
+ * {@code #undoGetForUpdate(ColumnFamilyHandle, byte[])} will only have an
* effect if it is also called N times. If this key has been written to in
- * this transaction, {@link #undoGetForUpdate(ColumnFamilyHandle, byte[])}
+ * this transaction, {@code #undoGetForUpdate(ColumnFamilyHandle, byte[])}
* will have no effect.
- *
+ *
* If {@link #setSavePoint()} has been called after the
* {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)},
- * {@link #undoGetForUpdate(ColumnFamilyHandle, byte[])} will not have any
+ * {@code #undoGetForUpdate(ColumnFamilyHandle, byte[])} will not have any
* effect.
- *
+ *
* If this Transaction was created by an {@link OptimisticTransactionDB},
- * calling {@link #undoGetForUpdate(ColumnFamilyHandle, byte[])} can affect
+ * calling {@code #undoGetForUpdate(ColumnFamilyHandle, byte[])} can affect
* whether this key is conflict checked at commit time.
* If this Transaction was created by a {@link TransactionDB},
- * calling {@link #undoGetForUpdate(ColumnFamilyHandle, byte[])} may release
+ * calling {@code #undoGetForUpdate(ColumnFamilyHandle, byte[])} may release
* any held locks for this key.
*
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
@@ -1776,28 +1771,28 @@ public class Transaction extends RocksObject {
* If this key was previously fetched in this transaction using
* {@link #getForUpdate(ReadOptions, byte[], boolean)}/
* {@link #multiGetForUpdate(ReadOptions, List, byte[][])}, calling
- * {@link #undoGetForUpdate(byte[])} will tell
+ * {@code #undoGetForUpdate(byte[])} will tell
* the transaction that it no longer needs to do any conflict checking
* for this key.
- *
+ *
* If a key has been fetched N times via
* {@link #getForUpdate(ReadOptions, byte[], boolean)}/
* {@link #multiGetForUpdate(ReadOptions, List, byte[][])}, then
- * {@link #undoGetForUpdate(byte[])} will only have an
+ * {@code #undoGetForUpdate(byte[])} will only have an
* effect if it is also called N times. If this key has been written to in
- * this transaction, {@link #undoGetForUpdate(byte[])}
+ * this transaction, {@code #undoGetForUpdate(byte[])}
* will have no effect.
- *
+ *
* If {@link #setSavePoint()} has been called after the
* {@link #getForUpdate(ReadOptions, byte[], boolean)},
- * {@link #undoGetForUpdate(byte[])} will not have any
+ * {@code #undoGetForUpdate(byte[])} will not have any
* effect.
- *
+ *
* If this Transaction was created by an {@link OptimisticTransactionDB},
- * calling {@link #undoGetForUpdate(byte[])} can affect
+ * calling {@code #undoGetForUpdate(byte[])} can affect
* whether this key is conflict checked at commit time.
* If this Transaction was created by a {@link TransactionDB},
- * calling {@link #undoGetForUpdate(byte[])} may release
+ * calling {@code #undoGetForUpdate(byte[])} may release
* any held locks for this key.
*
* @param key the key to retrieve the value for.
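(A brief hypothetical sequence, assuming "txn" and "readOptions" as above, showing when the undo is effective versus a no-op.)

    final byte[] key = "maybe".getBytes(); // placeholder key
    final byte[] current = txn.getForUpdate(readOptions, key, true);
    if (current == null) {
      // We will not act on this key after all: stop conflict checking it
      // (and, on a TransactionDB, possibly release its lock).
      txn.undoGetForUpdate(key);
    } else {
      // Once the key has been written to, undoGetForUpdate has no effect.
      txn.put(key, current);
    }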
@@ -1828,9 +1823,7 @@ public class Transaction extends RocksObject {
*/
public WriteBatch getCommitTimeWriteBatch() {
assert(isOwningHandle());
- final WriteBatch writeBatch =
- new WriteBatch(getCommitTimeWriteBatch(nativeHandle_));
- return writeBatch;
+ return new WriteBatch(getCommitTimeWriteBatch(nativeHandle_));
}
/**
@@ -1908,7 +1901,7 @@ public class Transaction extends RocksObject {
/**
* Get the execution status of the transaction.
- *
+ *
* NOTE: The execution status of an Optimistic Transaction
* never changes. This is only useful for non-optimistic transactions!
*
@@ -2045,11 +2038,10 @@ public class Transaction extends RocksObject {
private native void setSavePoint(final long handle) throws RocksDBException;
private native void rollbackToSavePoint(final long handle)
throws RocksDBException;
- private native byte[] get(final long handle, final long readOptionsHandle,
- final byte key[], final int keyLength, final long columnFamilyHandle)
- throws RocksDBException;
- private native byte[] get(final long handle, final long readOptionsHandle,
- final byte key[], final int keyLen) throws RocksDBException;
+ private native byte[] get(final long handle, final long readOptionsHandle, final byte[] key,
+ final int keyLength, final long columnFamilyHandle) throws RocksDBException;
+ private native byte[] get(final long handle, final long readOptionsHandle, final byte[] key,
+ final int keyLen) throws RocksDBException;
private native byte[][] multiGet(final long handle,
final long readOptionsHandle, final byte[][] keys,
final long[] columnFamilyHandles) throws RocksDBException;
@@ -2057,10 +2049,10 @@ public class Transaction extends RocksObject {
final long readOptionsHandle, final byte[][] keys)
throws RocksDBException;
private native byte[] getForUpdate(final long handle, final long readOptionsHandle,
- final byte key[], final int keyLength, final long columnFamilyHandle, final boolean exclusive,
+ final byte[] key, final int keyLength, final long columnFamilyHandle, final boolean exclusive,
final boolean doValidate) throws RocksDBException;
private native byte[] getForUpdate(final long handle, final long readOptionsHandle,
- final byte key[], final int keyLen, final boolean exclusive, final boolean doValidate)
+ final byte[] key, final int keyLen, final boolean exclusive, final boolean doValidate)
throws RocksDBException;
private native byte[][] multiGetForUpdate(final long handle,
final long readOptionsHandle, final byte[][] keys,
diff --git a/java/src/main/java/org/rocksdb/TransactionDB.java b/java/src/main/java/org/rocksdb/TransactionDB.java
index 86f25fe15..105f4eff0 100644
--- a/java/src/main/java/org/rocksdb/TransactionDB.java
+++ b/java/src/main/java/org/rocksdb/TransactionDB.java
@@ -106,12 +106,12 @@ public class TransactionDB extends RocksDB
/**
* This is similar to {@link #close()} except that it
* throws an exception if any error occurs.
- *
+ *
* This will not fsync the WAL files.
* If syncing is required, the caller must first call {@link #syncWal()}
* or {@link #write(WriteOptions, WriteBatch)} using an empty write batch
* with {@link WriteOptions#setSync(boolean)} set to true.
- *
+ *
* See also {@link #close()}.
*
* @throws RocksDBException if an error occurs whilst closing.
@@ -129,12 +129,12 @@ public class TransactionDB extends RocksDB
/**
* This is similar to {@link #closeE()} except that it
* silently ignores any errors.
- *
+ *
* This will not fsync the WAL files.
* If syncing is required, the caller must first call {@link #syncWal()}
* or {@link #write(WriteOptions, WriteBatch)} using an empty write batch
* with {@link WriteOptions#setSync(boolean)} set to true.
- *
+ *
* See also {@link #close()}.
*/
@Override
@@ -233,8 +233,7 @@ public class TransactionDB extends RocksDB
private final long[] transactionIDs;
private final boolean exclusive;
- public KeyLockInfo(final String key, final long transactionIDs[],
- final boolean exclusive) {
+ public KeyLockInfo(final String key, final long[] transactionIDs, final boolean exclusive) {
this.key = key;
this.transactionIDs = transactionIDs;
this.exclusive = exclusive;
@@ -381,8 +380,7 @@ public class TransactionDB extends RocksDB
private static native long[] open(final long dbOptionsHandle,
final long transactionDbOptionsHandle, final String path,
final byte[][] columnFamilyNames, final long[] columnFamilyOptions);
- private native static void closeDatabase(final long handle)
- throws RocksDBException;
+ private static native void closeDatabase(final long handle) throws RocksDBException;
private native long beginTransaction(final long handle,
final long writeOptionsHandle);
private native long beginTransaction(final long handle,
diff --git a/java/src/main/java/org/rocksdb/TransactionDBOptions.java b/java/src/main/java/org/rocksdb/TransactionDBOptions.java
index 7f4296a7c..391025d6a 100644
--- a/java/src/main/java/org/rocksdb/TransactionDBOptions.java
+++ b/java/src/main/java/org/rocksdb/TransactionDBOptions.java
@@ -14,8 +14,8 @@ public class TransactionDBOptions extends RocksObject {
/**
* Specifies the maximum number of keys that can be locked at the same time
* per column family.
- *
- * If the number of locked keys is greater than {@link #getMaxNumLocks()},
+ *
+ * If the number of locked keys is greater than {@code #getMaxNumLocks()},
* transaction writes (or GetForUpdate) will return an error.
*
* @return The maximum number of keys that can be locked
@@ -28,7 +28,7 @@ public class TransactionDBOptions extends RocksObject {
/**
* Specifies the maximum number of keys that can be locked at the same time
* per column family.
- *
+ *
* If the number of locked keys is greater than {@link #getMaxNumLocks()},
* transaction writes (or GetForUpdate) will return an error.
*
@@ -57,7 +57,7 @@ public class TransactionDBOptions extends RocksObject {
* Increasing this value will increase the concurrency by dividing the lock
 * table (per column family) into more sub-tables, each with its own
* separate mutex.
- *
+ *
* Default: 16
*
* @param numStripes The number of sub-tables
@@ -94,7 +94,7 @@ public class TransactionDBOptions extends RocksObject {
* If negative, there is no timeout. Not using a timeout is not recommended
* as it can lead to deadlocks. Currently, there is no deadlock-detection to
* recover from a deadlock.
- *
+ *
* Default: 1000
*
* @param transactionLockTimeout the default wait timeout in milliseconds
@@ -113,7 +113,7 @@ public class TransactionDBOptions extends RocksObject {
 * OUTSIDE of a transaction (i.e. by calling {@link RocksDB#put},
* {@link RocksDB#merge}, {@link RocksDB#delete} or {@link RocksDB#write}
* directly).
- *
+ *
* If 0, no waiting is done if a lock cannot instantly be acquired.
 * If negative, there is no timeout and acquiring a lock will block
 * indefinitely.
@@ -131,29 +131,28 @@ public class TransactionDBOptions extends RocksObject {
 * OUTSIDE of a transaction (i.e. by calling {@link RocksDB#put},
* {@link RocksDB#merge}, {@link RocksDB#delete} or {@link RocksDB#write}
* directly).
- *
+ *
* If 0, no waiting is done if a lock cannot instantly be acquired.
 * If negative, there is no timeout and acquiring a lock will block
 * indefinitely.
- *
+ *
* Not using a timeout can lead to deadlocks. Currently, there
* is no deadlock-detection to recover from a deadlock. While DB writes
* cannot deadlock with other DB writes, they can deadlock with a transaction.
* A negative timeout should only be used if all transactions have a small
* expiration set.
- *
+ *
* Default: 1000
*
* @param defaultLockTimeout the timeout in milliseconds when writing a key
* OUTSIDE of a transaction
* @return this TransactionDBOptions instance
*/
- public TransactionDBOptions setDefaultLockTimeout(
- final long defaultLockTimeout) {
- assert(isOwningHandle());
- setDefaultLockTimeout(nativeHandle_, defaultLockTimeout);
- return this;
- }
+ public TransactionDBOptions setDefaultLockTimeout(final long defaultLockTimeout) {
+ assert (isOwningHandle());
+ setDefaultLockTimeout(nativeHandle_, defaultLockTimeout);
+ return this;
+ }
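(Putting the two lock timeouts together, a minimal configuration sketch; the values are illustrative, not recommendations.)

    try (final TransactionDBOptions txnDbOptions = new TransactionDBOptions()
             .setTransactionLockTimeout(1000) // per-key wait inside transactions, ms
             .setDefaultLockTimeout(1000)) { // wait for writes made outside transactions, ms
      // pass txnDbOptions to TransactionDB.open(...)
    }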
// /**
// * If set, the {@link TransactionDB} will use this implementation of a mutex
@@ -199,7 +198,7 @@ public class TransactionDBOptions extends RocksObject {
return this;
}
- private native static long newTransactionDBOptions();
+ private static native long newTransactionDBOptions();
private native long getMaxNumLocks(final long handle);
private native void setMaxNumLocks(final long handle,
final long maxNumLocks);
diff --git a/java/src/main/java/org/rocksdb/TransactionOptions.java b/java/src/main/java/org/rocksdb/TransactionOptions.java
index 195fc85e4..f93d3cb3c 100644
--- a/java/src/main/java/org/rocksdb/TransactionOptions.java
+++ b/java/src/main/java/org/rocksdb/TransactionOptions.java
@@ -54,7 +54,7 @@ public class TransactionOptions extends RocksObject
/**
* The wait timeout in milliseconds when a transaction attempts to lock a key.
- *
+ *
* If 0, no waiting is done if a lock cannot instantly be acquired.
* If negative, {@link TransactionDBOptions#getTransactionLockTimeout(long)}
 * will be used.
@@ -69,11 +69,11 @@ public class TransactionOptions extends RocksObject
/**
* If positive, specifies the wait timeout in milliseconds when
* a transaction attempts to lock a key.
- *
+ *
* If 0, no waiting is done if a lock cannot instantly be acquired.
* If negative, {@link TransactionDBOptions#getTransactionLockTimeout(long)}
 * will be used.
- *
+ *
* Default: -1
*
* @param lockTimeout the lock timeout in milliseconds
@@ -88,7 +88,7 @@ public class TransactionOptions extends RocksObject
/**
* Expiration duration in milliseconds.
- *
+ *
* If non-negative, transactions that last longer than this many milliseconds
* will fail to commit. If not set, a forgotten transaction that is never
* committed, rolled back, or deleted will never relinquish any locks it
@@ -103,12 +103,12 @@ public class TransactionOptions extends RocksObject
/**
* Expiration duration in milliseconds.
- *
+ *
* If non-negative, transactions that last longer than this many milliseconds
* will fail to commit. If not set, a forgotten transaction that is never
* committed, rolled back, or deleted will never relinquish any locks it
* holds. This could prevent keys from being written by other writers.
- *
+ *
* Default: -1
*
* @param expiration the expiration duration in milliseconds
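(For instance, a transaction that takes a snapshot at begin and auto-expires might be configured as follows; all values are placeholders.)

    try (final TransactionOptions txnOptions = new TransactionOptions()
             .setSetSnapshot(true) // equivalent to calling setSnapshot() at begin
             .setExpiration(60_000) // commit fails once the transaction is 60s old
             .setLockTimeout(1000)) {
      // pass txnOptions to db.beginTransaction(writeOptions, txnOptions)
    }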
@@ -133,7 +133,7 @@ public class TransactionOptions extends RocksObject
/**
* Sets the number of traversals to make during deadlock detection.
- *
+ *
* Default: 50
*
* @param deadlockDetectDepth the number of traversals to make during
@@ -168,7 +168,7 @@ public class TransactionOptions extends RocksObject
return this;
}
- private native static long newTransactionOptions();
+ private static native long newTransactionOptions();
private native boolean isSetSnapshot(final long handle);
private native void setSetSnapshot(final long handle,
final boolean setSnapshot);
diff --git a/java/src/main/java/org/rocksdb/TransactionalDB.java b/java/src/main/java/org/rocksdb/TransactionalDB.java
index 740181989..1ba955496 100644
--- a/java/src/main/java/org/rocksdb/TransactionalDB.java
+++ b/java/src/main/java/org/rocksdb/TransactionalDB.java
@@ -8,7 +8,7 @@ package org.rocksdb;
interface TransactionalDB
* Caller is responsible for calling {@link #close()} on the returned
* transaction when it is no longer needed.
*
@@ -19,7 +19,7 @@ interface TransactionalDB
* Caller is responsible for calling {@link #close()} on the returned
* transaction when it is no longer needed.
*
@@ -32,7 +32,7 @@ interface TransactionalDB
* Caller is responsible for calling {@link #close()} on the returned
* transaction when it is no longer needed.
*
@@ -48,7 +48,7 @@ interface TransactionalDB
* Caller is responsible for calling {@link #close()} on the returned
* transaction when it is no longer needed.
*
diff --git a/java/src/main/java/org/rocksdb/TransactionalOptions.java b/java/src/main/java/org/rocksdb/TransactionalOptions.java
index d55ee900c..2175693fd 100644
--- a/java/src/main/java/org/rocksdb/TransactionalOptions.java
+++ b/java/src/main/java/org/rocksdb/TransactionalOptions.java
@@ -20,7 +20,7 @@ interface TransactionalOptions
* Default: false
*
* @param setSnapshot Whether to set a snapshot
diff --git a/java/src/main/java/org/rocksdb/TtlDB.java b/java/src/main/java/org/rocksdb/TtlDB.java
index a7adaf4b2..2bb0c4333 100644
--- a/java/src/main/java/org/rocksdb/TtlDB.java
+++ b/java/src/main/java/org/rocksdb/TtlDB.java
@@ -125,7 +125,7 @@ public class TtlDB extends RocksDB {
cfOptionHandles[i] = cfDescriptor.getOptions().nativeHandle_;
}
- final int ttlVals[] = new int[ttlValues.size()];
+ final int[] ttlVals = new int[ttlValues.size()];
for(int i = 0; i < ttlValues.size(); i++) {
ttlVals[i] = ttlValues.get(i);
}
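(A hedged sketch of the per-column-family TTL API this code backs; the path, family name, and TTL values are placeholders, and the usual java.util imports are assumed.)

    final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
        new ColumnFamilyDescriptor("sessions".getBytes()));
    final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
    final List<Integer> ttls = Arrays.asList(0, 3600); // seconds; 0 = no expiry
    try (final DBOptions dbOptions = new DBOptions()
             .setCreateIfMissing(true)
             .setCreateMissingColumnFamilies(true);
         final TtlDB db = TtlDB.open(dbOptions, "/tmp/ttl_example", cfDescriptors,
             cfHandles, ttls, false /* readOnly */)) {
      // entries in "sessions" become eligible for deletion during
      // compaction roughly one hour after they were written
    }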
@@ -144,12 +144,12 @@ public class TtlDB extends RocksDB {
*
* This is similar to {@link #close()} except that it
* throws an exception if any error occurs.
- *
+ *
* This will not fsync the WAL files.
* If syncing is required, the caller must first call {@link #syncWal()}
* or {@link #write(WriteOptions, WriteBatch)} using an empty write batch
* with {@link WriteOptions#setSync(boolean)} set to true.
- *
+ *
* See also {@link #close()}.
*
* @throws RocksDBException if an error occurs whilst closing.
@@ -172,7 +172,7 @@ public class TtlDB extends RocksDB {
* If syncing is required, the caller must first call {@link #syncWal()}
* or {@link #write(WriteOptions, WriteBatch)} using an empty write batch
* with {@link WriteOptions#setSync(boolean)} set to true.
- *
+ *
* See also {@link #close()}.
*/
@Override
@@ -230,16 +230,13 @@ public class TtlDB extends RocksDB {
@Override protected native void disposeInternal(final long handle);
- private native static long open(final long optionsHandle,
- final String db_path, final int ttl, final boolean readOnly)
- throws RocksDBException;
- private native static long[] openCF(final long optionsHandle,
- final String db_path, final byte[][] columnFamilyNames,
- final long[] columnFamilyOptions, final int[] ttlValues,
+ private static native long open(final long optionsHandle, final String db_path, final int ttl,
+ final boolean readOnly) throws RocksDBException;
+ private static native long[] openCF(final long optionsHandle, final String db_path,
+ final byte[][] columnFamilyNames, final long[] columnFamilyOptions, final int[] ttlValues,
final boolean readOnly) throws RocksDBException;
private native long createColumnFamilyWithTtl(final long handle,
final byte[] columnFamilyName, final long columnFamilyOptions, int ttl)
throws RocksDBException;
- private native static void closeDatabase(final long handle)
- throws RocksDBException;
+ private static native void closeDatabase(final long handle) throws RocksDBException;
}
diff --git a/java/src/main/java/org/rocksdb/TxnDBWritePolicy.java b/java/src/main/java/org/rocksdb/TxnDBWritePolicy.java
index 837ce6157..28cb8556b 100644
--- a/java/src/main/java/org/rocksdb/TxnDBWritePolicy.java
+++ b/java/src/main/java/org/rocksdb/TxnDBWritePolicy.java
@@ -23,7 +23,7 @@ public enum TxnDBWritePolicy {
*/
WRITE_UNPREPARED((byte)0x2);
- private byte value;
+ private final byte value;
TxnDBWritePolicy(final byte value) {
this.value = value;
diff --git a/java/src/main/java/org/rocksdb/UInt64AddOperator.java b/java/src/main/java/org/rocksdb/UInt64AddOperator.java
index cce9b298d..0cffdce8c 100644
--- a/java/src/main/java/org/rocksdb/UInt64AddOperator.java
+++ b/java/src/main/java/org/rocksdb/UInt64AddOperator.java
@@ -14,6 +14,6 @@ public class UInt64AddOperator extends MergeOperator {
super(newSharedUInt64AddOperator());
}
- private native static long newSharedUInt64AddOperator();
+ private static native long newSharedUInt64AddOperator();
@Override protected final native void disposeInternal(final long handle);
}
diff --git a/java/src/main/java/org/rocksdb/WALRecoveryMode.java b/java/src/main/java/org/rocksdb/WALRecoveryMode.java
index d8b9eeced..b8c098f94 100644
--- a/java/src/main/java/org/rocksdb/WALRecoveryMode.java
+++ b/java/src/main/java/org/rocksdb/WALRecoveryMode.java
@@ -9,10 +9,9 @@ package org.rocksdb;
 * The WAL Recovery Mode
*/
public enum WALRecoveryMode {
-
/**
* Original levelDB recovery
- *
+ *
 * We tolerate an incomplete record in trailing data on all logs.
 * Use case: This is legacy behavior (default).
*/
@@ -20,7 +19,7 @@ public enum WALRecoveryMode {
/**
* Recover from clean shutdown
- *
+ *
 * We don't expect to find any corruption in the WAL.
 * Use case: This is ideal for unit tests and rare applications that
 * require a high consistency guarantee
@@ -44,7 +43,7 @@ public enum WALRecoveryMode {
*/
SkipAnyCorruptedRecords((byte)0x03);
- private byte value;
+ private final byte value;
WALRecoveryMode(final byte value) {
this.value = value;
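(As a usage note, the recovery mode is applied through the DB options; the choice below is illustrative.)

    try (final Options options = new Options()
             .setCreateIfMissing(true)
             .setWalRecoveryMode(WALRecoveryMode.PointInTimeRecovery)) {
      // open the DB with these options
    }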
diff --git a/java/src/main/java/org/rocksdb/WBWIRocksIterator.java b/java/src/main/java/org/rocksdb/WBWIRocksIterator.java
index ce146eb3f..e0b99b1b5 100644
--- a/java/src/main/java/org/rocksdb/WBWIRocksIterator.java
+++ b/java/src/main/java/org/rocksdb/WBWIRocksIterator.java
@@ -18,12 +18,12 @@ public class WBWIRocksIterator
/**
* Get the current entry
- *
+ *
* The WriteEntry is only valid
* until the iterator is repositioned.
* If you want to keep the WriteEntry across iterator
* movements, you must make a copy of its data!
- *
+ *
* Note - This method is not thread-safe with respect to the WriteEntry
 * as it performs a non-atomic update across the fields of the WriteEntry.
*
diff --git a/java/src/main/java/org/rocksdb/WalFilter.java b/java/src/main/java/org/rocksdb/WalFilter.java
index 37e36213a..a2836634a 100644
--- a/java/src/main/java/org/rocksdb/WalFilter.java
+++ b/java/src/main/java/org/rocksdb/WalFilter.java
@@ -12,13 +12,12 @@ import java.util.Map;
* records or modify their processing on recovery.
*/
public interface WalFilter {
-
/**
 * Provide a ColumnFamily->LogNumber map to the filter
 * so that the filter can determine whether a log number applies to a given
 * column family (i.e. that log hasn't been flushed to SST already for the
 * column family).
- *
+ *
 * We also pass in a name->id map, as only the name is known during
 * recovery (handles are opened post-recovery),
 * while write batch callbacks happen in terms of column family id.
diff --git a/java/src/main/java/org/rocksdb/WalProcessingOption.java b/java/src/main/java/org/rocksdb/WalProcessingOption.java
index 889602edc..3a9c2be0e 100644
--- a/java/src/main/java/org/rocksdb/WalProcessingOption.java
+++ b/java/src/main/java/org/rocksdb/WalProcessingOption.java
@@ -6,7 +6,7 @@
package org.rocksdb;
public enum WalProcessingOption {
- /**
+ /*
* Continue processing as usual.
*/
CONTINUE_PROCESSING((byte)0x0),
diff --git a/java/src/main/java/org/rocksdb/WriteBatch.java b/java/src/main/java/org/rocksdb/WriteBatch.java
index 9b46108d0..49e1f7f20 100644
--- a/java/src/main/java/org/rocksdb/WriteBatch.java
+++ b/java/src/main/java/org/rocksdb/WriteBatch.java
@@ -9,16 +9,16 @@ import java.nio.ByteBuffer;
/**
* WriteBatch holds a collection of updates to apply atomically to a DB.
- *
+ *
* The updates are applied in the order in which they are added
* to the WriteBatch. For example, the value of "key" will be "v3"
* after the following batch is written:
- *
+ *
* batch.put("key", "v1");
* batch.remove("key");
* batch.put("key", "v2");
* batch.put("key", "v3");
- *
+ *
* Multiple threads can invoke const methods on a WriteBatch without
* external synchronization, but if any of the threads may call a
* non-const method, all threads accessing the same WriteBatch must use
@@ -180,7 +180,7 @@ public class WriteBatch extends AbstractWriteBatch {
/**
* Gets the WAL termination point.
- *
+ *
* See {@link #markWalTerminationPoint()}
*
* @return the WAL termination point
@@ -260,9 +260,8 @@ public class WriteBatch extends AbstractWriteBatch {
@Override final native void setMaxBytes(final long nativeHandle,
final long maxBytes);
- private native static long newWriteBatch(final int reserved_bytes);
- private native static long newWriteBatch(final byte[] serialized,
- final int serializedLength);
+ private static native long newWriteBatch(final int reserved_bytes);
+ private static native long newWriteBatch(final byte[] serialized, final int serializedLength);
private native void iterate(final long handle, final long handlerHandle)
throws RocksDBException;
private native byte[] data(final long nativeHandle) throws RocksDBException;
@@ -282,10 +281,9 @@ public class WriteBatch extends AbstractWriteBatch {
/**
* Handler callback for iterating over the contents of a batch.
*/
- public static abstract class Handler
- extends RocksCallbackObject {
+ public abstract static class Handler extends RocksCallbackObject {
public Handler() {
- super(null);
+ super(0L);
}
@Override
diff --git a/java/src/main/java/org/rocksdb/WriteBatchInterface.java b/java/src/main/java/org/rocksdb/WriteBatchInterface.java
index 92caa22b3..32cd8d1e7 100644
--- a/java/src/main/java/org/rocksdb/WriteBatchInterface.java
+++ b/java/src/main/java/org/rocksdb/WriteBatchInterface.java
@@ -136,12 +136,12 @@ public interface WriteBatchInterface {
* Remove the database entry for {@code key}. Requires that the key exists
* and was not overwritten. It is not an error if the key did not exist
* in the database.
- *
+ *
* If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple
* times), then the result of calling SingleDelete() on this key is undefined.
* SingleDelete() only behaves correctly if there has been only one Put()
* for this key since the previous call to SingleDelete() for this key.
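(A hedged example of the only supported pattern, given a WriteBatch "batch": exactly one put per key between SingleDelete calls, never mixed with delete or merge for that key.)

    final byte[] sessionKey = "session:42".getBytes(); // placeholder key
    batch.put(sessionKey, "token".getBytes()); // the single Put for this key
    // ... no other put/delete/merge for sessionKey ...
    batch.singleDelete(sessionKey); // well-defined only under the contract above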
- *
+ *
* This feature is currently an experimental performance optimization
* for a very specific workload. It is up to the caller to ensure that
* SingleDelete is only used for a key that is not deleted using Delete() or
@@ -160,12 +160,12 @@ public interface WriteBatchInterface {
* Remove the database entry for {@code key}. Requires that the key exists
* and was not overwritten. It is not an error if the key did not exist
* in the database.
- *
+ *
* If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple
* times), then the result of calling SingleDelete() on this key is undefined.
* SingleDelete() only behaves correctly if there has been only one Put()
* for this key since the previous call to SingleDelete() for this key.
- *
+ *
* This feature is currently an experimental performance optimization
* for a very specific workload. It is up to the caller to ensure that
* SingleDelete is only used for a key that is not deleted using Delete() or
@@ -186,7 +186,7 @@ public interface WriteBatchInterface {
* Removes the database entries in the range ["beginKey", "endKey"), i.e.,
* including "beginKey" and excluding "endKey". a non-OK status on error. It
* is not an error if no keys exist in the range ["beginKey", "endKey").
- *
+ *
* Delete the database entry (if any) for "key". Returns OK on success, and a
* non-OK status on error. It is not an error if "key" did not exist in the
* database.
@@ -203,7 +203,7 @@ public interface WriteBatchInterface {
* Removes the database entries in the range ["beginKey", "endKey"), i.e.,
* including "beginKey" and excluding "endKey". a non-OK status on error. It
* is not an error if no keys exist in the range ["beginKey", "endKey").
- *
+ *
* Delete the database entry (if any) for "key". Returns OK on success, and a
* non-OK status on error. It is not an error if "key" did not exist in the
* database.
@@ -224,9 +224,9 @@ public interface WriteBatchInterface {
* it will not be persisted to the SST files. When iterating over this
* WriteBatch, WriteBatch::Handler::LogData will be called with the contents
* of the blob as it is encountered. Blobs, puts, deletes, and merges will be
- * encountered in the same order in thich they were inserted. The blob will
+ * encountered in the same order in which they were inserted. The blob will
 * NOT consume sequence number(s) and will NOT increase the count of the batch.
- *
+ *
* Example application: add timestamps to the transaction log for use in
* replication.
*
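(For example, replication metadata can ride along with a batch without ever touching the SST files; the blob content is a placeholder.)

    // Surfaced only to WriteBatch.Handler#logData during iteration;
    // consumes no sequence numbers and does not grow the batch count.
    batch.putLogData("replication-ts=1700000000".getBytes());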
@@ -257,7 +257,7 @@ public interface WriteBatchInterface {
/**
* Pop the most recent save point.
- *
+ *
 * That is, it removes the last save point,
* which was set by {@link #setSavePoint()}.
*
diff --git a/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java b/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java
index d85b8e3f7..d41be5856 100644
--- a/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java
+++ b/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java
@@ -10,10 +10,10 @@ import java.nio.ByteBuffer;
/**
* Similar to {@link org.rocksdb.WriteBatch} but with a binary searchable
* index built for all the keys inserted.
- *
+ *
* Calling put, merge, remove or putLogData calls the same function
* as with {@link org.rocksdb.WriteBatch} whilst also building an index.
- *
+ *
* A user can call {@link org.rocksdb.WriteBatchWithIndex#newIterator()} to
* create an iterator over the write batch or
* {@link org.rocksdb.WriteBatchWithIndex#newIteratorWithBase(org.rocksdb.RocksIterator)}
@@ -22,7 +22,7 @@ import java.nio.ByteBuffer;
public class WriteBatchWithIndex extends AbstractWriteBatch {
/**
* Creates a WriteBatchWithIndex where no bytes
- * are reserved up-front, bytewise comparison is
+ * are reserved up-front, byte-wise comparison is
* used for fallback key comparisons,
* and duplicate keys operations are retained
*/
@@ -30,10 +30,9 @@ public class WriteBatchWithIndex extends AbstractWriteBatch {
super(newWriteBatchWithIndex());
}
-
/**
* Creates a WriteBatchWithIndex where no bytes
- * are reserved up-front, bytewise comparison is
+ * are reserved up-front, byte-wise comparison is
* used for fallback key comparisons, and duplicate key
* assignment is determined by the constructor argument
*
@@ -48,9 +47,9 @@ public class WriteBatchWithIndex extends AbstractWriteBatch {
/**
* Creates a WriteBatchWithIndex
*
- * @param fallbackIndexComparator We fallback to this comparator
+ * @param fallbackIndexComparator We fall back to this comparator
* to compare keys within a column family if we cannot determine
- * the column family and so look up it's comparator.
+ * the column family and so cannot look up its comparator.
*
* @param reservedBytes reserved bytes in underlying WriteBatch
*
@@ -115,7 +114,7 @@ public class WriteBatchWithIndex extends AbstractWriteBatch {
* Provides Read-Your-Own-Writes like functionality by
* creating a new Iterator that will use {@link org.rocksdb.WBWIRocksIterator}
* as a delta and baseIterator as a base
- *
+ *
* Updating write batch with the current key of the iterator is not safe.
* We strongly recommend users not to do it. It will invalidate the current
* key() and value() of the iterator. This invalidation happens even before
@@ -138,7 +137,7 @@ public class WriteBatchWithIndex extends AbstractWriteBatch {
* Provides Read-Your-Own-Writes like functionality by
* creating a new Iterator that will use {@link org.rocksdb.WBWIRocksIterator}
* as a delta and baseIterator as a base
- *
+ *
* Updating write batch with the current key of the iterator is not safe.
* We strongly recommend users not to do it. It will invalidate the current
* key() and value() of the iterator. This invalidation happens even before
@@ -173,7 +172,7 @@ public class WriteBatchWithIndex extends AbstractWriteBatch {
* @param baseIterator The base iterator,
* e.g. {@link org.rocksdb.RocksDB#newIterator()}
* @return An iterator which shows a view comprised of both the database
- * point-in-timefrom baseIterator and modifications made in this write batch.
+ * point-in-time from baseIterator and modifications made in this write batch.
*/
public RocksIterator newIteratorWithBase(final RocksIterator baseIterator) {
return newIteratorWithBase(baseIterator.parent_.getDefaultColumnFamily(), baseIterator, null);
@@ -189,7 +188,7 @@ public class WriteBatchWithIndex extends AbstractWriteBatch {
* e.g. {@link org.rocksdb.RocksDB#newIterator()}
* @param readOptions the read options, or null
* @return An iterator which shows a view comprised of both the database
- * point-in-timefrom baseIterator and modifications made in this write batch.
+ * point-in-time from baseIterator and modifications made in this write batch.
*/
public RocksIterator newIteratorWithBase(final RocksIterator baseIterator,
/* @Nullable */ final ReadOptions readOptions) {
@@ -238,11 +237,11 @@ public class WriteBatchWithIndex extends AbstractWriteBatch {
/**
* Similar to {@link RocksDB#get(ColumnFamilyHandle, byte[])} but will also
* read writes from this batch.
- *
+ *
* This function will query both this batch and the DB and then merge
* the results using the DB's merge operator (if the batch contains any
* merge requests).
- *
+ *
* Setting {@link ReadOptions#setSnapshot(Snapshot)} will affect what is
* read from the DB but will NOT change which keys are read from the batch
* (the keys in this batch do not yet belong to any snapshot and will be
@@ -268,11 +267,11 @@ public class WriteBatchWithIndex extends AbstractWriteBatch {
/**
* Similar to {@link RocksDB#get(byte[])} but will also
* read writes from this batch.
- *
+ *
* This function will query both this batch and the DB and then merge
* the results using the DB's merge operator (if the batch contains any
* merge requests).
- *
+ *
* Setting {@link ReadOptions#setSnapshot(Snapshot)} will affect what is
* read from the DB but will NOT change which keys are read from the batch
* (the keys in this batch do not yet belong to any snapshot and will be
@@ -338,12 +337,10 @@ public class WriteBatchWithIndex extends AbstractWriteBatch {
final long maxBytes);
@Override final native WriteBatch getWriteBatch(final long handle);
- private native static long newWriteBatchWithIndex();
- private native static long newWriteBatchWithIndex(final boolean overwriteKey);
- private native static long newWriteBatchWithIndex(
- final long fallbackIndexComparatorHandle,
- final byte comparatorType, final int reservedBytes,
- final boolean overwriteKey);
+ private static native long newWriteBatchWithIndex();
+ private static native long newWriteBatchWithIndex(final boolean overwriteKey);
+ private static native long newWriteBatchWithIndex(final long fallbackIndexComparatorHandle,
+ final byte comparatorType, final int reservedBytes, final boolean overwriteKey);
private native long iterator0(final long handle);
private native long iterator1(final long handle, final long cfHandle);
private native long iteratorWithBase(final long handle, final long cfHandle,
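A hedged sketch of the read-your-own-writes pattern the javadoc above describes, assuming an already-open db; getFromBatchAndDB merges the batch with the DB, and newIteratorWithBase overlays the batch on a base iterator:

    import org.rocksdb.ReadOptions;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;
    import org.rocksdb.RocksIterator;
    import org.rocksdb.WriteBatchWithIndex;

    public class WbwiSketch {
      static void readYourOwnWrites(final RocksDB db) throws RocksDBException {
        // overwriteKey = true: the index keeps only the latest operation per key.
        try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true);
             final ReadOptions readOpts = new ReadOptions()) {
          wbwi.put("k1".getBytes(), "v1-uncommitted".getBytes());

          // Sees the batched write even though nothing has reached the DB yet.
          final byte[] merged = wbwi.getFromBatchAndDB(db, readOpts, "k1".getBytes());
          assert merged != null;

          // A merged point-in-time view: batch entries layered over the DB state.
          try (final RocksIterator it = wbwi.newIteratorWithBase(db.newIterator())) {
            for (it.seekToFirst(); it.isValid(); it.next()) {
              // it.key() / it.value() reflect both the batch and the base DB.
            }
          }
        }
      }
    }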
diff --git a/java/src/main/java/org/rocksdb/WriteBufferManager.java b/java/src/main/java/org/rocksdb/WriteBufferManager.java
index 8ec963958..3364d6eab 100644
--- a/java/src/main/java/org/rocksdb/WriteBufferManager.java
+++ b/java/src/main/java/org/rocksdb/WriteBufferManager.java
@@ -15,7 +15,7 @@ public class WriteBufferManager extends RocksObject {
/**
* Construct a new instance of WriteBufferManager.
- *
+ *
* Check
* https://github.com/facebook/rocksdb/wiki/Write-Buffer-Manager
* for more details on when to use it
@@ -40,11 +40,11 @@ public class WriteBufferManager extends RocksObject {
return allowStall_;
}
- private native static long newWriteBufferManager(
+ private static native long newWriteBufferManager(
final long bufferSizeBytes, final long cacheHandle, final boolean allowStall);
@Override
protected native void disposeInternal(final long handle);
- private boolean allowStall_;
+ private final boolean allowStall_;
}
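A minimal sketch of wiring a WriteBufferManager into options, assuming a shared LRU cache; the sizes are illustrative:

    import org.rocksdb.LRUCache;
    import org.rocksdb.Options;
    import org.rocksdb.WriteBufferManager;

    public class WriteBufferManagerSketch {
      static Options chargedMemtables() {
        // Illustrative sizes: memtables capped at 128 MiB overall and charged
        // against a 512 MiB block cache; allowStall delays writers at the cap.
        final LRUCache cache = new LRUCache(512L * 1024 * 1024);
        final WriteBufferManager wbm =
            new WriteBufferManager(128L * 1024 * 1024, cache, /* allowStall */ true);
        // The caller must keep cache and wbm alive (and eventually close them)
        // for the lifetime of any DB opened with these options.
        return new Options().setCreateIfMissing(true).setWriteBufferManager(wbm);
      }
    }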
diff --git a/java/src/main/java/org/rocksdb/WriteOptions.java b/java/src/main/java/org/rocksdb/WriteOptions.java
index 5a3ffa6c5..7c184b094 100644
--- a/java/src/main/java/org/rocksdb/WriteOptions.java
+++ b/java/src/main/java/org/rocksdb/WriteOptions.java
@@ -7,7 +7,7 @@ package org.rocksdb;
/**
* Options that control write operations.
- *
+ *
* Note that developers should call WriteOptions.dispose() to release the
* c++ side memory before a WriteOptions instance runs out of scope.
*/
@@ -28,33 +28,32 @@ public class WriteOptions extends RocksObject {
/**
* Copy constructor for WriteOptions.
- *
+ *
* NOTE: This does a shallow copy, which means comparator, merge_operator, compaction_filter,
* compaction_filter_factory and other pointers will be cloned!
*
* @param other The WriteOptions to copy.
*/
- public WriteOptions(WriteOptions other) {
+ public WriteOptions(final WriteOptions other) {
super(copyWriteOptions(other.nativeHandle_));
}
-
/**
* If true, the write will be flushed from the operating system
* buffer cache (by calling WritableFile::Sync()) before the write
* is considered complete. If this flag is true, writes will be
* slower.
- *
+ *
* If this flag is false, and the machine crashes, some recent
* writes may be lost. Note that if it is just the process that
* crashes (i.e., the machine does not reboot), no writes will be
* lost even if sync==false.
- *
+ *
* In other words, a DB write with sync==false has similar
* crash semantics as the "write()" system call. A DB write
* with sync==true has similar crash semantics to a "write()"
* system call followed by "fdatasync()".
- *
+ *
* Default: false
*
* @param flag a boolean flag to indicate whether a write
@@ -71,12 +70,12 @@ public class WriteOptions extends RocksObject {
* buffer cache (by calling WritableFile::Sync()) before the write
* is considered complete. If this flag is true, writes will be
* slower.
- *
+ *
* If this flag is false, and the machine crashes, some recent
* writes may be lost. Note that if it is just the process that
* crashes (i.e., the machine does not reboot), no writes will be
* lost even if sync==false.
- *
+ *
* In other words, a DB write with sync==false has similar
* crash semantics as the "write()" system call. A DB write
* with sync==true has similar crash semantics to a "write()"
@@ -121,7 +120,7 @@ public class WriteOptions extends RocksObject {
* If true and if user is trying to write to column families that don't exist
* (they were dropped), ignore the write (don't return an error). If there
* are multiple writes in a WriteBatch, other writes will succeed.
- *
+ *
* Default: false
*
* @param ignoreMissingColumnFamilies true to ignore writes to column families
@@ -138,7 +137,7 @@ public class WriteOptions extends RocksObject {
* If true and if user is trying to write to column families that don't exist
* (they were dropped), ignore the write (don't return an error). If there
* are multiple writes in a WriteBatch, other writes will succeed.
- *
+ *
* Default: false
*
* @return true if writes to column families which don't exist are ignored
@@ -175,7 +174,7 @@ public class WriteOptions extends RocksObject {
* will be cancelled immediately with {@link Status.Code#Incomplete} returned.
* Otherwise, it will be slowed down. The slowdown value is determined by
* RocksDB to guarantee it introduces minimum impacts to high priority writes.
- *
+ *
* Default: false
*
* @param lowPri true if the write request should be of lower priority than
@@ -191,7 +190,7 @@ public class WriteOptions extends RocksObject {
/**
* Returns true if this write request is of lower priority if compaction is
* behind.
- *
+ *
* See {@link #setLowPri(boolean)}.
*
* @return true if this write request is of lower priority, false otherwise.
@@ -206,7 +205,7 @@ public class WriteOptions extends RocksObject {
* in concurrent writes if keys in one writebatch are sequential. In
* non-concurrent writes (when {@code concurrent_memtable_writes} is false) this
* option will be ignored.
- *
+ *
* Default: false
*
* @return true if writebatch will maintain the last insert positions of each memtable as hints in
@@ -222,7 +221,7 @@ public class WriteOptions extends RocksObject {
* in concurrent writes if keys in one writebatch are sequential. In
* non-concurrent writes (when {@code concurrent_memtable_writes} is false) this
* option will be ignored.
- *
+ *
* Default: false
*
* @param memtableInsertHintPerBatch true if writebatch should maintain the last insert positions
@@ -234,8 +233,8 @@ public class WriteOptions extends RocksObject {
return this;
}
- private native static long newWriteOptions();
- private native static long copyWriteOptions(long handle);
+ private static native long newWriteOptions();
+ private static native long copyWriteOptions(long handle);
@Override protected final native void disposeInternal(final long handle);
private native void setSync(long handle, boolean flag);
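The durability trade-offs spelled out above condense into a few chained setters; a hedged sketch of one plausible combination (not a recommendation made by this patch):

    import org.rocksdb.WriteOptions;

    public class WriteOptionsSketch {
      static WriteOptions durableLowPriority() {
        return new WriteOptions()
            .setSync(true) // "write()" followed by "fdatasync()" crash semantics
            .setIgnoreMissingColumnFamilies(true) // skip writes to dropped CFs
            .setLowPri(true) // yield to compaction under heavy load
            .setMemtableInsertHintPerBatch(true); // hint for sequential batch keys
      }
    }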
diff --git a/java/src/main/java/org/rocksdb/WriteStallInfo.java b/java/src/main/java/org/rocksdb/WriteStallInfo.java
index 4aef0eda9..1cade0acb 100644
--- a/java/src/main/java/org/rocksdb/WriteStallInfo.java
+++ b/java/src/main/java/org/rocksdb/WriteStallInfo.java
@@ -51,12 +51,12 @@ public class WriteStallInfo {
}
@Override
- public boolean equals(Object o) {
+ public boolean equals(final Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
- WriteStallInfo that = (WriteStallInfo) o;
+ final WriteStallInfo that = (WriteStallInfo) o;
return Objects.equals(columnFamilyName, that.columnFamilyName)
&& currentCondition == that.currentCondition && previousCondition == that.previousCondition;
}
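The equals() above compares the column family name and both stall conditions; by the usual contract, hashCode() must cover exactly the same fields. A self-contained illustration (a hypothetical class, not WriteStallInfo itself):

    import java.util.Objects;

    final class StallKey {
      private final String columnFamilyName;
      private final int currentCondition;
      private final int previousCondition;

      StallKey(final String cf, final int current, final int previous) {
        this.columnFamilyName = cf;
        this.currentCondition = current;
        this.previousCondition = previous;
      }

      @Override
      public boolean equals(final Object o) {
        if (this == o)
          return true;
        if (o == null || getClass() != o.getClass())
          return false;
        final StallKey that = (StallKey) o;
        return Objects.equals(columnFamilyName, that.columnFamilyName)
            && currentCondition == that.currentCondition
            && previousCondition == that.previousCondition;
      }

      @Override
      public int hashCode() {
        // Must hash the same fields that equals() compares.
        return Objects.hash(columnFamilyName, currentCondition, previousCondition);
      }
    }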
diff --git a/java/src/test/java/org/rocksdb/AbstractTransactionTest.java b/java/src/test/java/org/rocksdb/AbstractTransactionTest.java
index 46685f9fd..d57258009 100644
--- a/java/src/test/java/org/rocksdb/AbstractTransactionTest.java
+++ b/java/src/test/java/org/rocksdb/AbstractTransactionTest.java
@@ -5,26 +5,22 @@
package org.rocksdb;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.assertj.core.api.Assertions.assertThat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.assertj.core.api.Assertions.fail;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
/**
* Base class of {@link TransactionTest} and {@link OptimisticTransactionTest}
*/
public abstract class AbstractTransactionTest {
-
- protected final static byte[] TXN_TEST_COLUMN_FAMILY = "txn_test_cf"
- .getBytes();
+ protected static final byte[] TXN_TEST_COLUMN_FAMILY = "txn_test_cf".getBytes();
protected static final Random rand = PlatformRandomHelper.
getPlatformSpecificRandomFactory();
@@ -107,8 +103,8 @@ public abstract class AbstractTransactionTest {
@Test
public void commit() throws RocksDBException {
- final byte k1[] = "rollback-key1".getBytes(UTF_8);
- final byte v1[] = "rollback-value1".getBytes(UTF_8);
+ final byte[] k1 = "rollback-key1".getBytes(UTF_8);
+ final byte[] v1 = "rollback-value1".getBytes(UTF_8);
try(final DBContainer dbContainer = startDb()) {
try(final Transaction txn = dbContainer.beginTransaction()) {
txn.put(k1, v1);
@@ -124,8 +120,8 @@ public abstract class AbstractTransactionTest {
@Test
public void rollback() throws RocksDBException {
- final byte k1[] = "rollback-key1".getBytes(UTF_8);
- final byte v1[] = "rollback-value1".getBytes(UTF_8);
+ final byte[] k1 = "rollback-key1".getBytes(UTF_8);
+ final byte[] v1 = "rollback-value1".getBytes(UTF_8);
try(final DBContainer dbContainer = startDb()) {
try(final Transaction txn = dbContainer.beginTransaction()) {
txn.put(k1, v1);
@@ -141,10 +137,10 @@ public abstract class AbstractTransactionTest {
@Test
public void savePoint() throws RocksDBException {
- final byte k1[] = "savePoint-key1".getBytes(UTF_8);
- final byte v1[] = "savePoint-value1".getBytes(UTF_8);
- final byte k2[] = "savePoint-key2".getBytes(UTF_8);
- final byte v2[] = "savePoint-value2".getBytes(UTF_8);
+ final byte[] k1 = "savePoint-key1".getBytes(UTF_8);
+ final byte[] v1 = "savePoint-value1".getBytes(UTF_8);
+ final byte[] k2 = "savePoint-key2".getBytes(UTF_8);
+ final byte[] v2 = "savePoint-value2".getBytes(UTF_8);
try(final DBContainer dbContainer = startDb();
final ReadOptions readOptions = new ReadOptions()) {
@@ -179,8 +175,8 @@ public abstract class AbstractTransactionTest {
@Test
public void getPut_cf() throws RocksDBException {
- final byte k1[] = "key1".getBytes(UTF_8);
- final byte v1[] = "value1".getBytes(UTF_8);
+ final byte[] k1 = "key1".getBytes(UTF_8);
+ final byte[] v1 = "value1".getBytes(UTF_8);
try(final DBContainer dbContainer = startDb();
final ReadOptions readOptions = new ReadOptions();
final Transaction txn = dbContainer.beginTransaction()) {
@@ -193,8 +189,8 @@ public abstract class AbstractTransactionTest {
@Test
public void getPut() throws RocksDBException {
- final byte k1[] = "key1".getBytes(UTF_8);
- final byte v1[] = "value1".getBytes(UTF_8);
+ final byte[] k1 = "key1".getBytes(UTF_8);
+ final byte[] v1 = "value1".getBytes(UTF_8);
try(final DBContainer dbContainer = startDb();
final ReadOptions readOptions = new ReadOptions();
final Transaction txn = dbContainer.beginTransaction()) {
@@ -279,8 +275,8 @@ public abstract class AbstractTransactionTest {
@Test
public void getForUpdate_cf() throws RocksDBException {
- final byte k1[] = "key1".getBytes(UTF_8);
- final byte v1[] = "value1".getBytes(UTF_8);
+ final byte[] k1 = "key1".getBytes(UTF_8);
+ final byte[] v1 = "value1".getBytes(UTF_8);
try(final DBContainer dbContainer = startDb();
final ReadOptions readOptions = new ReadOptions();
final Transaction txn = dbContainer.beginTransaction()) {
@@ -293,8 +289,8 @@ public abstract class AbstractTransactionTest {
@Test
public void getForUpdate() throws RocksDBException {
- final byte k1[] = "key1".getBytes(UTF_8);
- final byte v1[] = "value1".getBytes(UTF_8);
+ final byte[] k1 = "key1".getBytes(UTF_8);
+ final byte[] v1 = "value1".getBytes(UTF_8);
try(final DBContainer dbContainer = startDb();
final ReadOptions readOptions = new ReadOptions();
final Transaction txn = dbContainer.beginTransaction()) {
@@ -306,12 +302,8 @@ public abstract class AbstractTransactionTest {
@Test
public void multiGetForUpdate_cf() throws RocksDBException {
- final byte keys[][] = new byte[][] {
- "key1".getBytes(UTF_8),
- "key2".getBytes(UTF_8)};
- final byte values[][] = new byte[][] {
- "value1".getBytes(UTF_8),
- "value2".getBytes(UTF_8)};
+ final byte[][] keys = new byte[][] {"key1".getBytes(UTF_8), "key2".getBytes(UTF_8)};
+ final byte[][] values = new byte[][] {"value1".getBytes(UTF_8), "value2".getBytes(UTF_8)};
try(final DBContainer dbContainer = startDb();
final ReadOptions readOptions = new ReadOptions();
@@ -331,12 +323,8 @@ public abstract class AbstractTransactionTest {
@Test
public void multiGetForUpdate() throws RocksDBException {
- final byte keys[][] = new byte[][]{
- "key1".getBytes(UTF_8),
- "key2".getBytes(UTF_8)};
- final byte values[][] = new byte[][]{
- "value1".getBytes(UTF_8),
- "value2".getBytes(UTF_8)};
+ final byte[][] keys = new byte[][] {"key1".getBytes(UTF_8), "key2".getBytes(UTF_8)};
+ final byte[][] values = new byte[][] {"value1".getBytes(UTF_8), "value2".getBytes(UTF_8)};
try (final DBContainer dbContainer = startDb();
final ReadOptions readOptions = new ReadOptions();
@@ -349,6 +337,53 @@ public abstract class AbstractTransactionTest {
}
}
+ @Test
+ public void multiGetForUpdateAsList_cf() throws RocksDBException {
+ final List
* {@link ...}
* {@link ...}
*/
@@ -34,8 +34,8 @@ public class BytewiseComparatorRegressionTest {
@Rule public TemporaryFolder temporarySSTFolder = new TemporaryFolder();
- private final static byte[][] testData = {{10, -11, 13}, {10, 11, 12}, {10, 11, 14}};
- private final static byte[][] orderedData = {{10, 11, 12}, {10, 11, 14}, {10, -11, 13}};
+ private static final byte[][] testData = {{10, -11, 13}, {10, 11, 12}, {10, 11, 14}};
+ private static final byte[][] orderedData = {{10, 11, 12}, {10, 11, 14}, {10, -11, 13}};
/**
* {@link ...}
@@ -43,12 +43,16 @@ public class BytewiseComparatorRegressionTest {
@Test
public void testJavaComparator() throws RocksDBException {
final BytewiseComparator comparator = new BytewiseComparator(new ComparatorOptions());
- performTest(new Options().setCreateIfMissing(true).setComparator(comparator));
+ try (final Options options = new Options().setCreateIfMissing(true).setComparator(comparator)) {
+ performTest(options);
+ }
}
@Test
public void testDefaultComparator() throws RocksDBException {
- performTest(new Options().setCreateIfMissing(true));
+ try (final Options options = new Options().setCreateIfMissing(true)) {
+ performTest(options);
+ }
}
/**
@@ -56,8 +60,10 @@ public class BytewiseComparatorRegressionTest {
*/
@Test
public void testCppComparator() throws RocksDBException {
- performTest(new Options().setCreateIfMissing(true).setComparator(
- BuiltinComparator.BYTEWISE_COMPARATOR));
+ try (final Options options = new Options().setCreateIfMissing(true).setComparator(
+ BuiltinComparator.BYTEWISE_COMPARATOR)) {
+ performTest(options);
+ }
}
private void performTest(final Options options) throws RocksDBException {
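The test changes above all follow one pattern: every Options (like any other RocksObject) is opened in try-with-resources so its native handle is released even when the body throws. A distilled sketch, with an illustrative path parameter:

    import org.rocksdb.Options;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;

    public class TryWithResourcesSketch {
      static void openAndClose(final String dbPath) throws RocksDBException {
        // Resources are closed in reverse declaration order: db first, then
        // options, each triggering disposeInternal() on the C++ object it owns.
        try (final Options options = new Options().setCreateIfMissing(true);
             final RocksDB db = RocksDB.open(options, dbPath)) {
          db.put("k".getBytes(), "v".getBytes());
        }
      }
    }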
diff --git a/java/src/test/java/org/rocksdb/CheckPointTest.java b/java/src/test/java/org/rocksdb/CheckPointTest.java
index c2cc6fc62..2b3cc7a3b 100644
--- a/java/src/test/java/org/rocksdb/CheckPointTest.java
+++ b/java/src/test/java/org/rocksdb/CheckPointTest.java
@@ -59,8 +59,7 @@ public class CheckPointTest {
@Test(expected = IllegalArgumentException.class)
public void failIfDbIsNull() {
- try (final Checkpoint checkpoint = Checkpoint.create(null)) {
-
+ try (final Checkpoint ignored = Checkpoint.create(null)) {
}
}
diff --git a/java/src/test/java/org/rocksdb/ClockCacheTest.java b/java/src/test/java/org/rocksdb/ClockCacheTest.java
index d1241ac75..718c24f70 100644
--- a/java/src/test/java/org/rocksdb/ClockCacheTest.java
+++ b/java/src/test/java/org/rocksdb/ClockCacheTest.java
@@ -18,8 +18,7 @@ public class ClockCacheTest {
final long capacity = 1000;
final int numShardBits = 16;
final boolean strictCapacityLimit = true;
- try(final Cache clockCache = new ClockCache(capacity,
- numShardBits, strictCapacityLimit)) {
+ try (final Cache ignored = new ClockCache(capacity, numShardBits, strictCapacityLimit)) {
//no op
}
}
diff --git a/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java b/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java
index 7d7581048..a5fe8cef7 100644
--- a/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java
+++ b/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java
@@ -9,7 +9,6 @@ import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assert.assertEquals;
import java.io.IOException;
-import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.*;
import org.junit.ClassRule;
@@ -27,11 +26,11 @@ public class ColumnFamilyOptionsTest {
@Test
public void copyConstructor() {
- ColumnFamilyOptions origOpts = new ColumnFamilyOptions();
+ final ColumnFamilyOptions origOpts = new ColumnFamilyOptions();
origOpts.setNumLevels(rand.nextInt(8));
origOpts.setTargetFileSizeMultiplier(rand.nextInt(100));
origOpts.setLevel0StopWritesTrigger(rand.nextInt(50));
- ColumnFamilyOptions copyOpts = new ColumnFamilyOptions(origOpts);
+ final ColumnFamilyOptions copyOpts = new ColumnFamilyOptions(origOpts);
assertThat(origOpts.numLevels()).isEqualTo(copyOpts.numLevels());
assertThat(origOpts.targetFileSizeMultiplier()).isEqualTo(copyOpts.targetFileSizeMultiplier());
assertThat(origOpts.level0StopWritesTrigger()).isEqualTo(copyOpts.level0StopWritesTrigger());
@@ -39,7 +38,7 @@ public class ColumnFamilyOptionsTest {
@Test
public void getColumnFamilyOptionsFromProps() {
- Properties properties = new Properties();
+ final Properties properties = new Properties();
properties.put("write_buffer_size", "112");
properties.put("max_write_buffer_number", "13");
@@ -90,16 +89,15 @@ public class ColumnFamilyOptionsTest {
@Test(expected = IllegalArgumentException.class)
public void failColumnFamilyOptionsFromPropsWithNullValue() {
- try (final ColumnFamilyOptions opt =
+ try (final ColumnFamilyOptions ignored =
ColumnFamilyOptions.getColumnFamilyOptionsFromProps(null)) {
}
}
@Test(expected = IllegalArgumentException.class)
public void failColumnFamilyOptionsFromPropsWithEmptyProps() {
- try (final ColumnFamilyOptions opt =
- ColumnFamilyOptions.getColumnFamilyOptionsFromProps(
- new Properties())) {
+ try (final ColumnFamilyOptions ignored =
+ ColumnFamilyOptions.getColumnFamilyOptionsFromProps(new Properties())) {
}
}
@@ -455,7 +453,7 @@ public class ColumnFamilyOptionsTest {
}
columnFamilyOptions.setCompressionPerLevel(compressionTypeList);
compressionTypeList = columnFamilyOptions.compressionPerLevel();
- for (CompressionType compressionType : compressionTypeList) {
+ for (final CompressionType compressionType : compressionTypeList) {
assertThat(compressionType).isEqualTo(
CompressionType.NO_COMPRESSION);
}
diff --git a/java/src/test/java/org/rocksdb/ColumnFamilyTest.java b/java/src/test/java/org/rocksdb/ColumnFamilyTest.java
index e98327d93..fb8a45085 100644
--- a/java/src/test/java/org/rocksdb/ColumnFamilyTest.java
+++ b/java/src/test/java/org/rocksdb/ColumnFamilyTest.java
@@ -22,16 +22,14 @@ public class ColumnFamilyTest {
public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
new RocksNativeLibraryResource();
- @Rule
- public TemporaryFolder dbFolder = new TemporaryFolder();
+ @Rule public TemporaryFolder dbFolder = new TemporaryFolder();
@Test
public void columnFamilyDescriptorName() throws RocksDBException {
final byte[] cfName = "some_name".getBytes(UTF_8);
try(final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions()) {
- final ColumnFamilyDescriptor cfDescriptor =
- new ColumnFamilyDescriptor(cfName, cfOptions);
+ final ColumnFamilyDescriptor cfDescriptor = new ColumnFamilyDescriptor(cfName, cfOptions);
assertThat(cfDescriptor.getName()).isEqualTo(cfName);
}
}
@@ -40,24 +38,23 @@ public class ColumnFamilyTest {
public void columnFamilyDescriptorOptions() throws RocksDBException {
final byte[] cfName = "some_name".getBytes(UTF_8);
- try(final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions()
- .setCompressionType(CompressionType.BZLIB2_COMPRESSION)) {
+ try (final ColumnFamilyOptions cfOptions =
+ new ColumnFamilyOptions().setCompressionType(CompressionType.BZLIB2_COMPRESSION)) {
final ColumnFamilyDescriptor cfDescriptor =
new ColumnFamilyDescriptor(cfName, cfOptions);
- assertThat(cfDescriptor.getOptions().compressionType())
- .isEqualTo(CompressionType.BZLIB2_COMPRESSION);
+ assertThat(cfDescriptor.getOptions().compressionType())
+ .isEqualTo(CompressionType.BZLIB2_COMPRESSION);
}
}
@Test
public void listColumnFamilies() throws RocksDBException {
try (final Options options = new Options().setCreateIfMissing(true);
- final RocksDB db = RocksDB.open(options,
- dbFolder.getRoot().getAbsolutePath())) {
+ final RocksDB ignored = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath())) {
// Test listColumnFamilies
- final List