diff --git a/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java b/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java index 2f0d4f3ca..fd7eef4d4 100644 --- a/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java +++ b/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java @@ -7,8 +7,8 @@ package org.rocksdb; /** * A CompactionFilter allows an application to modify/delete a key-value at * the time of compaction. - * - * At present we just permit an overriding Java class to wrap a C++ + *
<p>
+ * At present, we just permit an overriding Java class to wrap a C++ * implementation */ public abstract class AbstractCompactionFilter<T extends AbstractSlice<?>> @@ -49,10 +49,10 @@ public abstract class AbstractCompactionFilter<T extends AbstractSlice<?>> /** * Deletes underlying C++ compaction pointer. - * + *
<p>
* Note that this function should be called only after all * RocksDB instances referencing the compaction filter are closed. - * Otherwise an undefined behavior will occur. + * Otherwise, an undefined behavior will occur. */ @Override protected final native void disposeInternal(final long handle); diff --git a/java/src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java b/java/src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java index 380b4461d..4bb985a34 100644 --- a/java/src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java +++ b/java/src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java @@ -15,7 +15,7 @@ public abstract class AbstractCompactionFilterFactory * The name will be printed to the LOG file on start up for diagnosis * * @return name which identifies this compaction filter. diff --git a/java/src/main/java/org/rocksdb/AbstractComparator.java b/java/src/main/java/org/rocksdb/AbstractComparator.java index a89e79048..83e0f0676 100644 --- a/java/src/main/java/org/rocksdb/AbstractComparator.java +++ b/java/src/main/java/org/rocksdb/AbstractComparator.java @@ -31,7 +31,7 @@ public abstract class AbstractComparator /** * Get the type of this comparator. - * + *
<p>
* Used for determining the correct C++ cast in native code. * * @return The type of the comparator. @@ -44,11 +44,11 @@ public abstract class AbstractComparator * The name of the comparator. Used to check for comparator * mismatches (i.e., a DB created with one comparator is * accessed using a different comparator). - * + *
<p>
* A new name should be used whenever * the comparator implementation changes in a way that will cause * the relative ordering of any two keys to change. - * + *
<p>
* Names starting with "rocksdb." are reserved and should not be used. * * @return The name of this comparator implementation diff --git a/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java b/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java index b732d2495..2d1bf702b 100644 --- a/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java +++ b/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java @@ -18,108 +18,102 @@ import java.nio.ByteBuffer; * {@link org.rocksdb.AbstractComparator} clean. */ class AbstractComparatorJniBridge { + /** + * Only called from JNI. + *
<p>
+ * Simply a bridge to calling + * {@link AbstractComparator#compare(ByteBuffer, ByteBuffer)}, + * which ensures that the byte buffer lengths are correct + * before and after the call. + * + * @param comparator the comparator object on which to + * call {@link AbstractComparator#compare(ByteBuffer, ByteBuffer)} + * @param a buffer access to first key + * @param aLen the length of the a key, + * may be smaller than the buffer {@code a} + * @param b buffer access to second key + * @param bLen the length of the b key, + * may be smaller than the buffer {@code b} + * + * @return the result of the comparison + */ + private static int compareInternal(final AbstractComparator comparator, final ByteBuffer a, + final int aLen, final ByteBuffer b, final int bLen) { + if (aLen != -1) { + a.mark(); + a.limit(aLen); + } + if (bLen != -1) { + b.mark(); + b.limit(bLen); + } - /** - * Only called from JNI. - * - * Simply a bridge to calling - * {@link AbstractComparator#compare(ByteBuffer, ByteBuffer)}, - * which ensures that the byte buffer lengths are correct - * before and after the call. - * - * @param comparator the comparator object on which to - * call {@link AbstractComparator#compare(ByteBuffer, ByteBuffer)} - * @param a buffer access to first key - * @param aLen the length of the a key, - * may be smaller than the buffer {@code a} - * @param b buffer access to second key - * @param bLen the length of the b key, - * may be smaller than the buffer {@code b} - * - * @return the result of the comparison - */ - private static int compareInternal( - final AbstractComparator comparator, - final ByteBuffer a, final int aLen, - final ByteBuffer b, final int bLen) { - if (aLen != -1) { - a.mark(); - a.limit(aLen); - } - if (bLen != -1) { - b.mark(); - b.limit(bLen); - } + final int c = comparator.compare(a, b); - final int c = comparator.compare(a, b); + if (aLen != -1) { + a.reset(); + } + if (bLen != -1) { + b.reset(); + } - if (aLen != -1) { - a.reset(); - } - if (bLen != -1) { - b.reset(); - } + return c; + } - return c; + /** + * Only called from JNI. + *
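For context, a minimal sketch of a comparator this bridge might invoke; the class name and the reversed byte-wise ordering are illustrative assumptions, not part of this change. The bridge has already clamped each buffer's limit to the key's true length, and it restores buffer state afterwards.

import java.nio.ByteBuffer;
import org.rocksdb.AbstractComparator;
import org.rocksdb.ComparatorOptions;

// Hypothetical example comparator; not part of the RocksDB sources.
public class ReverseBytewiseExample extends AbstractComparator {
  public ReverseBytewiseExample(final ComparatorOptions options) {
    super(options);
  }

  @Override
  public String name() {
    // Names starting with "rocksdb." are reserved, so use our own prefix.
    return "example.ReverseBytewiseExample";
  }

  @Override
  public int compare(final ByteBuffer a, final ByteBuffer b) {
    // compareInternal() has set limit() to each key's length, so remaining()
    // is the key size; the bridge restores positions after this returns.
    final int min = Math.min(a.remaining(), b.remaining());
    for (int i = 0; i < min; i++) {
      final int cmp = Integer.compare(a.get() & 0xff, b.get() & 0xff);
      if (cmp != 0) {
        return -cmp; // reversed byte-wise order
      }
    }
    return -Integer.compare(a.remaining(), b.remaining());
  }
}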
<p>
+ * Simply a bridge to calling + * {@link AbstractComparator#findShortestSeparator(ByteBuffer, ByteBuffer)}, + * which ensures that the byte buffer lengths are correct + * before the call. + * + * @param comparator the comparator object on which to + * call {@link AbstractComparator#findShortestSeparator(ByteBuffer, ByteBuffer)} + * @param start buffer access to the start key + * @param startLen the length of the start key, + * may be smaller than the buffer {@code start} + * @param limit buffer access to the limit key + * @param limitLen the length of the limit key, + * may be smaller than the buffer {@code limit} + * + * @return either {@code startLen} if the start key is unchanged, otherwise + * the new length of the start key + */ + private static int findShortestSeparatorInternal(final AbstractComparator comparator, + final ByteBuffer start, final int startLen, final ByteBuffer limit, final int limitLen) { + if (startLen != -1) { + start.limit(startLen); } - - /** - * Only called from JNI. - * - * Simply a bridge to calling - * {@link AbstractComparator#findShortestSeparator(ByteBuffer, ByteBuffer)}, - * which ensures that the byte buffer lengths are correct - * before the call. - * - * @param comparator the comparator object on which to - * call {@link AbstractComparator#findShortestSeparator(ByteBuffer, ByteBuffer)} - * @param start buffer access to the start key - * @param startLen the length of the start key, - * may be smaller than the buffer {@code start} - * @param limit buffer access to the limit key - * @param limitLen the length of the limit key, - * may be smaller than the buffer {@code limit} - * - * @return either {@code startLen} if the start key is unchanged, otherwise - * the new length of the start key - */ - private static int findShortestSeparatorInternal( - final AbstractComparator comparator, - final ByteBuffer start, final int startLen, - final ByteBuffer limit, final int limitLen) { - if (startLen != -1) { - start.limit(startLen); - } - if (limitLen != -1) { - limit.limit(limitLen); - } - comparator.findShortestSeparator(start, limit); - return start.remaining(); + if (limitLen != -1) { + limit.limit(limitLen); } + comparator.findShortestSeparator(start, limit); + return start.remaining(); + } - /** - * Only called from JNI. - * - * Simply a bridge to calling - * {@link AbstractComparator#findShortestSeparator(ByteBuffer, ByteBuffer)}, - * which ensures that the byte buffer length is correct - * before the call. - * - * @param comparator the comparator object on which to - * call {@link AbstractComparator#findShortSuccessor(ByteBuffer)} - * @param key buffer access to the key - * @param keyLen the length of the key, - * may be smaller than the buffer {@code key} - * - * @return either keyLen if the key is unchanged, otherwise the new length of the key - */ - private static int findShortSuccessorInternal( - final AbstractComparator comparator, - final ByteBuffer key, final int keyLen) { - if (keyLen != -1) { - key.limit(keyLen); - } - comparator.findShortSuccessor(key); - return key.remaining(); + /** + * Only called from JNI. + *
<p>
+ * Simply a bridge to calling + * {@link AbstractComparator#findShortestSeparator(ByteBuffer, ByteBuffer)}, + * which ensures that the byte buffer length is correct + * before the call. + * + * @param comparator the comparator object on which to + * call {@link AbstractComparator#findShortSuccessor(ByteBuffer)} + * @param key buffer access to the key + * @param keyLen the length of the key, + * may be smaller than the buffer {@code key} + * + * @return either keyLen if the key is unchanged, otherwise the new length of the key + */ + private static int findShortSuccessorInternal( + final AbstractComparator comparator, final ByteBuffer key, final int keyLen) { + if (keyLen != -1) { + key.limit(keyLen); } + comparator.findShortSuccessor(key); + return key.remaining(); + } } diff --git a/java/src/main/java/org/rocksdb/AbstractEventListener.java b/java/src/main/java/org/rocksdb/AbstractEventListener.java index 6698acf88..d640d3423 100644 --- a/java/src/main/java/org/rocksdb/AbstractEventListener.java +++ b/java/src/main/java/org/rocksdb/AbstractEventListener.java @@ -71,8 +71,8 @@ public abstract class AbstractEventListener extends RocksCallbackObject implemen /** * Creates an Event Listener that will - * received all callbacks from C++. - * + * receive all callbacks from C++. + *
<p>
* If you don't need all callbacks, it is much more efficient to * just register for the ones you need by calling * {@link #AbstractEventListener(EnabledEventCallback...)} instead. @@ -106,8 +106,8 @@ public abstract class AbstractEventListener extends RocksCallbackObject implemen */ private static long packToLong(final EnabledEventCallback... enabledEventCallbacks) { long l = 0; - for (int i = 0; i < enabledEventCallbacks.length; i++) { - l |= 1 << enabledEventCallbacks[i].getValue(); + for (final EnabledEventCallback enabledEventCallback : enabledEventCallbacks) { + l |= 1L << enabledEventCallback.getValue(); } return l; } diff --git a/java/src/main/java/org/rocksdb/AbstractMutableOptions.java b/java/src/main/java/org/rocksdb/AbstractMutableOptions.java index 7189272b8..1a6251bd4 100644 --- a/java/src/main/java/org/rocksdb/AbstractMutableOptions.java +++ b/java/src/main/java/org/rocksdb/AbstractMutableOptions.java @@ -53,25 +53,23 @@ public abstract class AbstractMutableOptions { return buffer.toString(); } - public static abstract class AbstractMutableOptionsBuilder< - T extends AbstractMutableOptions, - U extends AbstractMutableOptionsBuilder, - K extends MutableOptionKey> { - + public abstract static class AbstractMutableOptionsBuilder< + T extends AbstractMutableOptions, U extends AbstractMutableOptionsBuilder, K + extends MutableOptionKey> { private final Map> options = new LinkedHashMap<>(); private final List unknown = new ArrayList<>(); protected abstract U self(); /** - * Get all of the possible keys + * Get all the possible keys * * @return A map of all keys, indexed by name. */ protected abstract Map allKeys(); /** - * Construct a sub-class instance of {@link AbstractMutableOptions}. + * Construct a subclass instance of {@link AbstractMutableOptions}. 
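The change to packToLong() above is subtle: with an int literal, the shift distance is reduced modulo 32, so any callback ordinal of 32 or more would silently collide with a low bit. A small self-contained demonstration (the ordinal value is illustrative):

public class ShiftWidthDemo {
  public static void main(final String[] args) {
    final int ordinal = 32; // e.g. a 33rd EnabledEventCallback value
    System.out.println(1 << ordinal);  // prints 1: int shifts wrap at 32 bits
    System.out.println(1L << ordinal); // prints 4294967296: a distinct flag bit
  }
}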
* * @param keys the keys * @param values the values @@ -224,7 +222,7 @@ public abstract class AbstractMutableOptions { private long parseAsLong(final String value) { try { return Long.parseLong(value); - } catch (NumberFormatException nfe) { + } catch (final NumberFormatException nfe) { final double doubleValue = Double.parseDouble(value); if (doubleValue != Math.round(doubleValue)) throw new IllegalArgumentException("Unable to parse or round " + value + " to long"); @@ -242,7 +240,7 @@ public abstract class AbstractMutableOptions { private int parseAsInt(final String value) { try { return Integer.parseInt(value); - } catch (NumberFormatException nfe) { + } catch (final NumberFormatException nfe) { final double doubleValue = Double.parseDouble(value); if (doubleValue != Math.round(doubleValue)) throw new IllegalArgumentException("Unable to parse or round " + value + " to int"); @@ -271,7 +269,7 @@ public abstract class AbstractMutableOptions { throw new IllegalArgumentException("options string is invalid: " + option); } fromOptionString(option, ignoreUnknown); - } catch (NumberFormatException nfe) { + } catch (final NumberFormatException nfe) { throw new IllegalArgumentException( "" + option.key + "=" + option.value + " - not a valid value for its type", nfe); } @@ -287,7 +285,7 @@ public abstract class AbstractMutableOptions { * @param ignoreUnknown if this is not set, throw an exception when a key is not in the known * set * @return the same object, after adding options - * @throws IllegalArgumentException if the key is unkown, or a value has the wrong type/form + * @throws IllegalArgumentException if the key is unknown, or a value has the wrong type/form */ private U fromOptionString(final OptionString.Entry option, final boolean ignoreUnknown) throws IllegalArgumentException { @@ -299,7 +297,7 @@ public abstract class AbstractMutableOptions { unknown.add(option); return self(); } else if (key == null) { - throw new IllegalArgumentException("Key: " + key + " is not a known option key"); + throw new IllegalArgumentException("Key: " + null + " is not a known option key"); } if (!option.value.isList()) { @@ -341,7 +339,7 @@ public abstract class AbstractMutableOptions { return setIntArray(key, value); case ENUM: - String optionName = key.name(); + final String optionName = key.name(); if (optionName.equals("prepopulate_blob_cache")) { final PrepopulateBlobCache prepopulateBlobCache = PrepopulateBlobCache.getFromInternal(valueStr); diff --git a/java/src/main/java/org/rocksdb/AbstractNativeReference.java b/java/src/main/java/org/rocksdb/AbstractNativeReference.java index 88b2963b6..1ce54fcba 100644 --- a/java/src/main/java/org/rocksdb/AbstractNativeReference.java +++ b/java/src/main/java/org/rocksdb/AbstractNativeReference.java @@ -16,8 +16,9 @@ package org.rocksdb; * try-with-resources * statement, when you are finished with the object. It is no longer - * called automatically during the regular Java GC process via + * called automatically during the regular Java GC process via finalization * {@link AbstractNativeReference#finalize()}.
<p>
+ * which is deprecated from Java 9. *
<p>
* Explanatory note - When or if the Garbage Collector calls {@link Object#finalize()} * depends on the JVM implementation and system conditions, which the programmer diff --git a/java/src/main/java/org/rocksdb/AbstractSlice.java b/java/src/main/java/org/rocksdb/AbstractSlice.java index 5a22e2956..0681b6758 100644 --- a/java/src/main/java/org/rocksdb/AbstractSlice.java +++ b/java/src/main/java/org/rocksdb/AbstractSlice.java @@ -8,7 +8,7 @@ package org.rocksdb; /** * Slices are used by RocksDB to provide * efficient access to keys and values. - * + *
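Since finalization no longer frees native handles, explicit closing matters. A minimal sketch of the try-with-resources pattern the javadoc recommends (the path is hypothetical); resources close in reverse declaration order, so the DB releases its handle before the Options it references:

import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class CloseDemo {
  public static void main(final String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final Options options = new Options().setCreateIfMissing(true);
         final RocksDB db = RocksDB.open(options, "/tmp/close-demo")) {
      db.put("key".getBytes(), "value".getBytes());
    } // db closes first, then options; no reliance on the garbage collector
  }
}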
<p>
* This class is package private, implementers * should extend either of the public abstract classes: * @see org.rocksdb.Slice @@ -147,7 +147,7 @@ public abstract class AbstractSlice extends RocksMutableObject { */ @Override public boolean equals(final Object other) { - if (other != null && other instanceof AbstractSlice) { + if (other instanceof AbstractSlice) { return compare((AbstractSlice)other) == 0; } else { return false; @@ -172,7 +172,7 @@ public abstract class AbstractSlice extends RocksMutableObject { } } - protected native static long createNewSliceFromString(final String str); + protected static native long createNewSliceFromString(final String str); private native int size0(long handle); private native boolean empty0(long handle); private native String toString0(long handle, boolean hex); @@ -183,7 +183,7 @@ public abstract class AbstractSlice extends RocksMutableObject { * Deletes underlying C++ slice pointer. * Note that this function should be called only after all * RocksDB instances referencing the slice are closed. - * Otherwise an undefined behavior will occur. + * Otherwise, an undefined behavior will occur. */ @Override protected final native void disposeInternal(final long handle); diff --git a/java/src/main/java/org/rocksdb/AbstractTraceWriter.java b/java/src/main/java/org/rocksdb/AbstractTraceWriter.java index 806709b1f..13edfbd84 100644 --- a/java/src/main/java/org/rocksdb/AbstractTraceWriter.java +++ b/java/src/main/java/org/rocksdb/AbstractTraceWriter.java @@ -62,7 +62,7 @@ public abstract class AbstractTraceWriter private static short statusToShort(final Status.Code code, final Status.SubCode subCode) { - short result = (short)(code.getValue() << 8); + final short result = (short) (code.getValue() << 8); return (short)(result | subCode.getValue()); } diff --git a/java/src/main/java/org/rocksdb/AbstractTransactionNotifier.java b/java/src/main/java/org/rocksdb/AbstractTransactionNotifier.java index cbb49836d..b117e5cc2 100644 --- a/java/src/main/java/org/rocksdb/AbstractTransactionNotifier.java +++ b/java/src/main/java/org/rocksdb/AbstractTransactionNotifier.java @@ -41,10 +41,10 @@ public abstract class AbstractTransactionNotifier /** * Deletes underlying C++ TransactionNotifier pointer. - * + *
<p>
* Note that this function should be called only after all * Transactions referencing the comparator are closed. - * Otherwise an undefined behavior will occur. + * Otherwise, an undefined behavior will occur. */ @Override protected void disposeInternal() { diff --git a/java/src/main/java/org/rocksdb/AbstractWalFilter.java b/java/src/main/java/org/rocksdb/AbstractWalFilter.java index d525045c6..fc77eab8e 100644 --- a/java/src/main/java/org/rocksdb/AbstractWalFilter.java +++ b/java/src/main/java/org/rocksdb/AbstractWalFilter.java @@ -41,7 +41,7 @@ public abstract class AbstractWalFilter private static short logRecordFoundResultToShort( final LogRecordFoundResult logRecordFoundResult) { - short result = (short)(logRecordFoundResult.walProcessingOption.getValue() << 8); + final short result = (short) (logRecordFoundResult.walProcessingOption.getValue() << 8); return (short)(result | (logRecordFoundResult.batchChanged ? 1 : 0)); } diff --git a/java/src/main/java/org/rocksdb/AbstractWriteBatch.java b/java/src/main/java/org/rocksdb/AbstractWriteBatch.java index 9527a2fd9..41d967f53 100644 --- a/java/src/main/java/org/rocksdb/AbstractWriteBatch.java +++ b/java/src/main/java/org/rocksdb/AbstractWriteBatch.java @@ -20,25 +20,25 @@ public abstract class AbstractWriteBatch extends RocksObject } @Override - public void put(byte[] key, byte[] value) throws RocksDBException { + public void put(final byte[] key, final byte[] value) throws RocksDBException { put(nativeHandle_, key, key.length, value, value.length); } @Override - public void put(ColumnFamilyHandle columnFamilyHandle, byte[] key, - byte[] value) throws RocksDBException { + public void put(final ColumnFamilyHandle columnFamilyHandle, final byte[] key, final byte[] value) + throws RocksDBException { put(nativeHandle_, key, key.length, value, value.length, columnFamilyHandle.nativeHandle_); } @Override - public void merge(byte[] key, byte[] value) throws RocksDBException { + public void merge(final byte[] key, final byte[] value) throws RocksDBException { merge(nativeHandle_, key, key.length, value, value.length); } @Override - public void merge(ColumnFamilyHandle columnFamilyHandle, byte[] key, - byte[] value) throws RocksDBException { + public void merge(final ColumnFamilyHandle columnFamilyHandle, final byte[] key, + final byte[] value) throws RocksDBException { merge(nativeHandle_, key, key.length, value, value.length, columnFamilyHandle.nativeHandle_); } @@ -53,7 +53,7 @@ public abstract class AbstractWriteBatch extends RocksObject } @Override - public void put(ColumnFamilyHandle columnFamilyHandle, final ByteBuffer key, + public void put(final ColumnFamilyHandle columnFamilyHandle, final ByteBuffer key, final ByteBuffer value) throws RocksDBException { assert key.isDirect() && value.isDirect(); putDirect(nativeHandle_, key, key.position(), key.remaining(), value, value.position(), @@ -63,12 +63,12 @@ public abstract class AbstractWriteBatch extends RocksObject } @Override - public void delete(byte[] key) throws RocksDBException { + public void delete(final byte[] key) throws RocksDBException { delete(nativeHandle_, key, key.length); } @Override - public void delete(ColumnFamilyHandle columnFamilyHandle, byte[] key) + public void delete(final ColumnFamilyHandle columnFamilyHandle, final byte[] key) throws RocksDBException { delete(nativeHandle_, key, key.length, columnFamilyHandle.nativeHandle_); } @@ -80,7 +80,7 @@ public abstract class AbstractWriteBatch extends RocksObject } @Override - public void delete(ColumnFamilyHandle 
columnFamilyHandle, final ByteBuffer key) + public void delete(final ColumnFamilyHandle columnFamilyHandle, final ByteBuffer key) throws RocksDBException { deleteDirect( nativeHandle_, key, key.position(), key.remaining(), columnFamilyHandle.nativeHandle_); @@ -88,31 +88,30 @@ public abstract class AbstractWriteBatch extends RocksObject } @Override - public void singleDelete(byte[] key) throws RocksDBException { + public void singleDelete(final byte[] key) throws RocksDBException { singleDelete(nativeHandle_, key, key.length); } @Override - public void singleDelete(ColumnFamilyHandle columnFamilyHandle, byte[] key) + public void singleDelete(final ColumnFamilyHandle columnFamilyHandle, final byte[] key) throws RocksDBException { singleDelete(nativeHandle_, key, key.length, columnFamilyHandle.nativeHandle_); } @Override - public void deleteRange(byte[] beginKey, byte[] endKey) - throws RocksDBException { + public void deleteRange(final byte[] beginKey, final byte[] endKey) throws RocksDBException { deleteRange(nativeHandle_, beginKey, beginKey.length, endKey, endKey.length); } @Override - public void deleteRange(ColumnFamilyHandle columnFamilyHandle, - byte[] beginKey, byte[] endKey) throws RocksDBException { + public void deleteRange(final ColumnFamilyHandle columnFamilyHandle, final byte[] beginKey, + final byte[] endKey) throws RocksDBException { deleteRange(nativeHandle_, beginKey, beginKey.length, endKey, endKey.length, columnFamilyHandle.nativeHandle_); } @Override - public void putLogData(byte[] blob) throws RocksDBException { + public void putLogData(final byte[] blob) throws RocksDBException { putLogData(nativeHandle_, blob, blob.length); } diff --git a/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java index 5338bc42d..d1d1123dd 100644 --- a/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java +++ b/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java @@ -9,12 +9,12 @@ import java.util.List; /** * Advanced Column Family Options which are not - * mutable (i.e. present in {@link AdvancedMutableColumnFamilyOptionsInterface} - * + * mutable (i.e. present in {@link AdvancedMutableColumnFamilyOptionsInterface}) + *
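A minimal sketch of the AbstractWriteBatch API touched above, via the concrete WriteBatch (the path is illustrative). Note that the ByteBuffer overloads assert direct buffers, as the put/delete implementations above show:

import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.WriteBatch;
import org.rocksdb.WriteOptions;

public class WriteBatchDemo {
  public static void main(final String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final Options options = new Options().setCreateIfMissing(true);
         final RocksDB db = RocksDB.open(options, "/tmp/batch-demo");
         final WriteBatch batch = new WriteBatch();
         final WriteOptions writeOptions = new WriteOptions()) {
      batch.put("a".getBytes(), "1".getBytes());
      batch.delete("stale".getBytes());
      db.write(writeOptions, batch); // the whole batch applies atomically
    }
  }
}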
<p>
* Taken from include/rocksdb/advanced_options.h */ public interface AdvancedColumnFamilyOptionsInterface< - T extends AdvancedColumnFamilyOptionsInterface> { + T extends AdvancedColumnFamilyOptionsInterface & ColumnFamilyOptionsInterface> { /** * The minimum number of write buffers that will be merged together * before writing to storage. If set to 1, then @@ -51,23 +51,23 @@ public interface AdvancedColumnFamilyOptionsInterface< * this parameter does not affect flushing. * This controls the minimum amount of write history that will be available * in memory for conflict checking when Transactions are used. - * + *
<p>
* When using an OptimisticTransactionDB: * If this value is too low, some transactions may fail at commit time due * to not being able to determine whether there were any write conflicts. - * + *
<p>
* When using a TransactionDB: * If Transaction::SetSnapshot is used, TransactionDB will read either * in-memory write buffers or SST files to do write-conflict checking. * Increasing this value can reduce the number of reads to SST files * done for conflict detection. - * + *
<p>
* Setting this value to 0 will cause write buffers to be freed immediately * after they are flushed. * If this value is set to -1, * {@link AdvancedMutableColumnFamilyOptionsInterface#maxWriteBufferNumber()} * will be used. - * + *
<p>
* Default: * If using a TransactionDB/OptimisticTransactionDB, the default value will * be set to the value of @@ -336,14 +336,13 @@ public interface AdvancedColumnFamilyOptionsInterface< /** * Set compaction style for DB. - * + *
<p>
* Default: LEVEL. * * @param compactionStyle Compaction style. * @return the reference to the current options. */ - ColumnFamilyOptionsInterface setCompactionStyle( - CompactionStyle compactionStyle); + ColumnFamilyOptionsInterface setCompactionStyle(CompactionStyle compactionStyle); /** * Compaction style for DB. @@ -355,7 +354,7 @@ public interface AdvancedColumnFamilyOptionsInterface< /** * If level {@link #compactionStyle()} == {@link CompactionStyle#LEVEL}, * for each level, which files are prioritized to be picked to compact. - * + *
<p>
* Default: {@link CompactionPriority#ByCompensatedSize} * * @param compactionPriority The compaction priority @@ -444,7 +443,7 @@ public interface AdvancedColumnFamilyOptionsInterface< * By default, RocksDB runs consistency checks on the LSM every time the LSM * changes (Flush, Compaction, AddFile). Use this option if you need to * disable them. - * + *
<p>
* Default: true * * @param forceConsistencyChecks false to disable consistency checks diff --git a/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java index 162d15d80..c8fc84173 100644 --- a/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java +++ b/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java @@ -7,7 +7,7 @@ package org.rocksdb; /** * Advanced Column Family Options which are mutable - * + *
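A minimal sketch of setting the options discussed above on a ColumnFamilyOptions instance (the chosen values are illustrative):

import org.rocksdb.ColumnFamilyOptions;
import org.rocksdb.CompactionPriority;
import org.rocksdb.CompactionStyle;
import org.rocksdb.RocksDB;

public class CompactionConfigDemo {
  public static void main(final String[] args) {
    RocksDB.loadLibrary();
    try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
      cfOpts.setCompactionStyle(CompactionStyle.LEVEL)
          .setCompactionPriority(CompactionPriority.MinOverlappingRatio)
          .setForceConsistencyChecks(true); // the default, shown explicitly
    }
  }
}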
<p>
* Taken from include/rocksdb/advanced_options.h * and MutableCFOptions in util/cf_options.h */ @@ -58,8 +58,8 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< * if prefix_extractor is set and memtable_prefix_bloom_size_ratio is not 0, * create prefix bloom for memtable with the size of * write_buffer_size * memtable_prefix_bloom_size_ratio. - * If it is larger than 0.25, it is santinized to 0.25. - * + * If it is larger than 0.25, it is sanitized to 0.25. + *
<p>
* Default: 0 (disabled) * * @param memtablePrefixBloomSizeRatio the ratio of memtable used by the @@ -73,8 +73,8 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< * if prefix_extractor is set and memtable_prefix_bloom_size_ratio is not 0, * create prefix bloom for memtable with the size of * write_buffer_size * memtable_prefix_bloom_size_ratio. - * If it is larger than 0.25, it is santinized to 0.25. - * + * If it is larger than 0.25, it is sanitized to 0.25. + *
<p>
* Default: 0 (disabled) * * @return the ratio of memtable used by the bloom filter @@ -85,7 +85,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< * Threshold used in the MemPurge (memtable garbage collection) * feature. A value of 0.0 corresponds to no MemPurge, * a value of 1.0 will trigger a MemPurge as often as possible. - * + *
<p>
* Default: 0.0 (disabled) * * @param experimentalMempurgeThreshold the threshold used by @@ -98,7 +98,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< * Threshold used in the MemPurge (memtable garbage collection) * feature. A value of 0.0 corresponds to no MemPurge, * a value of 1.0 will trigger a MemPurge as often as possible. - * + *
<p>
* Default: 0 (disabled) * * @return the threshold used by the MemPurge decider @@ -109,7 +109,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< * Enable whole key bloom filter in memtable. Note this will only take effect * if memtable_prefix_bloom_size_ratio is not 0. Enabling whole key filtering * can potentially reduce CPU usage for point-look-ups. - * + *
<p>
* Default: false (disabled) * * @param memtableWholeKeyFiltering true if whole key bloom filter is enabled @@ -154,12 +154,12 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< * The size of one block in arena memory allocation. * If ≤ 0, a proper value is automatically calculated (usually 1/10 of * writer_buffer_size). - * + *
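A minimal sketch combining the memtable bloom options above with the prefix extractor they depend on (the prefix length and ratio are illustrative):

import org.rocksdb.ColumnFamilyOptions;
import org.rocksdb.RocksDB;

public class MemtableBloomDemo {
  public static void main(final String[] args) {
    RocksDB.loadLibrary();
    try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
      cfOpts.useFixedLengthPrefixExtractor(8) // the prefix bloom needs an extractor
          .setMemtablePrefixBloomSizeRatio(0.1) // 10% of write_buffer_size
          .setMemtableWholeKeyFiltering(true); // also filter point lookups
    }
  }
}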
<p>
* There are two additional restriction of the specified size: * (1) size should be in the range of [4096, 2 << 30] and * (2) be the multiple of the CPU word (which helps with the memory * alignment). - * + *
<p>
* We'll automatically check and adjust the size number to make sure it * conforms to the restrictions. * Default: 0 @@ -175,12 +175,12 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< * The size of one block in arena memory allocation. * If ≤ 0, a proper value is automatically calculated (usually 1/10 of * writer_buffer_size). - * + *
<p>
* There are two additional restriction of the specified size: * (1) size should be in the range of [4096, 2 << 30] and * (2) be the multiple of the CPU word (which helps with the memory * alignment). - * + *
<p>
* We'll automatically check and adjust the size number to make sure it * conforms to the restrictions. * Default: 0 @@ -294,7 +294,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< * @param multiplier the ratio between the total size of level-(L+1) * files and the total size of level-L files for all L. * @return the reference to the current options. - * + *
<p>
* See {@link MutableColumnFamilyOptionsInterface#setMaxBytesForLevelBase(long)} */ T setMaxBytesForLevelMultiplier(double multiplier); @@ -306,7 +306,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< * * @return the ratio between the total size of level-(L+1) files and * the total size of level-L files for all L. - * + *
<p>
* See {@link MutableColumnFamilyOptionsInterface#maxBytesForLevelBase()} */ double maxBytesForLevelMultiplier(); @@ -315,7 +315,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< * Different max-size multipliers for different levels. * These are multiplied by max_bytes_for_level_multiplier to arrive * at the max-size of each level. - * + *
<p>
* Default: 1 * * @param maxBytesForLevelMultiplierAdditional The max-size multipliers @@ -329,7 +329,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< * Different max-size multipliers for different levels. * These are multiplied by max_bytes_for_level_multiplier to arrive * at the max-size of each level. - * + *
<p>
* Default: 1 * * @return The max-size multipliers for each level @@ -339,7 +339,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< /** * All writes will be slowed down to at least delayed_write_rate if estimated * bytes needed to be compaction exceed this threshold. - * + *
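To make the multiplier relationship concrete, a small worked example of per-level target sizes under illustrative settings (256 MB base, multiplier 10):

public class LevelTargetDemo {
  public static void main(final String[] args) {
    final long levelBase = 256L << 20; // max_bytes_for_level_base: 256 MB
    final double multiplier = 10.0;    // max_bytes_for_level_multiplier
    for (int level = 1; level <= 4; level++) {
      final long target = (long) (levelBase * Math.pow(multiplier, level - 1));
      System.out.println("L" + level + " target ~" + (target >> 20) + " MB");
    }
    // Prints 256 MB, 2560 MB, 25600 MB, 256000 MB for L1..L4.
  }
}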
<p>
* Default: 64GB * * @param softPendingCompactionBytesLimit The soft limit to impose on @@ -352,7 +352,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< /** * All writes will be slowed down to at least delayed_write_rate if estimated * bytes needed to be compaction exceed this threshold. - * + *
<p>
* Default: 64GB * * @return The soft limit to impose on compaction @@ -362,7 +362,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< /** * All writes are stopped if estimated bytes needed to be compaction exceed * this threshold. - * + *
<p>
* Default: 256GB * * @param hardPendingCompactionBytesLimit The hard limit to impose on @@ -375,7 +375,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< /** * All writes are stopped if estimated bytes needed to be compaction exceed * this threshold. - * + *
<p>
* Default: 256GB * * @return The hard limit to impose on compaction @@ -390,7 +390,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< * Default: 8 * * @param maxSequentialSkipInIterations the number of keys could - * be skipped in a iteration. + * be skipped in an iteration. * @return the reference to the current options. */ T setMaxSequentialSkipInIterations( @@ -403,19 +403,19 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< * skipped before a reseek is issued. * Default: 8 * - * @return the number of keys could be skipped in a iteration. + * @return the number of keys could be skipped in an iteration. */ long maxSequentialSkipInIterations(); /** * Maximum number of successive merge operations on a key in the memtable. - * + *
<p>
* When a merge operation is added to the memtable and the maximum number of * successive merges is reached, the value of the key will be calculated and * inserted into the memtable instead of the merge operation. This will * ensure that there are never more than max_successive_merges merge * operations in the memtable. - * + *
<p>
* Default: 0 (disabled) * * @param maxSuccessiveMerges the maximum number of successive merges. @@ -428,13 +428,13 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< /** * Maximum number of successive merge operations on a key in the memtable. - * + *
<p>
* When a merge operation is added to the memtable and the maximum number of * successive merges is reached, the value of the key will be calculated and * inserted into the memtable instead of the merge operation. This will * ensure that there are never more than max_successive_merges merge * operations in the memtable. - * + *
<p>
* Default: 0 (disabled) * * @return the maximum number of successive merges. @@ -443,7 +443,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< /** * After writing every SST file, reopen it and read all the keys. - * + *
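A minimal sketch of the successive-merges cap in use with a merge operator (the operator choice, path, and cap of 5 are illustrative):

import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.StringAppendOperator;

public class SuccessiveMergesDemo {
  public static void main(final String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final StringAppendOperator append = new StringAppendOperator();
         final Options options = new Options()
             .setCreateIfMissing(true)
             .setMergeOperator(append)
             .setMaxSuccessiveMerges(5); // fold the value once 5 merges queue up
         final RocksDB db = RocksDB.open(options, "/tmp/merge-demo")) {
      for (int i = 0; i < 10; i++) {
        db.merge("key".getBytes(), ("v" + i).getBytes());
      }
    }
  }
}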
<p>
* Default: false * * @param paranoidFileChecks true to enable paranoid file checks @@ -454,7 +454,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< /** * After writing every SST file, reopen it and read all the keys. - * + *
<p>
* Default: false * * @return true if paranoid file checks are enabled @@ -463,7 +463,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< /** * Measure IO stats in compactions and flushes, if true. - * + *
<p>
* Default: false * * @param reportBgIoStats true to enable reporting @@ -483,11 +483,11 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< * Non-bottom-level files older than TTL will go through the compaction * process. This needs {@link MutableDBOptionsInterface#maxOpenFiles()} to be * set to -1. - * + *
<p>
* Enabled only for level compaction for now. - * + *
<p>
* Default: 0 (disabled) - * + *
<p>
* Dynamically changeable through * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. * @@ -500,7 +500,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< /** * Get the TTL for Non-bottom-level files that will go through the compaction * process. - * + *
<p>
* See {@link #setTtl(long)}. * * @return the time-to-live. @@ -513,18 +513,18 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< * One main use of the feature is to make sure a file goes through compaction * filters periodically. Users can also use the feature to clear up SST * files using old format. - * + *
<p>
* A file's age is computed by looking at file_creation_time or creation_time * table properties in order, if they have valid non-zero values; if not, the * age is based on the file's last modified time (given by the underlying * Env). - * + *
<p>
* Supported in Level and FIFO compaction. * In FIFO compaction, this option has the same meaning as TTL and whichever * stricter will be used. * Pre-req: max_open_file == -1. * unit: seconds. Ex: 7 days = 7 * 24 * 60 * 60 - * + *
<p>
* Values: * 0: Turn off Periodic compactions. * UINT64_MAX - 1 (i.e 0xfffffffffffffffe): Let RocksDB control this feature @@ -534,9 +534,9 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< * In FIFO compaction, since the option has the same meaning as ttl, * when this value is left default, and ttl is left to 0, 30 days will be * used. Otherwise, min(ttl, periodic_compaction_seconds) will be used. - * + *
<p>
* Default: 0xfffffffffffffffe (allow RocksDB to auto-tune) - * + *
<p>
* Dynamically changeable through * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. * @@ -548,7 +548,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< /** * Get the periodicCompactionSeconds. - * + *
<p>
* See {@link #setPeriodicCompactionSeconds(long)}. * * @return the periodic compaction in seconds. @@ -566,9 +566,9 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< * for reads. See also the options min_blob_size, blob_file_size, * blob_compression_type, enable_blob_garbage_collection, and * blob_garbage_collection_age_cutoff below. - * + *
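A minimal sketch of changing both options dynamically, as the javadocs above describe (the intervals and path are illustrative; ttl needs max_open_files set to -1):

import org.rocksdb.MutableColumnFamilyOptions;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class PeriodicCompactionDemo {
  public static void main(final String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final Options options =
             new Options().setCreateIfMissing(true).setMaxOpenFiles(-1);
         final RocksDB db = RocksDB.open(options, "/tmp/ttl-demo")) {
      db.setOptions(db.getDefaultColumnFamily(),
          MutableColumnFamilyOptions.builder()
              .setTtl(7L * 24 * 60 * 60) // compact non-bottom files older than 7 days
              .setPeriodicCompactionSeconds(30L * 24 * 60 * 60)
              .build());
    }
  }
}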
<p>
* Default: false - * + *
<p>
* Dynamically changeable through * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. * @@ -585,9 +585,9 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< * for reads. See also the options min_blob_size, blob_file_size, * blob_compression_type, enable_blob_garbage_collection, and * blob_garbage_collection_age_cutoff below. - * + *
<p>
* Default: false - * + *
<p>
* Dynamically changeable through * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. * @@ -601,9 +601,9 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< * alongside the keys in SST files in the usual fashion. A value of zero for * this option means that all values are stored in blob files. Note that * enable_blob_files has to be set in order for this option to have any effect. - * + *
<p>
* Default: 0 - * + *
<p>
* Dynamically changeable through * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. * @@ -618,9 +618,9 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< * alongside the keys in SST files in the usual fashion. A value of zero for * this option means that all values are stored in blob files. Note that * enable_blob_files has to be set in order for this option to have any effect. - * + *
<p>
* Default: 0 - * + *
<p>
* Dynamically changeable through * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. * @@ -632,9 +632,9 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< * Set the size limit for blob files. When writing blob files, a new file is opened * once this limit is reached. Note that enable_blob_files has to be set in * order for this option to have any effect. - * + *
<p>
* Default: 256 MB - * + *
<p>
* Dynamically changeable through * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. * @@ -656,9 +656,9 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< * Set the compression algorithm to use for large values stored in blob files. Note * that enable_blob_files has to be set in order for this option to have any * effect. - * + *
<p>
* Default: no compression - * + *
<p>
* Dynamically changeable through * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. * @@ -683,7 +683,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< * relocated to new files as they are encountered during compaction, which makes * it possible to clean up blob files once they contain nothing but * obsolete/garbage blobs. See also blob_garbage_collection_age_cutoff below. - * + *
<p>
* Default: false * * @param enableBlobGarbageCollection the new enabled/disabled state of blob garbage collection @@ -698,7 +698,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< * relocated to new files as they are encountered during compaction, which makes * it possible to clean up blob files once they contain nothing but * obsolete/garbage blobs. See also blob_garbage_collection_age_cutoff below. - * + *
<p>
* Default: false * * @return true if blob garbage collection is currently enabled. @@ -711,7 +711,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< * where N = garbage_collection_cutoff * number_of_blob_files. Note that * enable_blob_garbage_collection has to be set in order for this option to have * any effect. - * + *
<p>
* Default: 0.25 * * @param blobGarbageCollectionAgeCutoff the new age cutoff @@ -725,7 +725,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< * where N = garbage_collection_cutoff * number_of_blob_files. Note that * enable_blob_garbage_collection has to be set in order for this option to have * any effect. - * + *
<p>
* Default: 0.25 * * @return the current age cutoff for garbage collection @@ -738,12 +738,12 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< * the blob files in question, assuming they are all eligible based on the * value of {@link #blobGarbageCollectionAgeCutoff} above. This option is * currently only supported with leveled compactions. - * + *
<p>
* Note that {@link #enableBlobGarbageCollection} has to be set in order for this * option to have any effect. - * + *
<p>
* Default: 1.0 - * + *
<p>
* Dynamically changeable through the SetOptions() API * * @param blobGarbageCollectionForceThreshold new value for the threshold @@ -752,16 +752,16 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< T setBlobGarbageCollectionForceThreshold(double blobGarbageCollectionForceThreshold); /** - * Get the current value for the {@link #blobGarbageCollectionForceThreshold} + * Get the current value for the {@code #blobGarbageCollectionForceThreshold} * @return the current threshold at which garbage collection of blobs is forced */ double blobGarbageCollectionForceThreshold(); /** * Set compaction readahead for blob files. - * + *
<p>
* Default: 0 - * + *
<p>
* Dynamically changeable through * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. * @@ -780,9 +780,9 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< /** * Set a certain LSM tree level to enable blob files. - * + *
<p>
* Default: 0 - * + *
<p>
* Dynamically changeable through * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. * @@ -794,7 +794,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< /** * Get the starting LSM tree level to enable blob files. - * + *
<p>
* Default: 0 * * @return the current LSM tree level to enable blob files. @@ -803,13 +803,13 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< /** * Set a certain prepopulate blob cache option. - * + *
<p>
* Default: 0 - * + *
<p>
* Dynamically changeable through * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. * - * @param prepopulateBlobCache the prepopulate blob cache option + * @param prepopulateBlobCache prepopulate the blob cache option * * @return the reference to the current options. */ @@ -817,7 +817,7 @@ public interface AdvancedMutableColumnFamilyOptionsInterface< /** * Get the prepopulate blob cache option. - * + *
<p>
* Default: 0 * * @return the current prepopulate blob cache option. diff --git a/java/src/main/java/org/rocksdb/BackupEngine.java b/java/src/main/java/org/rocksdb/BackupEngine.java index 515824a91..3ab220683 100644 --- a/java/src/main/java/org/rocksdb/BackupEngine.java +++ b/java/src/main/java/org/rocksdb/BackupEngine.java @@ -9,7 +9,7 @@ import java.util.List; /** * BackupEngine allows you to backup * and restore the database - * + *
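A minimal sketch of enabling BlobDB-style value separation through the mutable options above (the thresholds, compression choice, and path are illustrative):

import org.rocksdb.CompressionType;
import org.rocksdb.MutableColumnFamilyOptions;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class BlobConfigDemo {
  public static void main(final String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final Options options = new Options().setCreateIfMissing(true);
         final RocksDB db = RocksDB.open(options, "/tmp/blob-demo")) {
      db.setOptions(db.getDefaultColumnFamily(),
          MutableColumnFamilyOptions.builder()
              .setEnableBlobFiles(true) // prerequisite for the options below
              .setMinBlobSize(4096) // values of 4 KiB and up go to blob files
              .setBlobFileSize(256L << 20) // roll a new blob file every 256 MB
              .setBlobCompressionType(CompressionType.LZ4_COMPRESSION)
              .setEnableBlobGarbageCollection(true)
              .setBlobGarbageCollectionAgeCutoff(0.25)
              .build());
    }
  }
}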
<p>
* Be aware, that `new BackupEngine` takes time proportional to the amount * of backups. So if you have a slow filesystem to backup * and you have a lot of backups then restoring can take some time. @@ -39,12 +39,12 @@ public class BackupEngine extends RocksObject implements AutoCloseable { /** * Captures the state of the database in the latest backup - * + *
<p>
* Just a convenience for {@link #createNewBackup(RocksDB, boolean)} with * the flushBeforeBackup parameter set to false * * @param db The database to backup - * + *
<p>
* Note - This method is not thread safe * * @throws RocksDBException thrown if a new backup could not be created @@ -72,7 +72,7 @@ public class BackupEngine extends RocksObject implements AutoCloseable { * always be consistent with the current state of the * database regardless of the flushBeforeBackup * parameter. - * + *
<p>
* Note - This method is not thread safe * * @throws RocksDBException thrown if a new backup could not be created @@ -105,7 +105,7 @@ public class BackupEngine extends RocksObject implements AutoCloseable { * always be consistent with the current state of the * database regardless of the flushBeforeBackup * parameter. - * + *
<p>
* Note - This method is not thread safe * * @throws RocksDBException thrown if a new backup could not be created @@ -179,11 +179,11 @@ public class BackupEngine extends RocksObject implements AutoCloseable { /** * Restore the database from a backup - * + *
<p>
* IMPORTANT: if options.share_table_files == true and you restore the DB * from some backup that is not the latest, and you start creating new * backups from the new DB, they will probably fail! - * + *
<p>
* Example: Let's say you have backups 1, 2, 3, 4, 5 and you restore 3. * If you add new data to the DB and try creating a new backup now, the * database will diverge from backups 4 and 5 and the new backup will fail. @@ -226,7 +226,7 @@ public class BackupEngine extends RocksObject implements AutoCloseable { restoreOptions.nativeHandle_); } - private native static long open(final long env, final long backupEngineOptions) + private static native long open(final long env, final long backupEngineOptions) throws RocksDBException; private native void createNewBackup(final long handle, final long dbHandle, diff --git a/java/src/main/java/org/rocksdb/BackupEngineOptions.java b/java/src/main/java/org/rocksdb/BackupEngineOptions.java index 6e2dacc02..2a358faac 100644 --- a/java/src/main/java/org/rocksdb/BackupEngineOptions.java +++ b/java/src/main/java/org/rocksdb/BackupEngineOptions.java @@ -25,7 +25,7 @@ public class BackupEngineOptions extends RocksObject { /** *
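A minimal sketch of the backup flow described above (paths illustrative). Restoring would go through restoreDbFromLatestBackup(), which sidesteps the divergence pitfall in the javadoc's example:

import org.rocksdb.BackupEngine;
import org.rocksdb.BackupEngineOptions;
import org.rocksdb.Env;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class BackupDemo {
  public static void main(final String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final Options options = new Options().setCreateIfMissing(true);
         final RocksDB db = RocksDB.open(options, "/tmp/backup-demo");
         final BackupEngineOptions backupOptions =
             new BackupEngineOptions("/tmp/backup-demo/backups");
         final BackupEngine backupEngine =
             BackupEngine.open(Env.getDefault(), backupOptions)) {
      backupEngine.createNewBackup(db, true); // flush memtables before backup
      backupEngine.purgeOldBackups(5); // keep only the 5 most recent backups
    }
  }
}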
<p>
BackupEngineOptions constructor.
</p>
* - * @param path Where to keep the backup files. Has to be different than db + * @param path Where to keep the backup files. Has to be different from db * name. Best to set this to {@code db name_ + "/backups"} * @throws java.lang.IllegalArgumentException if illegal path is used. */ @@ -55,9 +55,9 @@ public class BackupEngineOptions extends RocksObject { /** * Backup Env object. It will be used for backup file I/O. If it's - * null, backups will be written out using DBs Env. Otherwise + * null, backups will be written out using DBs Env. Otherwise, * backup's I/O will be performed using this object. - * + *
<p>
* Default: null * * @param env The environment to use @@ -72,9 +72,9 @@ public class BackupEngineOptions extends RocksObject { /** * Backup Env object. It will be used for backup file I/O. If it's - * null, backups will be written out using DBs Env. Otherwise + * null, backups will be written out using DBs Env. Otherwise, * backup's I/O will be performed using this object. - * + *
<p>
* Default: null * * @return The environment in use @@ -128,7 +128,7 @@ public class BackupEngineOptions extends RocksObject { /** * Set the logger to use for Backup info and error messages - * + *
<p>
* Default: null * * @return The logger in use for the backup @@ -143,7 +143,7 @@ public class BackupEngineOptions extends RocksObject { * @param sync If {@code sync == true}, we can guarantee you'll get consistent * backup even on a machine crash/reboot. Backup process is slower with sync * enabled. If {@code sync == false}, we don't guarantee anything on machine - * reboot. However, chances are some of the backups are consistent. + * reboot. However, chances are some backups are consistent. * *
<p>
Default: true
</p>
* @@ -194,7 +194,7 @@ public class BackupEngineOptions extends RocksObject { /** *
<p>
Set if log files shall be persisted.
</p>
* - * @param backupLogFiles If false, we won't backup log files. This option can + * @param backupLogFiles If false, we won't back up log files. This option can * be useful for backing up in-memory databases where log file are * persisted, but table files are in memory. * @@ -250,7 +250,7 @@ public class BackupEngineOptions extends RocksObject { /** * Backup rate limiter. Used to control transfer speed for backup. If this is * not null, {@link #backupRateLimit()} is ignored. - * + *
<p>
* Default: null * * @param backupRateLimiter The rate limiter to use for the backup @@ -266,7 +266,7 @@ public class BackupEngineOptions extends RocksObject { /** * Backup rate limiter. Used to control transfer speed for backup. If this is * not null, {@link #backupRateLimit()} is ignored. - * + *
<p>
* Default: null * * @return The rate limiter in use for the backup @@ -308,7 +308,7 @@ public class BackupEngineOptions extends RocksObject { /** * Restore rate limiter. Used to control transfer speed during restore. If * this is not null, {@link #restoreRateLimit()} is ignored. - * + *
<p>
* Default: null * * @param restoreRateLimiter The rate limiter to use during restore @@ -324,7 +324,7 @@ public class BackupEngineOptions extends RocksObject { /** * Restore rate limiter. Used to control transfer speed during restore. If * this is not null, {@link #restoreRateLimit()} is ignored. - * + *
<p>
* Default: null * * @return The rate limiter in use during restore @@ -400,7 +400,7 @@ public class BackupEngineOptions extends RocksObject { /** * During backup user can get callback every time next * {@link #callbackTriggerIntervalSize()} bytes being copied. - * + *
<p>
* Default: 4194304 * * @param callbackTriggerIntervalSize The interval size for the @@ -416,8 +416,8 @@ public class BackupEngineOptions extends RocksObject { /** * During backup user can get callback every time next - * {@link #callbackTriggerIntervalSize()} bytes being copied. - * + * {@code #callbackTriggerIntervalSize()} bytes being copied. + *
<p>
* Default: 4194304 * * @return The interval size for the callback trigger @@ -427,7 +427,7 @@ public class BackupEngineOptions extends RocksObject { return callbackTriggerIntervalSize(nativeHandle_); } - private native static long newBackupEngineOptions(final String path); + private static native long newBackupEngineOptions(final String path); private native String backupDir(long handle); private native void setBackupEnv(final long handle, final long envHandle); private native void setShareTableFiles(long handle, boolean flag); diff --git a/java/src/main/java/org/rocksdb/BackupInfo.java b/java/src/main/java/org/rocksdb/BackupInfo.java index 9244e4eb1..9581b098f 100644 --- a/java/src/main/java/org/rocksdb/BackupInfo.java +++ b/java/src/main/java/org/rocksdb/BackupInfo.java @@ -68,9 +68,9 @@ public class BackupInfo { return app_metadata_; } - private int backupId_; - private long timestamp_; - private long size_; - private int numberFiles_; - private String app_metadata_; + private final int backupId_; + private final long timestamp_; + private final long size_; + private final int numberFiles_; + private final String app_metadata_; } diff --git a/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java b/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java index 9300468b0..70dee3dd9 100644 --- a/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java +++ b/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java @@ -6,10 +6,10 @@ package org.rocksdb; /** * The config for plain table sst format. - * + *
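A minimal sketch tying together several of the BackupEngineOptions discussed above (the rate and path are illustrative):

import org.rocksdb.BackupEngineOptions;
import org.rocksdb.RateLimiter;
import org.rocksdb.RocksDB;

public class BackupOptionsDemo {
  public static void main(final String[] args) {
    RocksDB.loadLibrary();
    try (final RateLimiter limiter = new RateLimiter(4L << 20); // ~4 MB/s
         final BackupEngineOptions backupOptions =
             new BackupEngineOptions("/tmp/backup-options-demo")) {
      backupOptions
          .setSync(true) // consistent even across a machine crash, but slower
          .setBackupLogFiles(true) // also back up WAL files
          .setBackupRateLimiter(limiter); // throttle backup I/O
    }
  }
}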
<p>
* BlockBasedTable is a RocksDB's default SST file format. */ -//TODO(AR) should be renamed BlockBasedTableOptions +// TODO(AR) should be renamed BlockBasedTableOptions public class BlockBasedTableConfig extends TableFormatConfig { public BlockBasedTableConfig() { @@ -243,7 +243,7 @@ public class BlockBasedTableConfig extends TableFormatConfig { * Disable block cache. If this is set to true, * then no block cache should be used, and the {@link #setBlockCache(Cache)} * should point to a {@code null} object. - * + *
<p>
* Default: false * * @param noBlockCache if use block cache @@ -257,10 +257,10 @@ public class BlockBasedTableConfig extends TableFormatConfig { /** * Use the specified cache for blocks. * When not null this take precedence even if the user sets a block cache size. - * + *
<p>
* {@link org.rocksdb.Cache} should not be disposed before options instances * using this cache is disposed. - * + *
<p>
* {@link org.rocksdb.Cache} instance can be re-used in multiple options * instances. * @@ -276,7 +276,7 @@ public class BlockBasedTableConfig extends TableFormatConfig { /** * Use the specified persistent cache. - * + *
<p>
* If {@code !null} use the specified cache for pages read from device, * otherwise no page cache is used. * @@ -327,7 +327,7 @@ public class BlockBasedTableConfig extends TableFormatConfig { * is less than this specified number and adding a new record to the block * will exceed the configured block size, then this block will be closed and * the new record will be written to the next block. - * + *
<p>
* Default is 10. * * @param blockSizeDeviation the deviation to block size allowed @@ -414,7 +414,7 @@ public class BlockBasedTableConfig extends TableFormatConfig { /** * Use partitioned full filters for each SST file. This option is incompatible * with block-based filters. - * + *
<p>
* Defaults to false. * * @param partitionFilters use partition filters. @@ -428,7 +428,7 @@ public class BlockBasedTableConfig extends TableFormatConfig { /*** * Option to generate Bloom filters that minimize memory * internal fragmentation. - * + *
<p>
* See {@link #setOptimizeFiltersForMemory(boolean)}. * * @return true if bloom filters are used to minimize memory internal @@ -442,7 +442,7 @@ public class BlockBasedTableConfig extends TableFormatConfig { /** * Option to generate Bloom filters that minimize memory * internal fragmentation. - * + *
<p>
* When false, malloc_usable_size is not available, or format_version < 5, * filters are generated without regard to internal fragmentation when * loaded into memory (historical behavior). When true (and @@ -452,21 +452,21 @@ public class BlockBasedTableConfig extends TableFormatConfig { * the reading DB has the same memory allocation characteristics as the * generating DB. This option does not break forward or backward * compatibility. - * + *
<p>
* While individual filters will vary in bits/key and false positive rate * when setting is true, the implementation attempts to maintain a weighted * average FP rate for filters consistent with this option set to false. - * + *
<p>
* With Jemalloc for example, this setting is expected to save about 10% of * the memory footprint and block cache charge of filters, while increasing * disk usage of filters by about 1-2% due to encoding efficiency losses * with variance in bits/key. - * + *
<p>
* NOTE: Because some memory counted by block cache might be unmapped pages * within internal fragmentation, this option can increase observed RSS * memory usage. With {@link #cacheIndexAndFilterBlocks()} == true, * this option makes the block cache better at using space it is allowed. - * + *
<p>
* NOTE: Do not set to true if you do not trust malloc_usable_size. With * this option, RocksDB might access an allocated memory object beyond its * original size if malloc_usable_size says it is safe to do so. While this @@ -495,9 +495,9 @@ public class BlockBasedTableConfig extends TableFormatConfig { /** * Use delta encoding to compress keys in blocks. - * + *
<p>
* NOTE: {@link ReadOptions#pinData()} requires this option to be disabled. - * + *
<p>
* Default: true * * @param useDeltaEncoding true to enable delta encoding @@ -521,10 +521,10 @@ public class BlockBasedTableConfig extends TableFormatConfig { /** * Use the specified filter policy to reduce disk reads. - * + *
<p>
* {@link org.rocksdb.Filter} should not be closed before options instances * using this filter are closed. - * + *
<p>
* {@link org.rocksdb.Filter} instance can be re-used in multiple options * instances. * @@ -576,7 +576,7 @@ public class BlockBasedTableConfig extends TableFormatConfig { /** * Returns true when compression verification is enabled. - * + *
<p>
* See {@link #setVerifyCompression(boolean)}. * * @return true if compression verification is enabled. @@ -602,7 +602,7 @@ public class BlockBasedTableConfig extends TableFormatConfig { /** * Get the Read amplification bytes per-bit. - * + *
<p>
* See {@link #setReadAmpBytesPerBit(int)}. * * @return the bytes per-bit. @@ -613,27 +613,27 @@ public class BlockBasedTableConfig extends TableFormatConfig { /** * Set the Read amplification bytes per-bit. - * + *
<p>
* If used, For every data block we load into memory, we will create a bitmap * of size ((block_size / `read_amp_bytes_per_bit`) / 8) bytes. This bitmap * will be used to figure out the percentage we actually read of the blocks. - * + *
<p>
* When this feature is used Tickers::READ_AMP_ESTIMATE_USEFUL_BYTES and * Tickers::READ_AMP_TOTAL_READ_BYTES can be used to calculate the * read amplification using this formula * (READ_AMP_TOTAL_READ_BYTES / READ_AMP_ESTIMATE_USEFUL_BYTES) - * + *
<p>
* value => memory usage (percentage of loaded blocks memory) * 1 => 12.50 % * 2 => 06.25 % * 4 => 03.12 % * 8 => 01.56 % * 16 => 00.78 % - * + *
<p>
* Note: This number must be a power of 2, if not it will be sanitized * to be the next lowest power of 2, for example a value of 7 will be * treated as 4, a value of 19 will be treated as 16. - * + *
<p>
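A quick check of the arithmetic above: with a 4096-byte block and readAmpBytesPerBit = 32 (a power of 2), each block carries a bitmap of (4096 / 32) / 8 = 16 bytes, roughly 0.4% of the block, consistent with the table above. A hedged sketch wiring the option to the two tickers named in the Javadoc (the numbers are illustrative, not recommendations):

    try (final Statistics statistics = new Statistics();
         final Options options = new Options()
             .setStatistics(statistics)
             .setTableFormatConfig(
                 new BlockBasedTableConfig().setReadAmpBytesPerBit(32))) {
      // ... open the DB and serve some reads, then:
      final long total =
          statistics.getTickerCount(TickerType.READ_AMP_TOTAL_READ_BYTES);
      final long useful =
          statistics.getTickerCount(TickerType.READ_AMP_ESTIMATE_USEFUL_BYTES);
      final double readAmplification =
          useful == 0 ? 0.0 : (double) total / useful;
      System.out.println("read amplification: " + readAmplification);
    }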
* Default: 0 (disabled) * * @param readAmpBytesPerBit the bytes per-bit @@ -699,7 +699,7 @@ public class BlockBasedTableConfig extends TableFormatConfig { /** * Determine if index compression is enabled. - * + *
<p>
* See {@link #setEnableIndexCompression(boolean)}. * * @return true if index compression is enabled, false otherwise @@ -710,7 +710,7 @@ public class BlockBasedTableConfig extends TableFormatConfig { /** * Store index blocks on disk in compressed format. - * + *
<p>
* Changing this option to false will avoid the overhead of decompression * if index blocks are evicted and read back. * diff --git a/java/src/main/java/org/rocksdb/BloomFilter.java b/java/src/main/java/org/rocksdb/BloomFilter.java index 8aff715b7..0b4e93229 100644 --- a/java/src/main/java/org/rocksdb/BloomFilter.java +++ b/java/src/main/java/org/rocksdb/BloomFilter.java @@ -69,5 +69,5 @@ public class BloomFilter extends Filter { this(bitsPerKey); } - private native static long createNewBloomFilter(final double bitsKeyKey); + private static native long createNewBloomFilter(final double bitsKeyKey); } diff --git a/java/src/main/java/org/rocksdb/ByteBufferGetStatus.java b/java/src/main/java/org/rocksdb/ByteBufferGetStatus.java index 8eef95447..f918a8d03 100644 --- a/java/src/main/java/org/rocksdb/ByteBufferGetStatus.java +++ b/java/src/main/java/org/rocksdb/ByteBufferGetStatus.java @@ -12,7 +12,7 @@ import java.util.List; /** * A ByteBuffer containing fetched data, together with a result for the fetch * and the total size of the object fetched. - * + *
<p>
* Used for the individual results of * {@link RocksDB#multiGetByteBuffers(List, List)} * {@link RocksDB#multiGetByteBuffers(List, List, List)} diff --git a/java/src/main/java/org/rocksdb/Cache.java b/java/src/main/java/org/rocksdb/Cache.java index 569a1df06..04bd3fcaa 100644 --- a/java/src/main/java/org/rocksdb/Cache.java +++ b/java/src/main/java/org/rocksdb/Cache.java @@ -35,6 +35,6 @@ public abstract class Cache extends RocksObject { return getPinnedUsage(this.nativeHandle_); } - private native static long getUsage(final long handle); - private native static long getPinnedUsage(final long handle); + private static native long getUsage(final long handle); + private static native long getPinnedUsage(final long handle); } diff --git a/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java b/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java index 6c87cc188..12854c510 100644 --- a/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java +++ b/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java @@ -10,10 +10,11 @@ package org.rocksdb; */ public class CassandraCompactionFilter extends AbstractCompactionFilter { - public CassandraCompactionFilter(boolean purgeTtlOnExpiration, int gcGracePeriodInSeconds) { + public CassandraCompactionFilter( + final boolean purgeTtlOnExpiration, final int gcGracePeriodInSeconds) { super(createNewCassandraCompactionFilter0(purgeTtlOnExpiration, gcGracePeriodInSeconds)); } - private native static long createNewCassandraCompactionFilter0( + private static native long createNewCassandraCompactionFilter0( boolean purgeTtlOnExpiration, int gcGracePeriodInSeconds); } diff --git a/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java b/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java index 4b0c71ba5..732faee20 100644 --- a/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java +++ b/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java @@ -10,16 +10,16 @@ package org.rocksdb; * values. 
*/ public class CassandraValueMergeOperator extends MergeOperator { - public CassandraValueMergeOperator(int gcGracePeriodInSeconds) { + public CassandraValueMergeOperator(final int gcGracePeriodInSeconds) { super(newSharedCassandraValueMergeOperator(gcGracePeriodInSeconds, 0)); - } + } - public CassandraValueMergeOperator(int gcGracePeriodInSeconds, int operandsLimit) { - super(newSharedCassandraValueMergeOperator(gcGracePeriodInSeconds, operandsLimit)); - } + public CassandraValueMergeOperator(final int gcGracePeriodInSeconds, final int operandsLimit) { + super(newSharedCassandraValueMergeOperator(gcGracePeriodInSeconds, operandsLimit)); + } - private native static long newSharedCassandraValueMergeOperator( - int gcGracePeriodInSeconds, int limit); + private static native long newSharedCassandraValueMergeOperator( + int gcGracePeriodInSeconds, int limit); - @Override protected final native void disposeInternal(final long handle); + @Override protected final native void disposeInternal(final long handle); } diff --git a/java/src/main/java/org/rocksdb/Checkpoint.java b/java/src/main/java/org/rocksdb/Checkpoint.java index 000969932..c9b3886c0 100644 --- a/java/src/main/java/org/rocksdb/Checkpoint.java +++ b/java/src/main/java/org/rocksdb/Checkpoint.java @@ -31,8 +31,7 @@ public class Checkpoint extends RocksObject { throw new IllegalStateException( "RocksDB instance must be initialized."); } - Checkpoint checkpoint = new Checkpoint(db); - return checkpoint; + return new Checkpoint(db); } /** @@ -53,11 +52,8 @@ public class Checkpoint extends RocksObject { private Checkpoint(final RocksDB db) { super(newCheckpoint(db.nativeHandle_)); - this.db_ = db; } - private final RocksDB db_; - private static native long newCheckpoint(long dbHandle); @Override protected final native void disposeInternal(final long handle); diff --git a/java/src/main/java/org/rocksdb/ClockCache.java b/java/src/main/java/org/rocksdb/ClockCache.java index a66dc0e8a..e4251db8e 100644 --- a/java/src/main/java/org/rocksdb/ClockCache.java +++ b/java/src/main/java/org/rocksdb/ClockCache.java @@ -53,7 +53,7 @@ public class ClockCache extends Cache { super(newClockCache(capacity, numShardBits, strictCapacityLimit)); } - private native static long newClockCache(final long capacity, - final int numShardBits, final boolean strictCapacityLimit); + private static native long newClockCache( + final long capacity, final int numShardBits, final boolean strictCapacityLimit); @Override protected final native void disposeInternal(final long handle); } diff --git a/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java b/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java index 1ac0a35bb..32ea4b04d 100644 --- a/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java +++ b/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java @@ -32,17 +32,17 @@ public class ColumnFamilyHandle extends RocksObject { /** * Constructor called only from JNI. - * + *
<p>
* NOTE: we are producing an additional Java Object here to represent the underlying native C++ * ColumnFamilyHandle object. The underlying object is not owned by ourselves. The Java API user * likely already had a ColumnFamilyHandle Java object which owns the underlying C++ object, as * they will have been presented it when they opened the database or added a Column Family. - * + *
<p>
* * TODO(AR) - Potentially a better design would be to cache the active Java Column Family Objects * in RocksDB, and return the same Java Object instead of instantiating a new one here. This could * also help us to improve the Java API semantics for Java users. See for example - * https://github.com/facebook/rocksdb/issues/2687. + * <a href="https://github.com/facebook/rocksdb/issues/2687">...</a>. * * @param nativeHandle native handle to the column family. */ @@ -80,7 +80,7 @@ public class ColumnFamilyHandle extends RocksObject { * information, this call might internally lock and release DB mutex to * access the up-to-date CF options. In addition, all the pointer-typed * options cannot be referenced any longer than the original options exist. - * + *
<p>
* Note that this function is not supported in RocksDBLite. * * @return the up-to-date descriptor. @@ -107,7 +107,7 @@ public class ColumnFamilyHandle extends RocksObject { return rocksDB_.nativeHandle_ == that.rocksDB_.nativeHandle_ && getID() == that.getID() && Arrays.equals(getName(), that.getName()); - } catch (RocksDBException e) { + } catch (final RocksDBException e) { throw new RuntimeException("Cannot compare column family handles", e); } } @@ -118,7 +118,7 @@ public class ColumnFamilyHandle extends RocksObject { int result = Objects.hash(getID(), rocksDB_.nativeHandle_); result = 31 * result + Arrays.hashCode(getName()); return result; - } catch (RocksDBException e) { + } catch (final RocksDBException e) { throw new RuntimeException("Cannot calculate hash code of column family handle", e); } } diff --git a/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java b/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java index 65dfd328f..d8d9658fc 100644 --- a/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java +++ b/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java @@ -1291,7 +1291,7 @@ public class ColumnFamilyOptions extends RocksObject * Dynamically changeable through * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. * - * @param prepopulateBlobCache the prepopulate blob cache option + * @param prepopulateBlobCache prepopulate the blob cache option * * @return the reference to the current options. */ diff --git a/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java index 97357aacf..776fc7038 100644 --- a/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java +++ b/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java @@ -121,9 +121,9 @@ public interface ColumnFamilyOptionsInterface * Note: Comparator can be set once upon database creation. - * + *
<p>
* Default: BytewiseComparator. * @param builtinComparator a {@link BuiltinComparator} type. * @return the instance of the current object. @@ -133,11 +133,11 @@ public interface ColumnFamilyOptionsInterface * Comparator should not be disposed before options instances using this comparator is * disposed. If dispose() function is not called, then comparator object will be * GC'd automatically. - * + *
<p>
* Comparator instance can be re-used in multiple options instances. * * @param comparator java instance. @@ -176,17 +176,17 @@ public interface ColumnFamilyOptionsInterface * If the client requires a new compaction filter to be used for different * compaction runs, it can specify call * {@link #setCompactionFilterFactory(AbstractCompactionFilterFactory)} * instead. - * + *
<p>
* The client should only set one of the two. * {@link #setCompactionFilter(AbstractCompactionFilter)} takes precedence * over {@link #setCompactionFilterFactory(AbstractCompactionFilterFactory)} * if the client specifies both. - * + *
<p>
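A sketch of the single-filter variant just described; RemoveEmptyValueCompactionFilter, which ships with RocksJava, stands in here purely as a stand-in for an application-specific filter (see also the thread-safety note that follows):

    try (final AbstractCompactionFilter<Slice> compactionFilter =
             new RemoveEmptyValueCompactionFilter();
         final ColumnFamilyOptions columnFamilyOptions =
             new ColumnFamilyOptions().setCompactionFilter(compactionFilter)) {
      // A factory could be supplied via setCompactionFilterFactory(...)
      // instead; if both are set, the filter takes precedence.
    }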
* If multithreaded compaction is being used, the supplied CompactionFilter * instance may be used from different threads concurrently and so should be thread-safe. * @@ -207,7 +207,7 @@ public interface ColumnFamilyOptionsInterface * A new filter will be created on each compaction run. If multithreaded * compaction is being used, each created CompactionFilter will only be used * from a single thread and so does not need to be thread-safe. @@ -228,7 +228,7 @@ public interface ColumnFamilyOptionsInterface * In some hash-based memtable representation such as HashLinkedList * and HashSkipList, prefixes are used to partition the keys into * several buckets. Prefix extractor is used to specify how to @@ -404,7 +404,7 @@ public interface ColumnFamilyOptionsInterface * If left empty, db_paths will be used. * Default: empty * @@ -422,7 +422,7 @@ public interface ColumnFamilyOptionsInterface * Default: {@link CompressionType#DISABLE_COMPRESSION_OPTION} * * @param bottommostCompressionType The compression type to use for the @@ -437,7 +437,7 @@ public interface ColumnFamilyOptionsInterface * Default: {@link CompressionType#DISABLE_COMPRESSION_OPTION} * * @return The compression type used for the bottommost level @@ -447,7 +447,7 @@ public interface ColumnFamilyOptionsInterface * To enable it, please see the definition of * {@link CompressionOptions}. * @@ -460,7 +460,7 @@ public interface ColumnFamilyOptionsInterface * See {@link #setBottommostCompressionOptions(CompressionOptions)}. * * @return the bottom most compression options. @@ -489,7 +489,7 @@ public interface ColumnFamilyOptionsInterface * Default: nullptr * * @param factory The factory reference diff --git a/java/src/main/java/org/rocksdb/CompactRangeOptions.java b/java/src/main/java/org/rocksdb/CompactRangeOptions.java index cf5708601..62559b34f 100644 --- a/java/src/main/java/org/rocksdb/CompactRangeOptions.java +++ b/java/src/main/java/org/rocksdb/CompactRangeOptions.java @@ -10,11 +10,10 @@ package org.rocksdb; * any compaction that is using this CompactRangeOptions. */ public class CompactRangeOptions extends RocksObject { - - private final static byte VALUE_kSkip = 0; - private final static byte VALUE_kIfHaveCompactionFilter = 1; - private final static byte VALUE_kForce = 2; - private final static byte VALUE_kForceOptimized = 3; + private static final byte VALUE_kSkip = 0; + private static final byte VALUE_kIfHaveCompactionFilter = 1; + private static final byte VALUE_kForce = 2; + private static final byte VALUE_kForceOptimized = 3; // For level based compaction, we can configure if we want to skip/force bottommost level // compaction. The order of this enum MUST follow the C++ layer. See BottommostLevelCompaction in @@ -219,7 +218,7 @@ public class CompactRangeOptions extends RocksObject { return this; } - private native static long newCompactRangeOptions(); + private static native long newCompactRangeOptions(); @Override protected final native void disposeInternal(final long handle); private native boolean exclusiveManualCompaction(final long handle); diff --git a/java/src/main/java/org/rocksdb/CompactionJobInfo.java b/java/src/main/java/org/rocksdb/CompactionJobInfo.java index 4e3b8d68b..cf04bde24 100644 --- a/java/src/main/java/org/rocksdb/CompactionJobInfo.java +++ b/java/src/main/java/org/rocksdb/CompactionJobInfo.java @@ -98,7 +98,7 @@ public class CompactionJobInfo extends RocksObject { /** * Get the table properties for the input and output tables. - * + *
<p>
* The map is keyed by values from {@link #inputFiles()} and * {@link #outputFiles()}. * diff --git a/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java b/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java index 4c8d6545c..92b21fc50 100644 --- a/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java +++ b/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java @@ -17,7 +17,7 @@ public class CompactionOptionsFIFO extends RocksObject { /** * Once the total sum of table files reaches this, we will delete the oldest * table file - * + *
<p>
* Default: 1GB * * @param maxTableFilesSize The maximum size of the table files @@ -33,7 +33,7 @@ public class CompactionOptionsFIFO extends RocksObject { /** * Once the total sum of table files reaches this, we will delete the oldest * table file - * + *
<p>
* Default: 1GB * * @return max table file size in bytes @@ -48,7 +48,7 @@ public class CompactionOptionsFIFO extends RocksObject { * and compaction won't trigger if average compact bytes per del file is * larger than options.write_buffer_size. This is to protect large files * from being compacted again. - * + *
<p>
* Default: false * * @param allowCompaction true to allow intra-L0 compaction @@ -61,13 +61,12 @@ public class CompactionOptionsFIFO extends RocksObject { return this; } - /** * Check if intra-L0 compaction is enabled. * When enabled, we try to compact smaller files into larger ones. - * + *
<p>
* See {@link #setAllowCompaction(boolean)}. - * + *
<p>
* Default: false * * @return true if intra-L0 compaction is enabled, false otherwise. @@ -76,8 +75,7 @@ public class CompactionOptionsFIFO extends RocksObject { return allowCompaction(nativeHandle_); } - - private native static long newCompactionOptionsFIFO(); + private static native long newCompactionOptionsFIFO(); @Override protected final native void disposeInternal(final long handle); private native void setMaxTableFilesSize(final long handle, diff --git a/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java b/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java index d2dfa4eef..4d2ebdb1f 100644 --- a/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java +++ b/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java @@ -18,7 +18,7 @@ public class CompactionOptionsUniversal extends RocksObject { * Percentage flexibility while comparing file size. If the candidate file(s) * size is 1% smaller than the next file's size, then include next file into * this candidate set. - * + *
<p>
* Default: 1 * * @param sizeRatio The size ratio to use @@ -34,7 +34,7 @@ public class CompactionOptionsUniversal extends RocksObject { * Percentage flexibility while comparing file size. If the candidate file(s) * size is 1% smaller than the next file's size, then include next file into * this candidate set. - * + *
<p>
* Default: 1 * * @return The size ratio in use @@ -45,7 +45,7 @@ public class CompactionOptionsUniversal extends RocksObject { /** * The minimum number of files in a single compaction run. - * + *
<p>
* Default: 2 * * @param minMergeWidth minimum number of files in a single compaction run @@ -59,7 +59,7 @@ public class CompactionOptionsUniversal extends RocksObject { /** * The minimum number of files in a single compaction run. - * + *
<p>
* Default: 2 * * @return minimum number of files in a single compaction run @@ -70,7 +70,7 @@ public class CompactionOptionsUniversal extends RocksObject { /** * The maximum number of files in a single compaction run. - * + *
<p>
* Default: {@link Long#MAX_VALUE} * * @param maxMergeWidth maximum number of files in a single compaction run @@ -84,7 +84,7 @@ public class CompactionOptionsUniversal extends RocksObject { /** * The maximum number of files in a single compaction run. - * + *
<p>
* Default: {@link Long#MAX_VALUE} * * @return maximum number of files in a single compaction run @@ -102,7 +102,7 @@ public class CompactionOptionsUniversal extends RocksObject { * a size amplification of 0%. Rocksdb uses the following heuristic * to calculate size amplification: it assumes that all files excluding * the earliest file contribute to the size amplification. - * + *
<p>
* Default: 200, which means that a 100 byte database could require upto * 300 bytes of storage. * @@ -126,7 +126,7 @@ public class CompactionOptionsUniversal extends RocksObject { * a size amplification of 0%. Rocksdb uses the following heuristic * to calculate size amplification: it assumes that all files excluding * the earliest file contribute to the size amplification. - * + *
<p>
* Default: 200, which means that a 100 byte database could require upto * 300 bytes of storage. * @@ -140,11 +140,11 @@ public class CompactionOptionsUniversal extends RocksObject { /** * If this option is set to be -1 (the default value), all the output files * will follow compression type specified. - * + *
<p>
* If this option is not negative, we will try to make sure compressed * size is just above this value. In normal cases, at least this percentage * of data will be compressed. - * + *
<p>
* When we are compacting to a new file, here is the criteria whether * it needs to be compressed: assuming here are the list of files sorted * by generation time: @@ -154,7 +154,7 @@ public class CompactionOptionsUniversal extends RocksObject { * well as the total size of C1...Ct as total_C, the compaction output file * will be compressed iff * total_C / total_size < this percentage - * + *
<p>
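Concrete numbers for the criterion above: if total_size is 100 MB and the oldest files C1...Ct sum to total_C = 40 MB, then with this option set to 50 the output file is compressed, because 40 / 100 = 0.4 is below 50%. A configuration sketch (the value is illustrative):

    try (final CompactionOptionsUniversal universalOptions =
             new CompactionOptionsUniversal().setCompressionSizePercent(50);
         final Options options = new Options()
             .setCompactionStyle(CompactionStyle.UNIVERSAL)
             .setCompactionOptionsUniversal(universalOptions)) {
      // Open the DB with these options...
    }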
* Default: -1 * * @param compressionSizePercent percentage of size for compression @@ -170,11 +170,11 @@ public class CompactionOptionsUniversal extends RocksObject { /** * If this option is set to be -1 (the default value), all the output files * will follow compression type specified. - * + *
<p>
* If this option is not negative, we will try to make sure compressed * size is just above this value. In normal cases, at least this percentage * of data will be compressed. - * + *
<p>
* When we are compacting to a new file, here is the criteria whether * it needs to be compressed: assuming here are the list of files sorted * by generation time: @@ -184,7 +184,7 @@ public class CompactionOptionsUniversal extends RocksObject { * well as the total size of C1...Ct as total_C, the compaction output file * will be compressed iff * total_C / total_size < this percentage - * + *
<p>
* Default: -1 * * @return percentage of size for compression @@ -195,7 +195,7 @@ public class CompactionOptionsUniversal extends RocksObject { /** * The algorithm used to stop picking files into a single compaction run - * + *
<p>
* Default: {@link CompactionStopStyle#CompactionStopStyleTotalSize} * * @param compactionStopStyle The compaction algorithm @@ -210,7 +210,7 @@ public class CompactionOptionsUniversal extends RocksObject { /** * The algorithm used to stop picking files into a single compaction run - * + *
<p>
* Default: {@link CompactionStopStyle#CompactionStopStyleTotalSize} * * @return The compaction algorithm @@ -222,7 +222,7 @@ public class CompactionOptionsUniversal extends RocksObject { /** * Option to optimize the universal multi level compaction by enabling * trivial move for non overlapping files. - * + *
<p>
* Default: false * * @param allowTrivialMove true if trivial move is allowed @@ -238,7 +238,7 @@ public class CompactionOptionsUniversal extends RocksObject { /** * Option to optimize the universal multi level compaction by enabling * trivial move for non overlapping files. - * + *
<p>
* Default: false * * @return true if trivial move is allowed @@ -247,7 +247,7 @@ public class CompactionOptionsUniversal extends RocksObject { return allowTrivialMove(nativeHandle_); } - private native static long newCompactionOptionsUniversal(); + private static native long newCompactionOptionsUniversal(); @Override protected final native void disposeInternal(final long handle); private native void setSizeRatio(final long handle, final int sizeRatio); diff --git a/java/src/main/java/org/rocksdb/CompactionStyle.java b/java/src/main/java/org/rocksdb/CompactionStyle.java index b24bbf850..794074df6 100644 --- a/java/src/main/java/org/rocksdb/CompactionStyle.java +++ b/java/src/main/java/org/rocksdb/CompactionStyle.java @@ -9,7 +9,7 @@ import java.util.List; /** * Enum CompactionStyle - * + *
<p>
* RocksDB supports different styles of compaction. Available * compaction styles can be chosen using this enumeration. * @@ -25,7 +25,8 @@ import java.util.List; * the old data, so it's basically a TTL compaction style. *

  • NONE - Disable background compaction. * Compaction jobs are submitted - * {@link RocksDB#compactFiles(CompactionOptions, ColumnFamilyHandle, List, int, int, CompactionJobInfo)} ()}.
  • + * {@link RocksDB#compactFiles(CompactionOptions, ColumnFamilyHandle, List, int, int, + * CompactionJobInfo)} ()}. * * * @see * Note that dispose() must be called before a ComparatorOptions * instance becomes out-of-scope to release the allocated memory in C++. */ @@ -48,10 +48,10 @@ public class ComparatorOptions extends RocksObject { } /** - * Indicates if a direct byte buffer (i.e. outside of the normal + * Indicates if a direct byte buffer (i.e. outside the normal * garbage-collected heap) is used, as opposed to a non-direct byte buffer * which is a wrapper around an on-heap byte[]. - * + *
<p>
    * Default: true * * @return true if a direct byte buffer will be used, false otherwise @@ -62,10 +62,10 @@ public class ComparatorOptions extends RocksObject { } /** - * Controls whether a direct byte buffer (i.e. outside of the normal + * Controls whether a direct byte buffer (i.e. outside the normal * garbage-collected heap) is used, as opposed to a non-direct byte buffer * which is a wrapper around an on-heap byte[]. - * + *
<p>
    * Default: true * * @param useDirectBuffer true if a direct byte buffer should be used, @@ -86,7 +86,7 @@ public class ComparatorOptions extends RocksObject { * if it requires less than {@code maxReuseBufferSize}, then an * existing buffer will be reused, else a new buffer will be * allocated just for that callback. - * + *
<p>
    * Default: 64 bytes * * @return the maximum size of a buffer which is reused, @@ -105,7 +105,7 @@ public class ComparatorOptions extends RocksObject { * if it requires less than {@code maxReuseBufferSize}, then an * existing buffer will be reused, else a new buffer will be * allocated just for that callback. - * + *
<p>
    * Default: 64 bytes * * @param maxReusedBufferSize the maximum size for a buffer to reuse, or 0 to @@ -119,7 +119,7 @@ public class ComparatorOptions extends RocksObject { return this; } - private native static long newComparatorOptions(); + private static native long newComparatorOptions(); private native byte reusedSynchronisationType(final long handle); private native void setReusedSynchronisationType(final long handle, final byte reusedSynchronisationType); diff --git a/java/src/main/java/org/rocksdb/CompressionOptions.java b/java/src/main/java/org/rocksdb/CompressionOptions.java index a9072bbb9..2e1ee5731 100644 --- a/java/src/main/java/org/rocksdb/CompressionOptions.java +++ b/java/src/main/java/org/rocksdb/CompressionOptions.java @@ -48,9 +48,9 @@ public class CompressionOptions extends RocksObject { * loaded into the compression library before compressing/uncompressing each * data block of subsequent files in the subcompaction. Effectively, this * improves compression ratios when there are repetitions across data blocks. - * + *
<p>
    * A value of 0 indicates the feature is disabled. - * + *
<p>
    * Default: 0. * * @param maxDictBytes Maximum bytes to use for the dictionary @@ -75,10 +75,10 @@ public class CompressionOptions extends RocksObject { * Maximum size of training data passed to zstd's dictionary trainer. Using * zstd's dictionary trainer can achieve even better compression ratio * improvements than using {@link #setMaxDictBytes(int)} alone. - * + *
<p>
    * The training data will be used to generate a dictionary * of {@link #maxDictBytes()}. - * + *
<p>
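A sketch tying the two dictionary knobs together; the 16 KiB dictionary and the roughly 100x training budget are illustrative values only, not recommendations from this API:

    try (final CompressionOptions compressionOptions = new CompressionOptions()
             .setEnabled(true)
             .setMaxDictBytes(16 * 1024) // dictionary size
             .setZstdMaxTrainBytes(100 * 16 * 1024); // training sample budget
         final Options options = new Options()
             .setCompressionType(CompressionType.ZSTD_COMPRESSION)
             .setCompressionOptions(compressionOptions)) {
      // Open the DB with these options...
    }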
    * Default: 0. * * @param zstdMaxTrainBytes Maximum bytes to use for training ZStd. @@ -104,10 +104,10 @@ public class CompressionOptions extends RocksObject { * For bottommost_compression_opts, to enable it, user must set enabled=true. * Otherwise, bottommost compression will use compression_opts as default * compression options. - * + *
<p>
    * For compression_opts, if compression_opts.enabled=false, it is still * used as compression options for compression process. - * + *
<p>
    * Default: false. * * @param enabled true to use these compression options @@ -131,8 +131,7 @@ public class CompressionOptions extends RocksObject { return enabled(nativeHandle_); } - - private native static long newCompressionOptions(); + private static native long newCompressionOptions(); @Override protected final native void disposeInternal(final long handle); private native void setWindowBits(final long handle, final int windowBits); diff --git a/java/src/main/java/org/rocksdb/CompressionType.java b/java/src/main/java/org/rocksdb/CompressionType.java index d1d73d51a..d1ecf0ac8 100644 --- a/java/src/main/java/org/rocksdb/CompressionType.java +++ b/java/src/main/java/org/rocksdb/CompressionType.java @@ -35,9 +35,9 @@ public enum CompressionType { * * @return CompressionType instance. */ - public static CompressionType getCompressionType(String libraryName) { + public static CompressionType getCompressionType(final String libraryName) { if (libraryName != null) { - for (CompressionType compressionType : CompressionType.values()) { + for (final CompressionType compressionType : CompressionType.values()) { if (compressionType.getLibraryName() != null && compressionType.getLibraryName().equals(libraryName)) { return compressionType; @@ -58,7 +58,7 @@ public enum CompressionType { * @throws IllegalArgumentException If CompressionType cannot be found for the * provided byteIdentifier */ - public static CompressionType getCompressionType(byte byteIdentifier) { + public static CompressionType getCompressionType(final byte byteIdentifier) { for (final CompressionType compressionType : CompressionType.values()) { if (compressionType.getValue() == byteIdentifier) { return compressionType; diff --git a/java/src/main/java/org/rocksdb/ConfigOptions.java b/java/src/main/java/org/rocksdb/ConfigOptions.java index 4d93f0c99..026f8b01d 100644 --- a/java/src/main/java/org/rocksdb/ConfigOptions.java +++ b/java/src/main/java/org/rocksdb/ConfigOptions.java @@ -44,10 +44,10 @@ public class ConfigOptions extends RocksObject { @Override protected final native void disposeInternal(final long handle); - private native static long newConfigOptions(); - private native static void setEnv(final long handle, final long envHandle); - private native static void setDelimiter(final long handle, final String delimiter); - private native static void setIgnoreUnknownOptions(final long handle, final boolean ignore); - private native static void setInputStringsEscaped(final long handle, final boolean escaped); - private native static void setSanityLevel(final long handle, final byte level); + private static native long newConfigOptions(); + private static native void setEnv(final long handle, final long envHandle); + private static native void setDelimiter(final long handle, final String delimiter); + private static native void setIgnoreUnknownOptions(final long handle, final boolean ignore); + private static native void setInputStringsEscaped(final long handle, final boolean escaped); + private static native void setSanityLevel(final long handle, final byte level); } diff --git a/java/src/main/java/org/rocksdb/DBOptions.java b/java/src/main/java/org/rocksdb/DBOptions.java index 9eb5ca873..655d900c3 100644 --- a/java/src/main/java/org/rocksdb/DBOptions.java +++ b/java/src/main/java/org/rocksdb/DBOptions.java @@ -11,7 +11,7 @@ import java.util.*; /** * DBOptions to control the behavior of a database. It will be used * during the creation of a {@link org.rocksdb.RocksDB} (i.e., RocksDB.open()). - * + *
<p>
    * As a descendent of {@link AbstractNativeReference}, this class is {@link AutoCloseable} * and will be automatically released if opened in the preamble of a try with resources block. */ @@ -24,7 +24,7 @@ public class DBOptions extends RocksObject /** * Construct DBOptions. - * + *
<p>
    * This constructor will create (by allocating a block of memory) * an {@code rocksdb::DBOptions} in the c++ side. */ @@ -36,13 +36,13 @@ public class DBOptions extends RocksObject /** * Copy constructor for DBOptions. - * + *
<p>
    * NOTE: This does a shallow copy, which means env, rate_limiter, sst_file_manager, * info_log and other pointers will be cloned! * * @param other The DBOptions to copy. */ - public DBOptions(DBOptions other) { + public DBOptions(final DBOptions other) { super(copyDBOptions(other.nativeHandle_)); this.env_ = other.env_; this.numShardBits_ = other.numShardBits_; diff --git a/java/src/main/java/org/rocksdb/DirectSlice.java b/java/src/main/java/org/rocksdb/DirectSlice.java index 02fa3511f..5aa0866ff 100644 --- a/java/src/main/java/org/rocksdb/DirectSlice.java +++ b/java/src/main/java/org/rocksdb/DirectSlice.java @@ -10,13 +10,13 @@ import java.nio.ByteBuffer; /** * Base class for slices which will receive direct * ByteBuffer based access to the underlying data. - * + *
<p>
    * ByteBuffer backed slices typically perform better with * larger keys and values. When using smaller keys and * values consider using @see org.rocksdb.Slice */ public class DirectSlice extends AbstractSlice { - public final static DirectSlice NONE = new DirectSlice(); + public static final DirectSlice NONE = new DirectSlice(); /** * Indicates whether we have to free the memory pointed to by the Slice @@ -29,7 +29,7 @@ public class DirectSlice extends AbstractSlice { * Called from JNI to construct a new Java DirectSlice * without an underlying C++ object set * at creation time. - * + *
<p>
    * Note: You should be aware that it is intentionally marked as * package-private. This is so that developers cannot construct their own * default DirectSlice objects (at present). As developers cannot construct @@ -123,9 +123,8 @@ public class DirectSlice extends AbstractSlice { disposeInternal(nativeHandle); } - private native static long createNewDirectSlice0(final ByteBuffer data, - final int length); - private native static long createNewDirectSlice1(final ByteBuffer data); + private static native long createNewDirectSlice0(final ByteBuffer data, final int length); + private static native long createNewDirectSlice1(final ByteBuffer data); @Override protected final native ByteBuffer data0(long handle); private native byte get0(long handle, int offset); private native void clear0(long handle, boolean internalBuffer, diff --git a/java/src/main/java/org/rocksdb/EncodingType.java b/java/src/main/java/org/rocksdb/EncodingType.java index 5ceeb54c8..c2790c195 100644 --- a/java/src/main/java/org/rocksdb/EncodingType.java +++ b/java/src/main/java/org/rocksdb/EncodingType.java @@ -47,7 +47,7 @@ public enum EncodingType { return value_; } - private EncodingType(byte value) { + private EncodingType(final byte value) { value_ = value; } diff --git a/java/src/main/java/org/rocksdb/Env.java b/java/src/main/java/org/rocksdb/Env.java index 07b5319bb..db4c6fd78 100644 --- a/java/src/main/java/org/rocksdb/Env.java +++ b/java/src/main/java/org/rocksdb/Env.java @@ -19,7 +19,7 @@ public abstract class Env extends RocksObject { private static final Env DEFAULT_ENV = new RocksEnv(getDefaultEnvInternal()); static { - /** + /* * The Ownership of the Default Env belongs to C++ * and so we disown the native handle here so that * we cannot accidentally free it from Java. diff --git a/java/src/main/java/org/rocksdb/EnvOptions.java b/java/src/main/java/org/rocksdb/EnvOptions.java index 6baddb310..5cb193ac1 100644 --- a/java/src/main/java/org/rocksdb/EnvOptions.java +++ b/java/src/main/java/org/rocksdb/EnvOptions.java @@ -31,7 +31,7 @@ public class EnvOptions extends RocksObject { /** * Enable/Disable memory mapped reads. - * + *
<p>
    * Default: false * * @param useMmapReads true to enable memory mapped reads, false to disable. @@ -55,7 +55,7 @@ public class EnvOptions extends RocksObject { /** * Enable/Disable memory mapped Writes. - * + *
<p>
    * Default: true * * @param useMmapWrites true to enable memory mapped writes, false to disable. @@ -79,7 +79,7 @@ public class EnvOptions extends RocksObject { /** * Enable/Disable direct reads, i.e. {@code O_DIRECT}. - * + *
<p>
    * Default: false * * @param useDirectReads true to enable direct reads, false to disable. @@ -103,7 +103,7 @@ public class EnvOptions extends RocksObject { /** * Enable/Disable direct writes, i.e. {@code O_DIRECT}. - * + *
<p>
    * Default: false * * @param useDirectWrites true to enable direct writes, false to disable. @@ -127,9 +127,9 @@ public class EnvOptions extends RocksObject { /** * Enable/Disable fallocate calls. - * + *
<p>
    * Default: true - * + *
<p>
    * If false, {@code fallocate()} calls are bypassed. * * @param allowFallocate true to enable fallocate calls, false to disable. @@ -153,7 +153,7 @@ public class EnvOptions extends RocksObject { /** * Enable/Disable the {@code FD_CLOEXEC} bit when opening file descriptors. - * + *
<p>
* Default: true * * @param setFdCloexec true to enable the {@code FD_CLOEXEC} bit, @@ -181,7 +181,7 @@ * Allows OS to incrementally sync files to disk while they are being * written, in the background. Issue one request for every * {@code bytesPerSync} written. - * + *
<p>
    * Default: 0 * * @param bytesPerSync 0 to disable, otherwise the number of bytes. @@ -323,8 +323,8 @@ public class EnvOptions extends RocksObject { return rateLimiter; } - private native static long newEnvOptions(); - private native static long newEnvOptions(final long dboptions_handle); + private static native long newEnvOptions(); + private static native long newEnvOptions(final long dboptions_handle); @Override protected final native void disposeInternal(final long handle); private native void setUseMmapReads(final long handle, diff --git a/java/src/main/java/org/rocksdb/EventListener.java b/java/src/main/java/org/rocksdb/EventListener.java index a12ab92ba..27652eaf8 100644 --- a/java/src/main/java/org/rocksdb/EventListener.java +++ b/java/src/main/java/org/rocksdb/EventListener.java @@ -12,7 +12,7 @@ import java.util.List; * be called when specific RocksDB event happens such as flush. It can * be used as a building block for developing custom features such as * stats-collector or external compaction algorithm. - * + *
<p>
    * Note that callback functions should not run for an extended period of * time before the function returns, otherwise RocksDB may be blocked. * For example, it is not suggested to do @@ -21,17 +21,17 @@ import java.util.List; * {@link RocksDB#put(ColumnFamilyHandle, WriteOptions, byte[], byte[])} * (as Put may be blocked in certain cases) in the same thread in the * EventListener callback. - * + *
<p>
    * However, doing * {@link RocksDB#compactFiles(CompactionOptions, ColumnFamilyHandle, List, int, int, * CompactionJobInfo)} and {@link RocksDB#put(ColumnFamilyHandle, WriteOptions, byte[], byte[])} in * another thread is considered safe. - * + *
<p>
    * [Threading] All EventListener callback will be called using the * actual thread that involves in that specific event. For example, it * is the RocksDB background flush thread that does the actual flush to * call {@link #onFlushCompleted(RocksDB, FlushJobInfo)}. - * + *
<p>
    * [Locking] All EventListener callbacks are designed to be called without * the current thread holding any DB mutex. This is to prevent potential * deadlock and performance issue when using EventListener callback @@ -41,7 +41,7 @@ public interface EventListener { /** * A callback function to RocksDB which will be called before a * RocksDB starts to flush memtables. - * + *
<p>
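Putting the threading and locking guidance above into practice, a minimal listener sketch using RocksJava's AbstractEventListener adapter; the callback only records information and returns promptly:

    final AbstractEventListener flushListener = new AbstractEventListener() {
      @Override
      public void onFlushCompleted(final RocksDB db, final FlushJobInfo flushJobInfo) {
        // Cheap, non-blocking work only; no compactFiles()/put() from here.
        System.out.println("Flushed: " + flushJobInfo.getFilePath());
      }
    };
    try (final Options options = new Options()
             .setCreateIfMissing(true)
             .setListeners(java.util.Collections.singletonList(flushListener))) {
      // Open the DB with these options...
    }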
* Note that this function must be implemented in a way such that * it should not run for an extended period of time before the function * returns. Otherwise, RocksDB may be blocked. @@ -55,7 +55,7 @@ public interface EventListener { /** * A callback function to RocksDB which will be called whenever a * registered RocksDB flushes a file. - * + *
<p>
* Note that this function must be implemented in a way such that * it should not run for an extended period of time before the function * returns. Otherwise, RocksDB may be blocked. @@ -77,7 +77,7 @@ * on file creations and deletions are suggested to implement * {@link #onFlushCompleted(RocksDB, FlushJobInfo)} and * {@link #onCompactionCompleted(RocksDB, CompactionJobInfo)}. - * + *
<p>
    * Note that if applications would like to use the passed reference * outside this function call, they should make copies from the * returned value. @@ -91,7 +91,7 @@ public interface EventListener { * A callback function to RocksDB which will be called before a * RocksDB starts to compact. The default implementation is * no-op. - * + *
<p>
* Note that this function must be implemented in a way such that * it should not run for an extended period of time before the function * returns. Otherwise, RocksDB may be blocked. @@ -108,7 +108,7 @@ * A callback function for RocksDB which will be called whenever * a registered RocksDB compacts a file. The default implementation * is a no-op. - * + *
<p>
* Note that this function must be implemented in a way such that * it should not run for an extended period of time before the function * returns. Otherwise, RocksDB may be blocked. @@ -129,11 +129,11 @@ * of a pointer to DB. Applications that build logic based * on file creations and deletions are suggested to implement * OnFlushCompleted and OnCompactionCompleted. - * + *
<p>
    * Historically it will only be called if the file is successfully created. * Now it will also be called on failure case. User can check info.status * to see if it succeeded or not. - * + *
<p>
* Note that if applications would like to use the passed reference * outside this function call, they should make copies from the * returned value. @@ -147,7 +147,7 @@ * A callback function for RocksDB which will be called before * an SST file is being created. It will be followed by OnTableFileCreated after * the creation finishes. - * + *
<p>
* Note that if applications would like to use the passed reference * outside this function call, they should make copies from the * returned value. @@ -160,11 +160,11 @@ /** * A callback function for RocksDB which will be called before * a memtable is made immutable. - * + *
<p>
* Note that this function must be implemented in a way such that * it should not run for an extended period of time before the function * returns. Otherwise, RocksDB may be blocked. - * + *
<p>
* Note that if applications would like to use the passed reference * outside this function call, they should make copies from the * returned value. @@ -177,7 +177,7 @@ /** * A callback function for RocksDB which will be called before * a column family handle is deleted. - * + *
<p>
* Note that this function must be implemented in a way such that * it should not run for an extended period of time before the function * returns. Otherwise, RocksDB may be blocked. @@ -190,7 +190,7 @@ /** * A callback function for RocksDB which will be called after an external * file is ingested using IngestExternalFile. - * + *
<p>
* Note that this function will run on the same thread as * IngestExternalFile(); if this function is blocked, IngestExternalFile() * will be blocked from finishing. @@ -210,7 +210,7 @@ * preventing the database from entering read-only mode. We do not provide any * guarantee when failed flushes/compactions will be rescheduled if the user * suppresses an error. - * + *
<p>
    * Note that this function can run on the same threads as flush, compaction, * and user writes. So, it is extremely important not to perform heavy * computations or blocking calls in this function. @@ -224,7 +224,7 @@ public interface EventListener { /** * A callback function for RocksDB which will be called whenever a change * of superversion triggers a change of the stall conditions. - * + *
<p>
* Note that this function must be implemented in a way such that * it should not run for an extended period of time before the function * returns. Otherwise, RocksDB may be blocked. @@ -301,7 +301,7 @@ * If true, the {@link #onFileReadFinish(FileOperationInfo)} * and {@link #onFileWriteFinish(FileOperationInfo)} will be called. If * false, then they won't be called. - * + *
<p>
    * Default: false * * @return whether to callback when file read/write is finished diff --git a/java/src/main/java/org/rocksdb/ExternalFileIngestionInfo.java b/java/src/main/java/org/rocksdb/ExternalFileIngestionInfo.java index 6b14a8024..7a99dd6bf 100644 --- a/java/src/main/java/org/rocksdb/ExternalFileIngestionInfo.java +++ b/java/src/main/java/org/rocksdb/ExternalFileIngestionInfo.java @@ -74,12 +74,12 @@ public class ExternalFileIngestionInfo { } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - ExternalFileIngestionInfo that = (ExternalFileIngestionInfo) o; + final ExternalFileIngestionInfo that = (ExternalFileIngestionInfo) o; return globalSeqno == that.globalSeqno && Objects.equals(columnFamilyName, that.columnFamilyName) && Objects.equals(externalFilePath, that.externalFilePath) diff --git a/java/src/main/java/org/rocksdb/FileOperationInfo.java b/java/src/main/java/org/rocksdb/FileOperationInfo.java index aa5743ed3..fae9cd5de 100644 --- a/java/src/main/java/org/rocksdb/FileOperationInfo.java +++ b/java/src/main/java/org/rocksdb/FileOperationInfo.java @@ -87,7 +87,7 @@ public class FileOperationInfo { } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) diff --git a/java/src/main/java/org/rocksdb/FlushJobInfo.java b/java/src/main/java/org/rocksdb/FlushJobInfo.java index ca9aa0523..414d3a2f3 100644 --- a/java/src/main/java/org/rocksdb/FlushJobInfo.java +++ b/java/src/main/java/org/rocksdb/FlushJobInfo.java @@ -90,7 +90,7 @@ public class FlushJobInfo { * Determine if rocksdb is currently slowing-down all writes to prevent * creating too many Level 0 files as compaction seems not able to * catch up the write request speed. - * + *
<p>
    * This indicates that there are too many files in Level 0. * * @return true if rocksdb is currently slowing-down all writes, @@ -103,7 +103,7 @@ public class FlushJobInfo { /** * Determine if rocksdb is currently blocking any writes to prevent * creating more L0 files. - * + *
<p>
    * This indicates that there are too many files in level 0. * Compactions should try to compact L0 files down to lower levels as soon * as possible. @@ -151,12 +151,12 @@ public class FlushJobInfo { } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - FlushJobInfo that = (FlushJobInfo) o; + final FlushJobInfo that = (FlushJobInfo) o; return columnFamilyId == that.columnFamilyId && threadId == that.threadId && jobId == that.jobId && triggeredWritesSlowdown == that.triggeredWritesSlowdown && triggeredWritesStop == that.triggeredWritesStop && smallestSeqno == that.smallestSeqno diff --git a/java/src/main/java/org/rocksdb/FlushOptions.java b/java/src/main/java/org/rocksdb/FlushOptions.java index 760b515fd..0ec835089 100644 --- a/java/src/main/java/org/rocksdb/FlushOptions.java +++ b/java/src/main/java/org/rocksdb/FlushOptions.java @@ -47,13 +47,13 @@ public class FlushOptions extends RocksObject { } /** - * Set to true so that flush would proceeds immediately even it it means + * Set to true so that flush would proceed immediately even if it means * writes will stall for the duration of the flush. - * + *
<p>
    * Set to false so that the operation will wait until it's possible to do * the flush without causing stall or until required flush is performed by * someone else (foreground call or background thread). - * + *
<p>
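A sketch of the trade-off this option controls; db stands for an already-open RocksDB instance, and RocksDBException handling is elided:

    try (final FlushOptions flushOptions = new FlushOptions()
             .setWaitForFlush(true) // block until the flush completes
             .setAllowWriteStall(true)) { // flush now, even if writers stall
      db.flush(flushOptions);
    }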
    * Default: false * * @param allowWriteStall true to allow writes to stall for flush, false @@ -78,7 +78,7 @@ public class FlushOptions extends RocksObject { return allowWriteStall(nativeHandle_); } - private native static long newFlushOptions(); + private static native long newFlushOptions(); @Override protected final native void disposeInternal(final long handle); private native void setWaitForFlush(final long handle, diff --git a/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java b/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java index 05cc2bb90..4bc860d1c 100644 --- a/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java +++ b/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java @@ -6,7 +6,7 @@ package org.rocksdb; * Such memtable contains a fix-sized array of buckets, where * each bucket points to a sorted singly-linked * list (or null if the bucket is empty). - * + *
<p>
    * Note that since this mem-table representation relies on the * key prefix, it is required to invoke one of the usePrefixExtractor * functions to specify how to extract key prefix given a key. diff --git a/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java b/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java index efc78b14e..7cfa1c0df 100644 --- a/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java +++ b/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java @@ -6,7 +6,7 @@ package org.rocksdb; * Such mem-table representation contains a fix-sized array of * buckets, where each bucket points to a skiplist (or null if the * bucket is empty). - * + *
<p>
    * Note that since this mem-table representation relies on the * key prefix, it is required to invoke one of the usePrefixExtractor * functions to specify how to extract key prefix given a key. diff --git a/java/src/main/java/org/rocksdb/HistogramType.java b/java/src/main/java/org/rocksdb/HistogramType.java index c5da68d16..35724a108 100644 --- a/java/src/main/java/org/rocksdb/HistogramType.java +++ b/java/src/main/java/org/rocksdb/HistogramType.java @@ -63,7 +63,7 @@ public enum HistogramType { /** * number of bytes decompressed. - * + *
<p>
    * number of bytes is when uncompressed; i.e. before/after respectively */ BYTES_DECOMPRESSED((byte) 0x1B), diff --git a/java/src/main/java/org/rocksdb/IndexType.java b/java/src/main/java/org/rocksdb/IndexType.java index 162edad1b..5615e929b 100644 --- a/java/src/main/java/org/rocksdb/IndexType.java +++ b/java/src/main/java/org/rocksdb/IndexType.java @@ -47,7 +47,7 @@ public enum IndexType { return value_; } - IndexType(byte value) { + IndexType(final byte value) { value_ = value; } diff --git a/java/src/main/java/org/rocksdb/InfoLogLevel.java b/java/src/main/java/org/rocksdb/InfoLogLevel.java index b7c0f0700..197bd89da 100644 --- a/java/src/main/java/org/rocksdb/InfoLogLevel.java +++ b/java/src/main/java/org/rocksdb/InfoLogLevel.java @@ -15,7 +15,7 @@ public enum InfoLogLevel { private final byte value_; - private InfoLogLevel(final byte value) { + InfoLogLevel(final byte value) { value_ = value; } diff --git a/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java b/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java index a6a308daa..1a6a5fccd 100644 --- a/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java +++ b/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java @@ -136,15 +136,15 @@ public class IngestExternalFileOptions extends RocksObject { /** * Set to true if you would like duplicate keys in the file being ingested * to be skipped rather than overwriting existing data under that key. - * + *
<p>
* Use case: back-fill of some historical data in the database without * over-writing an existing newer version of the data. - * + *
<p>
    * This option could only be used if the DB has been running * with DBOptions#allowIngestBehind() == true since the dawn of time. - * + *
<p>
    * All files will be ingested at the bottommost level with seqno=0. - * + *
<p>
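Putting the constraints above together in one sketch; the paths are hypothetical and RocksDBException handling is elided. Note the DB itself must have been opened with allow-ingest-behind from the start:

    try (final Options options = new Options()
             .setCreateIfMissing(true)
             .setAllowIngestBehind(true);
         final RocksDB db = RocksDB.open(options, "/tmp/backfill-db");
         final IngestExternalFileOptions ingestOptions =
             new IngestExternalFileOptions().setIngestBehind(true)) {
      db.ingestExternalFile(
          java.util.Collections.singletonList("/tmp/historical.sst"),
          ingestOptions);
    }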
    * Default: false * * @param ingestBehind true if you would like duplicate keys in the file being @@ -160,7 +160,7 @@ public class IngestExternalFileOptions extends RocksObject { /** * Returns true write if the global_seqno is written to a given offset * in the external SST file for backward compatibility. - * + *
<p>
    * See {@link #setWriteGlobalSeqno(boolean)}. * * @return true if the global_seqno is written to a given offset, @@ -173,21 +173,21 @@ public class IngestExternalFileOptions extends RocksObject { /** * Set to true if you would like to write the global_seqno to a given offset * in the external SST file for backward compatibility. - * + *
<p>
    * Older versions of RocksDB write the global_seqno to a given offset within * the ingested SST files, and new versions of RocksDB do not. - * + *
<p>
    * If you ingest an external SST using new version of RocksDB and would like * to be able to downgrade to an older version of RocksDB, you should set * {@link #writeGlobalSeqno()} to true. - * + *
<p>
    * If your service is just starting to use the new RocksDB, we recommend that * you set this option to false, which brings two benefits: * 1. No extra random write for global_seqno during ingestion. * 2. Without writing external SST file, it's possible to do checksum. - * + *
<p>
    * We have a plan to set this option to false by default in the future. - * + *
<p>
* Default: true * * @param writeGlobalSeqno true to write the global_seqno to a given offset, @@ -201,10 +201,10 @@ public class IngestExternalFileOptions extends RocksObject { return this; } - private native static long newIngestExternalFileOptions(); - private native static long newIngestExternalFileOptions( - final boolean moveFiles, final boolean snapshotConsistency, - final boolean allowGlobalSeqNo, final boolean allowBlockingFlush); + private static native long newIngestExternalFileOptions(); + private static native long newIngestExternalFileOptions(final boolean moveFiles, + final boolean snapshotConsistency, final boolean allowGlobalSeqNo, + final boolean allowBlockingFlush); @Override protected final native void disposeInternal(final long handle); private native boolean moveFiles(final long handle); diff --git a/java/src/main/java/org/rocksdb/KeyMayExist.java b/java/src/main/java/org/rocksdb/KeyMayExist.java index 36185d8c9..6149b8529 100644 --- a/java/src/main/java/org/rocksdb/KeyMayExist.java +++ b/java/src/main/java/org/rocksdb/KeyMayExist.java @@ -24,7 +24,6 @@ public class KeyMayExist { } public enum KeyMayExistEnum { kNotExist, kExistsWithoutValue, kExistsWithValue } - ; public KeyMayExist(final KeyMayExistEnum exists, final int valueLength) { this.exists = exists; diff --git a/java/src/main/java/org/rocksdb/LRUCache.java b/java/src/main/java/org/rocksdb/LRUCache.java index db90b17c5..0a9d02e87 100644 --- a/java/src/main/java/org/rocksdb/LRUCache.java +++ b/java/src/main/java/org/rocksdb/LRUCache.java @@ -99,7 +99,7 @@ public class LRUCache extends Cache { capacity, numShardBits, strictCapacityLimit, highPriPoolRatio, lowPriPoolRatio)); } - private native static long newLRUCache(final long capacity, final int numShardBits, + private static native long newLRUCache(final long capacity, final int numShardBits, final boolean strictCapacityLimit, final double highPriPoolRatio, final double lowPriPoolRatio); @Override protected final native void disposeInternal(final long handle); diff --git a/java/src/main/java/org/rocksdb/Logger.java b/java/src/main/java/org/rocksdb/Logger.java index 00a5d5674..614a7fa50 100644 --- a/java/src/main/java/org/rocksdb/Logger.java +++ b/java/src/main/java/org/rocksdb/Logger.java @@ -36,9 +36,8 @@ package org.rocksdb; *

    */ public abstract class Logger extends RocksCallbackObject { - - private final static long WITH_OPTIONS = 0; - private final static long WITH_DBOPTIONS = 1; + private static final long WITH_OPTIONS = 0; + private static final long WITH_DBOPTIONS = 1; /** *
<p>AbstractLogger constructor.</p>
    @@ -68,7 +67,7 @@ public abstract class Logger extends RocksCallbackObject { } @Override - protected long initializeNative(long... nativeParameterHandles) { + protected long initializeNative(final long... nativeParameterHandles) { if(nativeParameterHandles[1] == WITH_OPTIONS) { return createNewLoggerOptions(nativeParameterHandles[0]); } else if(nativeParameterHandles[1] == WITH_DBOPTIONS) { diff --git a/java/src/main/java/org/rocksdb/MemTableConfig.java b/java/src/main/java/org/rocksdb/MemTableConfig.java index 83cee974a..17033d251 100644 --- a/java/src/main/java/org/rocksdb/MemTableConfig.java +++ b/java/src/main/java/org/rocksdb/MemTableConfig.java @@ -8,7 +8,7 @@ package org.rocksdb; * MemTableConfig is used to config the internal mem-table of a RocksDB. * It is required for each memtable to have one such sub-class to allow * Java developers to use it. - * + *
<p>
    * To make a RocksDB to use a specific MemTable format, its associated * MemTableConfig should be properly set and passed into Options * via Options.setMemTableFactory() and open the db using that Options. @@ -25,5 +25,5 @@ public abstract class MemTableConfig { * * @return native handle address to native memory table instance. */ - abstract protected long newMemTableFactoryHandle(); + protected abstract long newMemTableFactoryHandle(); } diff --git a/java/src/main/java/org/rocksdb/MemTableInfo.java b/java/src/main/java/org/rocksdb/MemTableInfo.java index f4fb577c3..3d429035a 100644 --- a/java/src/main/java/org/rocksdb/MemTableInfo.java +++ b/java/src/main/java/org/rocksdb/MemTableInfo.java @@ -77,12 +77,12 @@ public class MemTableInfo { } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - MemTableInfo that = (MemTableInfo) o; + final MemTableInfo that = (MemTableInfo) o; return firstSeqno == that.firstSeqno && earliestSeqno == that.earliestSeqno && numEntries == that.numEntries && numDeletes == that.numDeletes && Objects.equals(columnFamilyName, that.columnFamilyName); diff --git a/java/src/main/java/org/rocksdb/MemoryUsageType.java b/java/src/main/java/org/rocksdb/MemoryUsageType.java index 6010ce7af..40e6d1716 100644 --- a/java/src/main/java/org/rocksdb/MemoryUsageType.java +++ b/java/src/main/java/org/rocksdb/MemoryUsageType.java @@ -64,7 +64,7 @@ public enum MemoryUsageType { "Illegal value provided for MemoryUsageType."); } - MemoryUsageType(byte value) { + MemoryUsageType(final byte value) { value_ = value; } diff --git a/java/src/main/java/org/rocksdb/MemoryUtil.java b/java/src/main/java/org/rocksdb/MemoryUtil.java index 52b2175e6..15b9f001a 100644 --- a/java/src/main/java/org/rocksdb/MemoryUtil.java +++ b/java/src/main/java/org/rocksdb/MemoryUtil.java @@ -28,12 +28,12 @@ public class MemoryUtil { * @return Map from {@link MemoryUsageType} to memory usage as a {@link Long}. */ public static Map getApproximateMemoryUsageByType(final List dbs, final Set caches) { - int dbCount = (dbs == null) ? 0 : dbs.size(); - int cacheCount = (caches == null) ? 0 : caches.size(); - long[] dbHandles = new long[dbCount]; - long[] cacheHandles = new long[cacheCount]; + final int dbCount = (dbs == null) ? 0 : dbs.size(); + final int cacheCount = (caches == null) ? 0 : caches.size(); + final long[] dbHandles = new long[dbCount]; + final long[] cacheHandles = new long[cacheCount]; if (dbCount > 0) { - ListIterator dbIter = dbs.listIterator(); + final ListIterator dbIter = dbs.listIterator(); while (dbIter.hasNext()) { dbHandles[dbIter.nextIndex()] = dbIter.next().nativeHandle_; } @@ -42,19 +42,19 @@ public class MemoryUtil { // NOTE: This index handling is super ugly but I couldn't get a clean way to track both the // index and the iterator simultaneously within a Set. 
int i = 0; - for (Cache cache : caches) { + for (final Cache cache : caches) { cacheHandles[i] = cache.nativeHandle_; i++; } } - Map byteOutput = getApproximateMemoryUsageByType(dbHandles, cacheHandles); - Map output = new HashMap<>(); - for(Map.Entry longEntry : byteOutput.entrySet()) { + final Map byteOutput = getApproximateMemoryUsageByType(dbHandles, cacheHandles); + final Map output = new HashMap<>(); + for (final Map.Entry longEntry : byteOutput.entrySet()) { output.put(MemoryUsageType.getMemoryUsageType(longEntry.getKey()), longEntry.getValue()); } return output; } - private native static Map getApproximateMemoryUsageByType(final long[] dbHandles, - final long[] cacheHandles); + private static native Map getApproximateMemoryUsageByType( + final long[] dbHandles, final long[] cacheHandles); } diff --git a/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java b/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java index af28fa8ce..e54db7171 100644 --- a/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java +++ b/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java @@ -7,15 +7,13 @@ package org.rocksdb; import java.util.*; -public class MutableColumnFamilyOptions - extends AbstractMutableOptions { - +public class MutableColumnFamilyOptions extends AbstractMutableOptions { /** * User must use builder pattern, or parser. * * @param keys the keys * @param values the values - * + *
<p>
    * See {@link #builder()} and {@link #parse(String)}. */ private MutableColumnFamilyOptions(final String[] keys, @@ -36,11 +34,11 @@ public class MutableColumnFamilyOptions /** * Parses a String representation of MutableColumnFamilyOptions - * + *
<p>
    * The format is: key1=value1;key2=value2;key3=value3 etc - * + *
<p>
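* For example, a parsed string can be turned into options and applied to a
* running database (a sketch; the option keys and the {@code db} and
* {@code columnFamilyHandle} names are illustrative):
* <pre>{@code
*   final MutableColumnFamilyOptions mutableOpts = MutableColumnFamilyOptions
*       .parse("write_buffer_size=67108864;max_write_buffer_number=4")
*       .build();
*   db.setOptions(columnFamilyHandle, mutableOpts);
* }</pre>
* <p>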
    * For int[] values, each int should be separated by a colon, e.g. - * + *
<p>
    * key1=value1;intArrayKey1=1:2:3 * * @param str The string representation of the mutable column family options @@ -157,8 +155,8 @@ public class MutableColumnFamilyOptions public static class MutableColumnFamilyOptionsBuilder extends AbstractMutableOptionsBuilder implements MutableColumnFamilyOptionsInterface { - - private final static Map ALL_KEYS_LOOKUP = new HashMap<>(); + private static final Map ALL_KEYS_LOOKUP = + new HashMap<>(); static { for(final MutableColumnFamilyOptionKey key : MemtableOption.values()) { ALL_KEYS_LOOKUP.put(key.name(), key); @@ -476,7 +474,7 @@ public class MutableColumnFamilyOptions @Override public CompressionType compressionType() { - return (CompressionType) getEnum(MiscOption.compression); + return getEnum(MiscOption.compression); } @Override @@ -549,7 +547,7 @@ public class MutableColumnFamilyOptions @Override public CompressionType blobCompressionType() { - return (CompressionType) getEnum(BlobOption.blob_compression_type); + return getEnum(BlobOption.blob_compression_type); } @Override @@ -617,7 +615,7 @@ public class MutableColumnFamilyOptions @Override public PrepopulateBlobCache prepopulateBlobCache() { - return (PrepopulateBlobCache) getEnum(BlobOption.prepopulate_blob_cache); + return getEnum(BlobOption.prepopulate_blob_cache); } } } diff --git a/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java index 0f5fe7d78..729b0e882 100644 --- a/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java +++ b/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java @@ -11,15 +11,15 @@ public interface MutableColumnFamilyOptionsInterface< /** * Amount of data to build up in memory (backed by an unsorted log * on disk) before converting to a sorted on-disk file. - * + *
<p>
    * Larger values increase performance, especially during bulk loads. * Up to {@code max_write_buffer_number} write buffers may be held in memory * at the same time, so you may wish to adjust this parameter * to control memory usage. - * + *
<p>
    * Also, a larger write buffer will result in a longer recovery time * the next time the database is opened. - * + *
<p>
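* For example, to request a 128MB write buffer via the mutable-options
* builder (a sketch):
* <pre>{@code
*   final MutableColumnFamilyOptions opts = MutableColumnFamilyOptions
*       .builder()
*       .setWriteBufferSize(128 * 1024 * 1024)
*       .build();
* }</pre>
* <p>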
    * Default: 64MB * @param writeBufferSize the size of write buffer. * @return the instance of the current object. @@ -56,7 +56,7 @@ public interface MutableColumnFamilyOptionsInterface< /** * Number of files to trigger level-0 compaction. A value < 0 means that * level-0 compaction will not be triggered by number of files at all. - * + *
<p>
    * Default: 4 * * @param level0FileNumCompactionTrigger The number of files to trigger @@ -68,7 +68,7 @@ public interface MutableColumnFamilyOptionsInterface< /** * Number of files to trigger level-0 compaction. A value < 0 means that * level-0 compaction will not be triggered by number of files at all. - * + *
<p>
    * Default: 4 * * @return The number of files to trigger @@ -109,7 +109,7 @@ public interface MutableColumnFamilyOptionsInterface< * @param maxBytesForLevelBase maximum bytes for level base. * * @return the reference to the current option. - * + *
<p>
    * See {@link AdvancedMutableColumnFamilyOptionsInterface#setMaxBytesForLevelMultiplier(double)} */ T setMaxBytesForLevelBase( @@ -127,7 +127,7 @@ public interface MutableColumnFamilyOptionsInterface< * * @return the upper-bound of the total size of level-1 files * in bytes. - * + *
<p>
    * See {@link AdvancedMutableColumnFamilyOptionsInterface#maxBytesForLevelMultiplier()} */ long maxBytesForLevelBase(); @@ -135,7 +135,7 @@ public interface MutableColumnFamilyOptionsInterface< /** * Compress blocks using the specified compression algorithm. This * parameter can be changed dynamically. - * + *
<p>
    * Default: SNAPPY_COMPRESSION, which gives lightweight but fast compression. * * @param compressionType Compression Type. @@ -147,7 +147,7 @@ public interface MutableColumnFamilyOptionsInterface< /** * Compress blocks using the specified compression algorithm. This * parameter can be changed dynamically. - * + *
<p>
    * Default: SNAPPY_COMPRESSION, which gives lightweight but fast compression. * * @return Compression type. diff --git a/java/src/main/java/org/rocksdb/MutableDBOptions.java b/java/src/main/java/org/rocksdb/MutableDBOptions.java index bfba1dab3..927e80522 100644 --- a/java/src/main/java/org/rocksdb/MutableDBOptions.java +++ b/java/src/main/java/org/rocksdb/MutableDBOptions.java @@ -11,13 +11,12 @@ import java.util.Map; import java.util.Objects; public class MutableDBOptions extends AbstractMutableOptions { - /** * User must use builder pattern, or parser. * * @param keys the keys * @param values the values - * + *
<p>
    * See {@link #builder()} and {@link #parse(String)}. */ private MutableDBOptions(final String[] keys, final String[] values) { @@ -37,11 +36,11 @@ public class MutableDBOptions extends AbstractMutableOptions { /** * Parses a String representation of MutableDBOptions - * + *
<p>
    * The format is: key1=value1;key2=value2;key3=value3 etc - * + *
<p>
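* For example (a sketch; the option key and the {@code db} name are
* illustrative):
* <pre>{@code
*   final MutableDBOptions mutableDbOpts =
*       MutableDBOptions.parse("max_background_jobs=4", false).build();
*   db.setDBOptions(mutableDbOpts);
* }</pre>
* <p>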
* For int[] values, each int should be separated by a colon, e.g. - * + *
<p>
    * key1=value1;intArrayKey1=1:2:3 * * @param str The string representation of the mutable db options @@ -49,7 +48,7 @@ public class MutableDBOptions extends AbstractMutableOptions { * * @return A builder for the mutable db options */ - public static MutableDBOptionsBuilder parse(final String str, boolean ignoreUnknown) { + public static MutableDBOptionsBuilder parse(final String str, final boolean ignoreUnknown) { Objects.requireNonNull(str); final List parsedOptions = OptionString.Parser.parse(str); @@ -93,8 +92,7 @@ public class MutableDBOptions extends AbstractMutableOptions { public static class MutableDBOptionsBuilder extends AbstractMutableOptionsBuilder implements MutableDBOptionsInterface { - - private final static Map ALL_KEYS_LOOKUP = new HashMap<>(); + private static final Map ALL_KEYS_LOOKUP = new HashMap<>(); static { for(final MutableDBOptionKey key : DBOption.values()) { ALL_KEYS_LOOKUP.put(key.name(), key); diff --git a/java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java b/java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java index bdf9d7bf6..8bf7b0d64 100644 --- a/java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java +++ b/java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java @@ -27,7 +27,7 @@ public interface MutableDBOptionsInterface * Specifies the maximum number of concurrent background compaction jobs, * submitted to the default LOW priority thread pool. * If you're increasing this, also consider increasing number of threads in @@ -52,7 +52,7 @@ public interface MutableDBOptionsInterface * Returns the maximum number of concurrent background compaction jobs, * submitted to the default LOW priority thread pool. * When increasing this number, we may also want to consider increasing @@ -72,9 +72,9 @@ public interface MutableDBOptionsInterface * DEFAULT: false - * + *
<p>
    * Dynamically changeable through * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)} * API. @@ -90,9 +90,9 @@ public interface MutableDBOptionsInterface * DEFAULT: false - * + *
<p>
    * Dynamically changeable through * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)} * API. @@ -105,7 +105,7 @@ public interface MutableDBOptionsInterface * Default: 1024 * 1024 (1 MB) * * @param writableFileMaxBufferSize the maximum buffer size @@ -118,7 +118,7 @@ public interface MutableDBOptionsInterface * Default: 1024 * 1024 (1 MB) * * @return the maximum buffer size @@ -137,11 +137,11 @@ public interface MutableDBOptionsInterface * Unit: bytes per second. - * + *
<p>
    * Default: 0 - * + *
<p>
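* For example, to impose a 16MB/s delayed write rate at runtime (a sketch;
* the {@code db} name is illustrative):
* <pre>{@code
*   db.setDBOptions(MutableDBOptions.builder()
*       .setDelayedWriteRate(16 * 1024 * 1024)
*       .build());
* }</pre>
* <p>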
    * Dynamically changeable through {@link RocksDB#setDBOptions(MutableDBOptions)}. * * @param delayedWriteRate the rate in bytes per second @@ -162,11 +162,11 @@ public interface MutableDBOptionsInterface * Unit: bytes per second. - * + *
<p>
    * Default: 0 - * + *
<p>
    * Dynamically changeable through {@link RocksDB#setDBOptions(MutableDBOptions)}. * * @return the rate in bytes per second @@ -358,7 +358,7 @@ public interface MutableDBOptionsInterface * Default: 0, turned off * * @param walBytesPerSync size in bytes @@ -368,7 +368,7 @@ public interface MutableDBOptionsInterface * Default: 0, turned off * * @return size in bytes @@ -383,7 +383,7 @@ public interface MutableDBOptionsInterface * - If `sync_file_range` is supported it achieves this by waiting for any * prior `sync_file_range`s to finish before proceeding. In this way, * processing (compression, etc.) can proceed uninhibited in the gap @@ -391,11 +391,11 @@ public interface MutableDBOptionsInterface * Note: Enabling this option does not provide any additional persistence * guarantees, as it may use `sync_file_range`, which does not write out * metadata. - * + *
<p>
    * Default: false * * @param strictBytesPerSync the bytes per sync @@ -405,7 +405,7 @@ public interface MutableDBOptionsInterface * See {@link #setStrictBytesPerSync(boolean)} * * @return the limit in bytes. @@ -415,9 +415,9 @@ public interface MutableDBOptionsInterface * That way RocksDB's compaction is doing sequential instead of random reads. - * + *
<p>
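* For example, to enable 2MB of compaction read-ahead at runtime (a sketch;
* the {@code db} name is illustrative):
* <pre>{@code
*   db.setDBOptions(MutableDBOptions.builder()
*       .setCompactionReadaheadSize(2 * 1024 * 1024)
*       .build());
* }</pre>
* <p>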
    * Default: 0 * * @param compactionReadaheadSize The compaction read-ahead size @@ -429,9 +429,9 @@ public interface MutableDBOptionsInterface * That way RocksDB's compaction is doing sequential instead of random reads. - * + *
<p>
    * Default: 0 * * @return The compaction read-ahead size diff --git a/java/src/main/java/org/rocksdb/MutableOptionValue.java b/java/src/main/java/org/rocksdb/MutableOptionValue.java index 7f69eeb9e..fe689b5d0 100644 --- a/java/src/main/java/org/rocksdb/MutableOptionValue.java +++ b/java/src/main/java/org/rocksdb/MutableOptionValue.java @@ -13,8 +13,7 @@ public abstract class MutableOptionValue { abstract String asString(); abstract T asObject(); - private static abstract class MutableOptionValueObject - extends MutableOptionValue { + private abstract static class MutableOptionValueObject extends MutableOptionValue { protected final T value; protected MutableOptionValueObject(final T value) { diff --git a/java/src/main/java/org/rocksdb/NativeComparatorWrapper.java b/java/src/main/java/org/rocksdb/NativeComparatorWrapper.java index 6acc146f7..5ee042a86 100644 --- a/java/src/main/java/org/rocksdb/NativeComparatorWrapper.java +++ b/java/src/main/java/org/rocksdb/NativeComparatorWrapper.java @@ -10,7 +10,7 @@ import java.nio.ByteBuffer; /** * A simple abstraction to allow a Java class to wrap a custom comparator * implemented in C++. - * + *
<p>
    * The native comparator must directly extend rocksdb::Comparator. */ public abstract class NativeComparatorWrapper diff --git a/java/src/main/java/org/rocksdb/OperationType.java b/java/src/main/java/org/rocksdb/OperationType.java index 301caea32..bf7353468 100644 --- a/java/src/main/java/org/rocksdb/OperationType.java +++ b/java/src/main/java/org/rocksdb/OperationType.java @@ -7,7 +7,7 @@ package org.rocksdb; /** * The type used to refer to a thread operation. - * + *
<p>
* A thread operation describes the high-level action of a thread; * examples include compaction and flush. */ diff --git a/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java b/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java index 5a2e1f3ed..ac3cdc210 100644 --- a/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java +++ b/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java @@ -94,16 +94,15 @@ public class OptimisticTransactionDB extends RocksDB return otdb; } - /** * This is similar to {@link #close()} except that it * throws an exception if any error occurs. - * + *
<p>
    * This will not fsync the WAL files. * If syncing is required, the caller must first call {@link #syncWal()} * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch * with {@link WriteOptions#setSync(boolean)} set to true. - * + *
<p>
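* For example, to make the WAL durable before closing (a sketch):
* <pre>{@code
*   db.syncWal();
*   db.closeE();  // surfaces any error as a RocksDBException
* }</pre>
* <p>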
    * See also {@link #close()}. * * @throws RocksDBException if an error occurs whilst closing. @@ -121,12 +120,12 @@ public class OptimisticTransactionDB extends RocksDB /** * This is similar to {@link #closeE()} except that it * silently ignores any errors. - * + *
<p>
    * This will not fsync the WAL files. * If syncing is required, the caller must first call {@link #syncWal()} * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch * with {@link WriteOptions#setSync(boolean)} set to true. - * + *
<p>
    * See also {@link #close()}. */ @Override @@ -209,8 +208,7 @@ public class OptimisticTransactionDB extends RocksDB final String path) throws RocksDBException; protected static native long[] open(final long handle, final String path, final byte[][] columnFamilyNames, final long[] columnFamilyOptions); - private native static void closeDatabase(final long handle) - throws RocksDBException; + private static native void closeDatabase(final long handle) throws RocksDBException; private native long beginTransaction(final long handle, final long writeOptionsHandle); private native long beginTransaction(final long handle, diff --git a/java/src/main/java/org/rocksdb/OptimisticTransactionOptions.java b/java/src/main/java/org/rocksdb/OptimisticTransactionOptions.java index 250edf806..a2f5d85ab 100644 --- a/java/src/main/java/org/rocksdb/OptimisticTransactionOptions.java +++ b/java/src/main/java/org/rocksdb/OptimisticTransactionOptions.java @@ -43,7 +43,7 @@ public class OptimisticTransactionOptions extends RocksObject return this; } - private native static long newOptimisticTransactionOptions(); + private static native long newOptimisticTransactionOptions(); private native boolean isSetSnapshot(final long handle); private native void setSetSnapshot(final long handle, final boolean setSnapshot); diff --git a/java/src/main/java/org/rocksdb/OptionString.java b/java/src/main/java/org/rocksdb/OptionString.java index a89b3313d..61d2a94fe 100644 --- a/java/src/main/java/org/rocksdb/OptionString.java +++ b/java/src/main/java/org/rocksdb/OptionString.java @@ -10,13 +10,13 @@ import java.util.List; import java.util.Objects; public class OptionString { - private final static char kvPairSeparator = ';'; - private final static char kvSeparator = '='; - private final static char complexValueBegin = '{'; - private final static char complexValueEnd = '}'; - private final static char wrappedValueBegin = '{'; - private final static char wrappedValueEnd = '}'; - private final static char arrayValueSeparator = ':'; + private static final char kvPairSeparator = ';'; + private static final char kvSeparator = '='; + private static final char complexValueBegin = '{'; + private static final char complexValueEnd = '}'; + private static final char wrappedValueBegin = '{'; + private static final char wrappedValueEnd = '}'; + private static final char arrayValueSeparator = ':'; static class Value { final List list; diff --git a/java/src/main/java/org/rocksdb/Options.java b/java/src/main/java/org/rocksdb/Options.java index 54f88262b..08a07661c 100644 --- a/java/src/main/java/org/rocksdb/Options.java +++ b/java/src/main/java/org/rocksdb/Options.java @@ -11,7 +11,7 @@ import java.util.*; /** * Options to control the behavior of a database. It will be used * during the creation of a {@link org.rocksdb.RocksDB} (i.e., RocksDB.open()). - * + *
<p>
* As a descendant of {@link AbstractNativeReference}, this class is {@link AutoCloseable} * and will be automatically released if opened in the preamble of a try-with-resources block. */ @@ -33,7 +33,7 @@ public class Options extends RocksObject if (properties == null || properties.size() == 0) { throw new IllegalArgumentException("Properties value must contain at least one value."); } - StringBuilder stringBuilder = new StringBuilder(); + final StringBuilder stringBuilder = new StringBuilder(); for (final String name : properties.stringPropertyNames()) { stringBuilder.append(name); stringBuilder.append("="); @@ -45,7 +45,7 @@ public class Options extends RocksObject /** * Construct options for opening a RocksDB. - * + *
<p>
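* For example (a sketch; the path is illustrative):
* <pre>{@code
*   try (final Options options = new Options().setCreateIfMissing(true);
*        final RocksDB db = RocksDB.open(options, "/path/to/db")) {
*     // both handles are released automatically at the end of the block
*   }
* }</pre>
* <p>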
* This constructor will create (by allocating a block of memory) * an {@code rocksdb::Options} on the C++ side. */ @@ -71,13 +71,13 @@ public class Options extends RocksObject /** * Copy constructor for Options. - * + *
<p>
    * NOTE: This does a shallow copy, which means comparator, merge_operator * and other pointers will be cloned! * * @param other The Options to copy. */ - public Options(Options other) { + public Options(final Options other) { super(copyOptions(other.nativeHandle_)); this.env_ = other.env_; this.memTableConfig_ = other.memTableConfig_; @@ -179,8 +179,7 @@ public class Options extends RocksObject } @Override - public Options optimizeForPointLookup( - long blockCacheSizeMb) { + public Options optimizeForPointLookup(final long blockCacheSizeMb) { optimizeForPointLookup(nativeHandle_, blockCacheSizeMb); return this; @@ -194,8 +193,7 @@ public class Options extends RocksObject } @Override - public Options optimizeLevelStyleCompaction( - long memtableMemoryBudget) { + public Options optimizeLevelStyleCompaction(final long memtableMemoryBudget) { optimizeLevelStyleCompaction(nativeHandle_, memtableMemoryBudget); return this; @@ -388,8 +386,8 @@ public class Options extends RocksObject assert(isOwningHandle()); final int len = dbPaths.size(); - final String paths[] = new String[len]; - final long targetSizes[] = new long[len]; + final String[] paths = new String[len]; + final long[] targetSizes = new long[len]; int i = 0; for(final DbPath dbPath : dbPaths) { @@ -407,8 +405,8 @@ public class Options extends RocksObject if(len == 0) { return Collections.emptyList(); } else { - final String paths[] = new String[len]; - final long targetSizes[] = new long[len]; + final String[] paths = new String[len]; + final long[] targetSizes = new long[len]; dbPaths(nativeHandle_, paths, targetSizes); @@ -651,7 +649,7 @@ public class Options extends RocksObject } @Override - public Options setMaxWriteBatchGroupSizeBytes(long maxWriteBatchGroupSizeBytes) { + public Options setMaxWriteBatchGroupSizeBytes(final long maxWriteBatchGroupSizeBytes) { setMaxWriteBatchGroupSizeBytes(nativeHandle_, maxWriteBatchGroupSizeBytes); return this; } @@ -1066,7 +1064,8 @@ public class Options extends RocksObject } @Override - public Options setSkipCheckingSstFileSizesOnDbOpen(boolean skipCheckingSstFileSizesOnDbOpen) { + public Options setSkipCheckingSstFileSizesOnDbOpen( + final boolean skipCheckingSstFileSizesOnDbOpen) { setSkipCheckingSstFileSizesOnDbOpen(nativeHandle_, skipCheckingSstFileSizesOnDbOpen); return this; } @@ -1377,12 +1376,11 @@ public class Options extends RocksObject } @Override - public Options setCompressionType(CompressionType compressionType) { + public Options setCompressionType(final CompressionType compressionType) { setCompressionType(nativeHandle_, compressionType.getValue()); return this; } - @Override public Options setBottommostCompressionType( final CompressionType bottommostCompressionType) { @@ -1442,7 +1440,7 @@ public class Options extends RocksObject } @Override - public Options setNumLevels(int numLevels) { + public Options setNumLevels(final int numLevels) { setNumLevels(nativeHandle_, numLevels); return this; } @@ -1490,7 +1488,7 @@ public class Options extends RocksObject } @Override - public Options setTargetFileSizeBase(long targetFileSizeBase) { + public Options setTargetFileSizeBase(final long targetFileSizeBase) { setTargetFileSizeBase(nativeHandle_, targetFileSizeBase); return this; } @@ -1501,7 +1499,7 @@ public class Options extends RocksObject } @Override - public Options setTargetFileSizeMultiplier(int multiplier) { + public Options setTargetFileSizeMultiplier(final int multiplier) { setTargetFileSizeMultiplier(nativeHandle_, multiplier); return this; } @@ -1662,7 +1660,7 @@ 
public class Options extends RocksObject } @Override - public Options setMaxSuccessiveMerges(long maxSuccessiveMerges) { + public Options setMaxSuccessiveMerges(final long maxSuccessiveMerges) { setMaxSuccessiveMerges(nativeHandle_, maxSuccessiveMerges); return this; } @@ -1692,9 +1690,7 @@ public class Options extends RocksObject } @Override - public Options - setMemtableHugePageSize( - long memtableHugePageSize) { + public Options setMemtableHugePageSize(final long memtableHugePageSize) { setMemtableHugePageSize(nativeHandle_, memtableHugePageSize); return this; @@ -1706,7 +1702,7 @@ public class Options extends RocksObject } @Override - public Options setSoftPendingCompactionBytesLimit(long softPendingCompactionBytesLimit) { + public Options setSoftPendingCompactionBytesLimit(final long softPendingCompactionBytesLimit) { setSoftPendingCompactionBytesLimit(nativeHandle_, softPendingCompactionBytesLimit); return this; @@ -1718,7 +1714,7 @@ public class Options extends RocksObject } @Override - public Options setHardPendingCompactionBytesLimit(long hardPendingCompactionBytesLimit) { + public Options setHardPendingCompactionBytesLimit(final long hardPendingCompactionBytesLimit) { setHardPendingCompactionBytesLimit(nativeHandle_, hardPendingCompactionBytesLimit); return this; } @@ -1729,7 +1725,7 @@ public class Options extends RocksObject } @Override - public Options setLevel0FileNumCompactionTrigger(int level0FileNumCompactionTrigger) { + public Options setLevel0FileNumCompactionTrigger(final int level0FileNumCompactionTrigger) { setLevel0FileNumCompactionTrigger(nativeHandle_, level0FileNumCompactionTrigger); return this; } @@ -1740,7 +1736,7 @@ public class Options extends RocksObject } @Override - public Options setLevel0SlowdownWritesTrigger(int level0SlowdownWritesTrigger) { + public Options setLevel0SlowdownWritesTrigger(final int level0SlowdownWritesTrigger) { setLevel0SlowdownWritesTrigger(nativeHandle_, level0SlowdownWritesTrigger); return this; } @@ -1751,7 +1747,7 @@ public class Options extends RocksObject } @Override - public Options setLevel0StopWritesTrigger(int level0StopWritesTrigger) { + public Options setLevel0StopWritesTrigger(final int level0StopWritesTrigger) { setLevel0StopWritesTrigger(nativeHandle_, level0StopWritesTrigger); return this; } @@ -1762,7 +1758,8 @@ public class Options extends RocksObject } @Override - public Options setMaxBytesForLevelMultiplierAdditional(int[] maxBytesForLevelMultiplierAdditional) { + public Options setMaxBytesForLevelMultiplierAdditional( + final int[] maxBytesForLevelMultiplierAdditional) { setMaxBytesForLevelMultiplierAdditional(nativeHandle_, maxBytesForLevelMultiplierAdditional); return this; } @@ -1773,7 +1770,7 @@ public class Options extends RocksObject } @Override - public Options setParanoidFileChecks(boolean paranoidFileChecks) { + public Options setParanoidFileChecks(final boolean paranoidFileChecks) { setParanoidFileChecks(nativeHandle_, paranoidFileChecks); return this; } @@ -1892,7 +1889,7 @@ public class Options extends RocksObject } @Override - public Options setAvoidUnnecessaryBlockingIO(boolean avoidUnnecessaryBlockingIO) { + public Options setAvoidUnnecessaryBlockingIO(final boolean avoidUnnecessaryBlockingIO) { setAvoidUnnecessaryBlockingIO(nativeHandle_, avoidUnnecessaryBlockingIO); return this; } @@ -1904,7 +1901,7 @@ public class Options extends RocksObject } @Override - public Options setPersistStatsToDisk(boolean persistStatsToDisk) { + public Options setPersistStatsToDisk(final boolean persistStatsToDisk) { 
setPersistStatsToDisk(nativeHandle_, persistStatsToDisk); return this; } @@ -1916,7 +1913,7 @@ public class Options extends RocksObject } @Override - public Options setWriteDbidToManifest(boolean writeDbidToManifest) { + public Options setWriteDbidToManifest(final boolean writeDbidToManifest) { setWriteDbidToManifest(nativeHandle_, writeDbidToManifest); return this; } @@ -1928,7 +1925,7 @@ public class Options extends RocksObject } @Override - public Options setLogReadaheadSize(long logReadaheadSize) { + public Options setLogReadaheadSize(final long logReadaheadSize) { setLogReadaheadSize(nativeHandle_, logReadaheadSize); return this; } @@ -1940,7 +1937,7 @@ public class Options extends RocksObject } @Override - public Options setBestEffortsRecovery(boolean bestEffortsRecovery) { + public Options setBestEffortsRecovery(final boolean bestEffortsRecovery) { setBestEffortsRecovery(nativeHandle_, bestEffortsRecovery); return this; } @@ -1952,7 +1949,7 @@ public class Options extends RocksObject } @Override - public Options setMaxBgErrorResumeCount(int maxBgerrorResumeCount) { + public Options setMaxBgErrorResumeCount(final int maxBgerrorResumeCount) { setMaxBgErrorResumeCount(nativeHandle_, maxBgerrorResumeCount); return this; } @@ -1964,7 +1961,7 @@ public class Options extends RocksObject } @Override - public Options setBgerrorResumeRetryInterval(long bgerrorResumeRetryInterval) { + public Options setBgerrorResumeRetryInterval(final long bgerrorResumeRetryInterval) { setBgerrorResumeRetryInterval(nativeHandle_, bgerrorResumeRetryInterval); return this; } @@ -1976,7 +1973,7 @@ public class Options extends RocksObject } @Override - public Options setSstPartitionerFactory(SstPartitionerFactory sstPartitionerFactory) { + public Options setSstPartitionerFactory(final SstPartitionerFactory sstPartitionerFactory) { setSstPartitionerFactory(nativeHandle_, sstPartitionerFactory.nativeHandle_); this.sstPartitionerFactory_ = sstPartitionerFactory; return this; @@ -2038,7 +2035,7 @@ public class Options extends RocksObject } @Override - public Options setBlobCompressionType(CompressionType compressionType) { + public Options setBlobCompressionType(final CompressionType compressionType) { setBlobCompressionType(nativeHandle_, compressionType.getValue()); return this; } @@ -2119,10 +2116,9 @@ public class Options extends RocksObject // END options for blobs (integrated BlobDB) // - private native static long newOptions(); - private native static long newOptions(long dbOptHandle, - long cfOptHandle); - private native static long copyOptions(long handle); + private static native long newOptions(); + private static native long newOptions(long dbOptHandle, long cfOptHandle); + private static native long copyOptions(long handle); @Override protected final native void disposeInternal(final long handle); private native void setEnv(long optHandle, long envHandle); private native void prepareForBulkLoad(long handle); diff --git a/java/src/main/java/org/rocksdb/OptionsUtil.java b/java/src/main/java/org/rocksdb/OptionsUtil.java index e21121a2b..612023d8e 100644 --- a/java/src/main/java/org/rocksdb/OptionsUtil.java +++ b/java/src/main/java/org/rocksdb/OptionsUtil.java @@ -12,12 +12,12 @@ public class OptionsUtil { * A static method to construct the DBOptions and ColumnFamilyDescriptors by * loading the latest RocksDB options file stored in the specified rocksdb * database. - * + *
<p>
* Note that all the pointer options (except table_factory, which will * be described in more detail below) will be initialized with the default * values. Developers can further initialize them after this function call. * Below is an example list of pointer options which will be initialized. - * + *
<p>
    * - env * - memtable_factory * - compaction_filter_factory @@ -25,7 +25,7 @@ public class OptionsUtil { * - comparator * - merge_operator * - compaction_filter - * + *
<p>
    * For table_factory, this function further supports deserializing * BlockBasedTableFactory and its BlockBasedTableOptions except the * pointer options of BlockBasedTableOptions (flush_block_policy_factory, @@ -43,8 +43,9 @@ public class OptionsUtil { * @throws RocksDBException thrown if error happens in underlying * native library. */ - public static void loadLatestOptions(ConfigOptions configOptions, String dbPath, - DBOptions dbOptions, List cfDescs) throws RocksDBException { + public static void loadLatestOptions(final ConfigOptions configOptions, final String dbPath, + final DBOptions dbOptions, final List cfDescs) + throws RocksDBException { loadLatestOptions(configOptions.nativeHandle_, dbPath, dbOptions.nativeHandle_, cfDescs); } @@ -62,8 +63,9 @@ public class OptionsUtil { * @throws RocksDBException thrown if error happens in underlying * native library. */ - public static void loadOptionsFromFile(ConfigOptions configOptions, String optionsFileName, - DBOptions dbOptions, List cfDescs) throws RocksDBException { + public static void loadOptionsFromFile(final ConfigOptions configOptions, + final String optionsFileName, final DBOptions dbOptions, + final List cfDescs) throws RocksDBException { loadOptionsFromFile( configOptions.nativeHandle_, optionsFileName, dbOptions.nativeHandle_, cfDescs); } @@ -78,7 +80,8 @@ public class OptionsUtil { * @throws RocksDBException thrown if error happens in underlying * native library. */ - public static String getLatestOptionsFileName(String dbPath, Env env) throws RocksDBException { + public static String getLatestOptionsFileName(final String dbPath, final Env env) + throws RocksDBException { return getLatestOptionsFileName(dbPath, env.nativeHandle_); } @@ -89,10 +92,10 @@ public class OptionsUtil { private OptionsUtil() {} // native methods - private native static void loadLatestOptions(long cfgHandle, String dbPath, long dbOptionsHandle, + private static native void loadLatestOptions(long cfgHandle, String dbPath, long dbOptionsHandle, List cfDescs) throws RocksDBException; - private native static void loadOptionsFromFile(long cfgHandle, String optionsFileName, + private static native void loadOptionsFromFile(long cfgHandle, String optionsFileName, long dbOptionsHandle, List cfDescs) throws RocksDBException; - private native static String getLatestOptionsFileName(String dbPath, long envHandle) + private static native String getLatestOptionsFileName(String dbPath, long envHandle) throws RocksDBException; } diff --git a/java/src/main/java/org/rocksdb/PersistentCache.java b/java/src/main/java/org/rocksdb/PersistentCache.java index aed565297..5297111e6 100644 --- a/java/src/main/java/org/rocksdb/PersistentCache.java +++ b/java/src/main/java/org/rocksdb/PersistentCache.java @@ -18,9 +18,9 @@ public class PersistentCache extends RocksObject { logger.nativeHandle_, optimizedForNvm)); } - private native static long newPersistentCache(final long envHandle, - final String path, final long size, final long loggerHandle, - final boolean optimizedForNvm) throws RocksDBException; + private static native long newPersistentCache(final long envHandle, final String path, + final long size, final long loggerHandle, final boolean optimizedForNvm) + throws RocksDBException; @Override protected final native void disposeInternal(final long handle); } diff --git a/java/src/main/java/org/rocksdb/PlainTableConfig.java b/java/src/main/java/org/rocksdb/PlainTableConfig.java index c09998167..46077ba56 100644 --- a/java/src/main/java/org/rocksdb/PlainTableConfig.java +++ 
b/java/src/main/java/org/rocksdb/PlainTableConfig.java @@ -48,7 +48,7 @@ public class PlainTableConfig extends TableFormatConfig { * @param keySize the length of the user key. * @return the reference to the current config. */ - public PlainTableConfig setKeySize(int keySize) { + public PlainTableConfig setKeySize(final int keySize) { keySize_ = keySize; return this; } @@ -68,7 +68,7 @@ public class PlainTableConfig extends TableFormatConfig { * @param bitsPerKey the number of bits per key for bloom filer. * @return the reference to the current config. */ - public PlainTableConfig setBloomBitsPerKey(int bitsPerKey) { + public PlainTableConfig setBloomBitsPerKey(final int bitsPerKey) { bloomBitsPerKey_ = bitsPerKey; return this; } @@ -89,7 +89,7 @@ public class PlainTableConfig extends TableFormatConfig { * @param ratio the hash table ratio. * @return the reference to the current config. */ - public PlainTableConfig setHashTableRatio(double ratio) { + public PlainTableConfig setHashTableRatio(final double ratio) { hashTableRatio_ = ratio; return this; } @@ -110,7 +110,7 @@ public class PlainTableConfig extends TableFormatConfig { * @param sparseness the index sparseness. * @return the reference to the current config. */ - public PlainTableConfig setIndexSparseness(int sparseness) { + public PlainTableConfig setIndexSparseness(final int sparseness) { indexSparseness_ = sparseness; return this; } @@ -134,7 +134,7 @@ public class PlainTableConfig extends TableFormatConfig { * @param hugePageTlbSize huge page tlb size * @return the reference to the current config. */ - public PlainTableConfig setHugePageTlbSize(int hugePageTlbSize) { + public PlainTableConfig setHugePageTlbSize(final int hugePageTlbSize) { this.hugePageTlbSize_ = hugePageTlbSize; return this; } @@ -166,7 +166,7 @@ public class PlainTableConfig extends TableFormatConfig { * @param encodingType {@link org.rocksdb.EncodingType} value. * @return the reference to the current config. */ - public PlainTableConfig setEncodingType(EncodingType encodingType) { + public PlainTableConfig setEncodingType(final EncodingType encodingType) { this.encodingType_ = encodingType; return this; } @@ -188,7 +188,7 @@ public class PlainTableConfig extends TableFormatConfig { * scan mode shall be enabled. * @return the reference to the current config. */ - public PlainTableConfig setFullScanMode(boolean fullScanMode) { + public PlainTableConfig setFullScanMode(final boolean fullScanMode) { this.fullScanMode_ = fullScanMode; return this; } @@ -212,7 +212,7 @@ public class PlainTableConfig extends TableFormatConfig { * be stored in a file * @return the reference to the current config. */ - public PlainTableConfig setStoreIndexInFile(boolean storeIndexInFile) { + public PlainTableConfig setStoreIndexInFile(final boolean storeIndexInFile) { this.storeIndexInFile_ = storeIndexInFile; return this; } diff --git a/java/src/main/java/org/rocksdb/ReadOptions.java b/java/src/main/java/org/rocksdb/ReadOptions.java old mode 100755 new mode 100644 index c638b17b7..65b781d16 --- a/java/src/main/java/org/rocksdb/ReadOptions.java +++ b/java/src/main/java/org/rocksdb/ReadOptions.java @@ -7,7 +7,7 @@ package org.rocksdb; /** * The class that controls the get behavior. - * + *
<p>
* Note that dispose() must be called before a ReadOptions instance * becomes out-of-scope to release the allocated memory in C++. */ @@ -27,13 +27,13 @@ public class ReadOptions extends RocksObject { /** * Copy constructor. - * + *
<p>
    * NOTE: This does a shallow copy, which means snapshot, iterate_upper_bound * and other pointers will be cloned! * * @param other The ReadOptions to copy. */ - public ReadOptions(ReadOptions other) { + public ReadOptions(final ReadOptions other) { super(copyReadOptions(other.nativeHandle_)); this.iterateLowerBoundSlice_ = other.iterateLowerBoundSlice_; this.iterateUpperBoundSlice_ = other.iterateUpperBoundSlice_; @@ -106,7 +106,7 @@ public class ReadOptions extends RocksObject { */ public Snapshot snapshot() { assert(isOwningHandle()); - long snapshotHandle = snapshot(nativeHandle_); + final long snapshotHandle = snapshot(nativeHandle_); if (snapshotHandle != 0) { return new Snapshot(snapshotHandle); } @@ -128,7 +128,7 @@ public class ReadOptions extends RocksObject { if (snapshot != null) { setSnapshot(nativeHandle_, snapshot.nativeHandle_); } else { - setSnapshot(nativeHandle_, 0l); + setSnapshot(nativeHandle_, 0L); } return this; } @@ -256,7 +256,7 @@ public class ReadOptions extends RocksObject { * Enforce that the iterator only iterates over the same prefix as the seek. * This option is effective only for prefix seeks, i.e. prefix_extractor is * non-null for the column family and {@link #totalOrderSeek()} is false. - * Unlike iterate_upper_bound, {@link #setPrefixSameAsStart(boolean)} only + * Unlike iterate_upper_bound, {@code #setPrefixSameAsStart(boolean)} only * works within a prefix but in both directions. * * @param prefixSameAsStart if true, then the iterator only iterates over the @@ -300,7 +300,7 @@ public class ReadOptions extends RocksObject { * If true, when PurgeObsoleteFile is called in CleanupIteratorState, we * schedule a background job in the flush job queue and delete obsolete files * in background. - * + *
<p>
    * Default: false * * @return true when PurgeObsoleteFile is called in CleanupIteratorState @@ -314,7 +314,7 @@ public class ReadOptions extends RocksObject { * If true, when PurgeObsoleteFile is called in CleanupIteratorState, we * schedule a background job in the flush job queue and delete obsolete files * in background. - * + *
<p>
    * Default: false * * @param backgroundPurgeOnIteratorCleanup true when PurgeObsoleteFile is @@ -333,7 +333,7 @@ public class ReadOptions extends RocksObject { * If non-zero, NewIterator will create a new table reader which * performs reads of the given size. Using a large size (> 2MB) can * improve the performance of forward iteration on spinning disks. - * + *
<p>
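* For example, for long forward scans on spinning disks (a sketch):
* <pre>{@code
*   try (final ReadOptions readOptions = new ReadOptions()
*            .setReadaheadSize(4 * 1024 * 1024)) {
*     // iterate with db.newIterator(readOptions) ...
*   }
* }</pre>
* <p>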
    * Default: 0 * * @return The readahead size is bytes @@ -347,7 +347,7 @@ public class ReadOptions extends RocksObject { * If non-zero, NewIterator will create a new table reader which * performs reads of the given size. Using a large size (> 2MB) can * improve the performance of forward iteration on spinning disks. - * + *
<p>
    * Default: 0 * * @param readaheadSize The readahead size is bytes @@ -375,7 +375,7 @@ public class ReadOptions extends RocksObject { * A threshold for the number of keys that can be skipped before failing an * iterator seek as incomplete. The default value of 0 should be used to * never fail a request as incomplete, even on skipping too many keys. - * + *
<p>
    * Default: 0 * * @param maxSkippableInternalKeys the number of keys that can be skipped @@ -394,7 +394,7 @@ public class ReadOptions extends RocksObject { * If true, keys deleted using the DeleteRange() API will be visible to * readers until they are naturally deleted during compaction. This improves * read performance in DBs with many range deletions. - * + *
<p>
    * Default: false * * @return true if keys deleted using the DeleteRange() API will be visible @@ -408,7 +408,7 @@ public class ReadOptions extends RocksObject { * If true, keys deleted using the DeleteRange() API will be visible to * readers until they are naturally deleted during compaction. This improves * read performance in DBs with many range deletions. - * + *
<p>
    * Default: false * * @param ignoreRangeDeletions true if keys deleted using the DeleteRange() @@ -425,14 +425,14 @@ public class ReadOptions extends RocksObject { * Defines the smallest key at which the backward * iterator can return an entry. Once the bound is passed, * {@link RocksIterator#isValid()} will be false. - * + *
<p>
    * The lower bound is inclusive i.e. the bound value is a valid * entry. - * + *
<p>
    * If prefix_extractor is not null, the Seek target and `iterate_lower_bound` * need to have the same prefix. This is because ordering is not guaranteed * outside of prefix domain. - * + *
<p>
    * Default: null * * @param iterateLowerBound Slice representing the lower bound @@ -450,7 +450,7 @@ public class ReadOptions extends RocksObject { /** * Returns the smallest key at which the backward * iterator can return an entry. - * + *
<p>
    * The lower bound is inclusive i.e. the bound value is a valid entry. * * @return the smallest key, or null if there is no lower bound defined. @@ -468,15 +468,15 @@ public class ReadOptions extends RocksObject { /** * Defines the extent up to which the forward iterator - * can returns entries. Once the bound is reached, + * can return entries. Once the bound is reached, * {@link RocksIterator#isValid()} will be false. - * + *
<p>
    * The upper bound is exclusive i.e. the bound value is not a valid entry. - * + *
<p>
    * If prefix_extractor is not null, the Seek target and iterate_upper_bound * need to have the same prefix. This is because ordering is not guaranteed * outside of prefix domain. - * + *
<p>
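* For example (a sketch; the key range is illustrative):
* <pre>{@code
*   try (final Slice upperBound = new Slice("key9");
*        final ReadOptions readOptions = new ReadOptions()
*            .setIterateUpperBound(upperBound);
*        final RocksIterator it = db.newIterator(readOptions)) {
*     for (it.seekToFirst(); it.isValid(); it.next()) {
*       // only keys strictly less than "key9" are visited
*     }
*   }
* }</pre>
* <p>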
    * Default: null * * @param iterateUpperBound Slice representing the upper bound @@ -494,7 +494,7 @@ public class ReadOptions extends RocksObject { /** * Returns the largest key at which the forward * iterator can return an entry. - * + *
<p>
    * The upper bound is exclusive i.e. the bound value is not a valid entry. * * @return the largest key, or null if there is no upper bound defined. @@ -516,7 +516,7 @@ public class ReadOptions extends RocksObject { * properties of each table during iteration. If the callback returns false, * the table will not be scanned. This option only affects Iterators and has * no impact on point lookups. - * + *
<p>
    * Default: null (every table will be scanned) * * @param tableFilter the table filter for the callback. @@ -568,7 +568,7 @@ public class ReadOptions extends RocksObject { * only the most recent version visible to timestamp is returned. * The user-specified timestamp feature is still under active development, * and the API is subject to change. - * + *
<p>
    * Default: null * @see #iterStartTs() * @return Reference to timestamp or null if there is no timestamp defined. @@ -594,7 +594,7 @@ public class ReadOptions extends RocksObject { * only the most recent version visible to timestamp is returned. * The user-specified timestamp feature is still under active development, * and the API is subject to change. - * + *
<p>
    * Default: null * @see #setIterStartTs(AbstractSlice) * @param timestamp Slice representing the timestamp @@ -618,7 +618,7 @@ public class ReadOptions extends RocksObject { * only the most recent version visible to timestamp is returned. * The user-specified timestamp feature is still under active development, * and the API is subject to change. - * + *
<p>
    * Default: null * @return Reference to lower bound timestamp or null if there is no lower bound timestamp * defined. @@ -644,7 +644,7 @@ public class ReadOptions extends RocksObject { * only the most recent version visible to timestamp is returned. * The user-specified timestamp feature is still under active development, * and the API is subject to change. - * + *
<p>
    * Default: null * * @param iterStartTs Reference to lower bound timestamp or null if there is no lower bound @@ -727,7 +727,7 @@ public class ReadOptions extends RocksObject { * It limits the maximum cumulative value size of the keys in batch while * reading through MultiGet. Once the cumulative value size exceeds this * soft limit then all the remaining keys are returned with status Aborted. - * + *
<p>
* Default: {@code std::numeric_limits<uint64_t>::max()} @return actual valueSizeSoftLimit */ @@ -740,7 +740,7 @@ public class ReadOptions extends RocksObject { * It limits the maximum cumulative value size of the keys in batch while * reading through MultiGet. Once the cumulative value size exceeds this * soft limit then all the remaining keys are returned with status Aborted. - * + *
<p>
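* For example, to cap a MultiGet batch at 1MB of cumulative value data
* (a sketch):
* <pre>{@code
*   try (final ReadOptions readOptions = new ReadOptions()
*            .setValueSizeSoftLimit(1024 * 1024)) {
*     // pass readOptions to the MultiGet variants ...
*   }
* }</pre>
* <p>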
    * Default: {@code std::numeric_limits::max()} * * @param valueSizeSoftLimit the maximum cumulative value size of the keys @@ -765,10 +765,9 @@ public class ReadOptions extends RocksObject { private AbstractSlice timestampSlice_; private AbstractSlice iterStartTs_; - private native static long newReadOptions(); - private native static long newReadOptions(final boolean verifyChecksums, - final boolean fillCache); - private native static long copyReadOptions(long handle); + private static native long newReadOptions(); + private static native long newReadOptions(final boolean verifyChecksums, final boolean fillCache); + private static native long copyReadOptions(long handle); @Override protected final native void disposeInternal(final long handle); private native boolean verifyChecksums(long handle); diff --git a/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java b/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java index 6ee81d858..e96694313 100644 --- a/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java +++ b/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java @@ -14,5 +14,5 @@ public class RemoveEmptyValueCompactionFilter super(createNewRemoveEmptyValueCompactionFilter0()); } - private native static long createNewRemoveEmptyValueCompactionFilter0(); + private static native long createNewRemoveEmptyValueCompactionFilter0(); } diff --git a/java/src/main/java/org/rocksdb/RestoreOptions.java b/java/src/main/java/org/rocksdb/RestoreOptions.java index 54dc0e61c..a6b43d476 100644 --- a/java/src/main/java/org/rocksdb/RestoreOptions.java +++ b/java/src/main/java/org/rocksdb/RestoreOptions.java @@ -7,7 +7,7 @@ package org.rocksdb; /** * RestoreOptions to control the behavior of restore. - * + *
<p>
* Note that dispose() must be called before this instance becomes out-of-scope * to release the allocated memory in C++. * @@ -27,6 +27,6 @@ public class RestoreOptions extends RocksObject { super(newRestoreOptions(keepLogFiles)); } - private native static long newRestoreOptions(boolean keepLogFiles); + private static native long newRestoreOptions(boolean keepLogFiles); @Override protected final native void disposeInternal(final long handle); } diff --git a/java/src/main/java/org/rocksdb/RocksCallbackObject.java b/java/src/main/java/org/rocksdb/RocksCallbackObject.java index 8d7a867ee..2b9de4b8e 100644 --- a/java/src/main/java/org/rocksdb/RocksCallbackObject.java +++ b/java/src/main/java/org/rocksdb/RocksCallbackObject.java @@ -11,10 +11,10 @@ import java.util.List; * RocksCallbackObject is similar to {@link RocksObject} but varies * in its construction as it is designed for Java objects which have functions * which are called from C++ via JNI. - * + *
<p>
* RocksCallbackObject is the base-class for any RocksDB class that acts as a * callback from some underlying native C++ {@code rocksdb} object. - * + *
<p>
    * The use of {@code RocksObject} should always be preferred over * {@link RocksCallbackObject} if callbacks are not required. */ diff --git a/java/src/main/java/org/rocksdb/RocksDB.java b/java/src/main/java/org/rocksdb/RocksDB.java index 77484288f..fb35208bc 100644 --- a/java/src/main/java/org/rocksdb/RocksDB.java +++ b/java/src/main/java/org/rocksdb/RocksDB.java @@ -9,10 +9,7 @@ import static java.nio.charset.StandardCharsets.UTF_8; import java.io.IOException; import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.Map; +import java.util.*; import java.util.concurrent.atomic.AtomicReference; import org.rocksdb.util.Environment; @@ -343,7 +340,7 @@ public class RocksDB extends RocksObject { * The factory constructor of RocksDB that opens a RocksDB instance in * Read-Only mode given the path to the database using the specified * options and db path. - * + *
<p>
<p>
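* For example (a sketch; the path is illustrative):
* <pre>{@code
*   try (final Options options = new Options()) {
*     try (final RocksDB db = RocksDB.openReadOnly(options, "/path/to/db")) {
*       // read-only access
*     }
*   } // the options outlive the DB, as required below
* }</pre>
* <p>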
<p>
    * Options instance *should* not be disposed before all DBs using this options * instance have been closed. If user doesn't call options dispose explicitly, * then this options instance will be GC'd automatically. @@ -501,7 +498,7 @@ public class RocksDB extends RocksObject { /** * Open DB as secondary instance with only the default column family. - * + *
<p>
    * The secondary instance can dynamically tail the MANIFEST of * a primary that must have already been created. User can call * {@link #tryCatchUpWithPrimary()} to make the secondary instance catch up @@ -538,7 +535,7 @@ public class RocksDB extends RocksObject { /** * Open DB as secondary instance with column families. * You can open a subset of column families in secondary mode. - * + *
<p>
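* For example, with just the default column family (a sketch; the paths are
* illustrative):
* <pre>{@code
*   try (final Options options = new Options();
*        final RocksDB secondary = RocksDB.openAsSecondary(
*            options, "/path/to/primary", "/path/to/secondary")) {
*     secondary.tryCatchUpWithPrimary();
*     // serve reads from the secondary ...
*   }
* }</pre>
* <p>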
    * The secondary instance can dynamically tail the MANIFEST of * a primary that must have already been created. User can call * {@link #tryCatchUpWithPrimary()} to make the secondary instance catch up @@ -598,12 +595,12 @@ public class RocksDB extends RocksObject { /** * This is similar to {@link #close()} except that it * throws an exception if any error occurs. - * + *
<p>
    * This will not fsync the WAL files. * If syncing is required, the caller must first call {@link #syncWal()} * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch * with {@link WriteOptions#setSync(boolean)} set to true. - * + *
<p>
    * See also {@link #close()}. * * @throws RocksDBException if an error occurs whilst closing. @@ -626,12 +623,12 @@ public class RocksDB extends RocksObject { /** * This is similar to {@link #closeE()} except that it * silently ignores any errors. - * + *
<p>
    * This will not fsync the WAL files. * If syncing is required, the caller must first call {@link #syncWal()} * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch * with {@link WriteOptions#setSync(boolean)} set to true. - * + *
<p>
    * See also {@link #close()}. */ @Override @@ -711,8 +708,8 @@ public class RocksDB extends RocksObject { columnFamilyOptions.nativeHandle_, cfNames); final List columnFamilyHandles = new ArrayList<>(cfHandles.length); - for (int i = 0; i < cfHandles.length; i++) { - final ColumnFamilyHandle columnFamilyHandle = new ColumnFamilyHandle(this, cfHandles[i]); + for (final long cfHandle : cfHandles) { + final ColumnFamilyHandle columnFamilyHandle = new ColumnFamilyHandle(this, cfHandle); columnFamilyHandles.add(columnFamilyHandle); } ownedColumnFamilyHandles.addAll(columnFamilyHandles); @@ -744,8 +741,8 @@ public class RocksDB extends RocksObject { cfOptsHandles, cfNames); final List columnFamilyHandles = new ArrayList<>(cfHandles.length); - for (int i = 0; i < cfHandles.length; i++) { - final ColumnFamilyHandle columnFamilyHandle = new ColumnFamilyHandle(this, cfHandles[i]); + for (final long cfHandle : cfHandles) { + final ColumnFamilyHandle columnFamilyHandle = new ColumnFamilyHandle(this, cfHandle); columnFamilyHandles.add(columnFamilyHandle); } ownedColumnFamilyHandles.addAll(columnFamilyHandles); @@ -846,7 +843,7 @@ public class RocksDB extends RocksObject { * instance * @param key the specified key to be inserted. * @param value the value associated with the specified key. - * + *
<p>
    * throws IllegalArgumentException if column family is not present * * @throws RocksDBException thrown if error happens in underlying @@ -943,7 +940,7 @@ public class RocksDB extends RocksObject { * @param writeOpts {@link org.rocksdb.WriteOptions} instance. * @param key the specified key to be inserted. * @param value the value associated with the specified key. - * + *
<p>
    * throws IllegalArgumentException if column family is not present * * @throws RocksDBException thrown if error happens in underlying @@ -968,7 +965,7 @@ public class RocksDB extends RocksObject { * Supports direct buffer only. * @param value the value associated with the specified key. Position and limit is used. * Supports direct buffer only. - * + *
<p>
    * throws IllegalArgumentException if column family is not present * * @throws RocksDBException thrown if error happens in underlying @@ -992,7 +989,7 @@ public class RocksDB extends RocksObject { * Supports direct buffer only. * @param value the value associated with the specified key. Position and limit is used. * Supports direct buffer only. - * + *
<p>
    * throws IllegalArgumentException if column family is not present * * @throws RocksDBException thrown if error happens in underlying @@ -1215,8 +1212,8 @@ public class RocksDB extends RocksObject { public int get(final ReadOptions opt, final ByteBuffer key, final ByteBuffer value) throws RocksDBException { assert key.isDirect() && value.isDirect(); - int result = getDirect(nativeHandle_, opt.nativeHandle_, key, key.position(), key.remaining(), - value, value.position(), value.remaining(), 0); + final int result = getDirect(nativeHandle_, opt.nativeHandle_, key, key.position(), + key.remaining(), value, value.position(), value.remaining(), 0); if (result != NOT_FOUND) { value.limit(Math.min(value.limit(), value.position() + result)); } @@ -1248,8 +1245,9 @@ public class RocksDB extends RocksObject { public int get(final ColumnFamilyHandle columnFamilyHandle, final ReadOptions opt, final ByteBuffer key, final ByteBuffer value) throws RocksDBException { assert key.isDirect() && value.isDirect(); - int result = getDirect(nativeHandle_, opt.nativeHandle_, key, key.position(), key.remaining(), - value, value.position(), value.remaining(), columnFamilyHandle.nativeHandle_); + final int result = + getDirect(nativeHandle_, opt.nativeHandle_, key, key.position(), key.remaining(), value, + value.position(), value.remaining(), columnFamilyHandle.nativeHandle_); if (result != NOT_FOUND) { value.limit(Math.min(value.limit(), value.position() + result)); } @@ -1261,12 +1259,12 @@ public class RocksDB extends RocksObject { * Remove the database entry for {@code key}. Requires that the key exists * and was not overwritten. It is not an error if the key did not exist * in the database. - * + *
<p>
    * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple * times), then the result of calling SingleDelete() on this key is undefined. * SingleDelete() only behaves correctly if there has been only one Put() * for this key since the previous call to SingleDelete() for this key. - * + *
<p>
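* For example (a sketch, assuming {@code UTF_8} is imported):
* <pre>{@code
*   db.put("k".getBytes(UTF_8), "v".getBytes(UTF_8));  // exactly one Put
*   db.singleDelete("k".getBytes(UTF_8));              // well defined
*   // a second put() of "k" before the singleDelete() would make the
*   // result undefined
* }</pre>
* <p>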
    * This feature is currently an experimental performance optimization * for a very specific workload. It is up to the caller to ensure that * SingleDelete is only used for a key that is not deleted using Delete() or @@ -1287,12 +1285,12 @@ public class RocksDB extends RocksObject { * Remove the database entry for {@code key}. Requires that the key exists * and was not overwritten. It is not an error if the key did not exist * in the database. - * + *
<p>
    * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple * times), then the result of calling SingleDelete() on this key is undefined. * SingleDelete() only behaves correctly if there has been only one Put() * for this key since the previous call to SingleDelete() for this key. - * + *
<p>
    * This feature is currently an experimental performance optimization * for a very specific workload. It is up to the caller to ensure that * SingleDelete is only used for a key that is not deleted using Delete() or @@ -1316,18 +1314,18 @@ public class RocksDB extends RocksObject { * Remove the database entry for {@code key}. Requires that the key exists * and was not overwritten. It is not an error if the key did not exist * in the database. - * + *
<p>
    * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple * times), then the result of calling SingleDelete() on this key is undefined. * SingleDelete() only behaves correctly if there has been only one Put() * for this key since the previous call to SingleDelete() for this key. - * + *
<p>
    * This feature is currently an experimental performance optimization * for a very specific workload. It is up to the caller to ensure that * SingleDelete is only used for a key that is not deleted using Delete() or * written using Merge(). Mixing SingleDelete operations with Deletes and * Merges can result in undefined behavior. - * + *
<p>
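As a usage sketch (assuming {@code db} is an open RocksDB, {@code key}/{@code value} are in scope, and the surrounding method declares {@code throws RocksDBException}):

try (final WriteOptions writeOpt = new WriteOptions().setSync(true)) {
  db.put(writeOpt, key, value);
  // ... no further Put(), Delete() or Merge() of this key ...
  db.singleDelete(writeOpt, key); // exactly one Put() since the last SingleDelete()
}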
    * Note: consider setting {@link WriteOptions#setSync(boolean)} true. * * @param writeOpt Write options for the delete @@ -1346,18 +1344,18 @@ public class RocksDB extends RocksObject { * Remove the database entry for {@code key}. Requires that the key exists * and was not overwritten. It is not an error if the key did not exist * in the database. - * + *
<p>
    * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple * times), then the result of calling SingleDelete() on this key is undefined. * SingleDelete() only behaves correctly if there has been only one Put() * for this key since the previous call to SingleDelete() for this key. - * + *
<p>
    * This feature is currently an experimental performance optimization * for a very specific workload. It is up to the caller to ensure that * SingleDelete is only used for a key that is not deleted using Delete() or * written using Merge(). Mixing SingleDelete operations with Deletes and * Merges can result in undefined behavior. - * + *
<p>
    * Note: consider setting {@link WriteOptions#setSync(boolean)} true. * * @param columnFamilyHandle The column family to delete the key from @@ -1374,12 +1372,11 @@ public class RocksDB extends RocksObject { columnFamilyHandle.nativeHandle_); } - /** * Removes the database entries in the range ["beginKey", "endKey"), i.e., * including "beginKey" and excluding "endKey". a non-OK status on error. It * is not an error if no keys exist in the range ["beginKey", "endKey"). - * + *
<p>
    * Delete the database entry (if any) for "key". Returns OK on success, and a * non-OK status on error. It is not an error if "key" did not exist in the * database. @@ -1400,7 +1397,7 @@ public class RocksDB extends RocksObject { * Removes the database entries in the range ["beginKey", "endKey"), i.e., * including "beginKey" and excluding "endKey". a non-OK status on error. It * is not an error if no keys exist in the range ["beginKey", "endKey"). - * + *
<p>
    * Delete the database entry (if any) for "key". Returns OK on success, and a * non-OK status on error. It is not an error if "key" did not exist in the * database. @@ -1422,7 +1419,7 @@ public class RocksDB extends RocksObject { * Removes the database entries in the range ["beginKey", "endKey"), i.e., * including "beginKey" and excluding "endKey". a non-OK status on error. It * is not an error if no keys exist in the range ["beginKey", "endKey"). - * + *
<p>
    * Delete the database entry (if any) for "key". Returns OK on success, and a * non-OK status on error. It is not an error if "key" did not exist in the * database. @@ -1444,7 +1441,7 @@ public class RocksDB extends RocksObject { * Removes the database entries in the range ["beginKey", "endKey"), i.e., * including "beginKey" and excluding "endKey". a non-OK status on error. It * is not an error if no keys exist in the range ["beginKey", "endKey"). - * + *
<p>
    * Delete the database entry (if any) for "key". Returns OK on success, and a * non-OK status on error. It is not an error if "key" did not exist in the * database. @@ -1501,7 +1498,7 @@ public class RocksDB extends RocksObject { * native library. * @throws IndexOutOfBoundsException if an offset or length is out of bounds */ - public void merge(final byte[] key, int offset, int len, final byte[] value, + public void merge(final byte[] key, final int offset, final int len, final byte[] value, final int vOffset, final int vLen) throws RocksDBException { checkBounds(offset, len, key.length); checkBounds(vOffset, vLen, value.length); @@ -2425,10 +2422,10 @@ public class RocksDB extends RocksObject { * returns false, otherwise it returns true if the key might exist. * That is to say that this method is probabilistic and may return false * positives, but never a false negative. - * + *
<p>
    * If the caller wants to obtain value when the key * is found in memory, then {@code valueHolder} must be set. - * + *
<p>
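A rough sketch of the intended pattern (assuming {@code db} and {@code key} are in scope; the fallback {@code get} guards against false positives):

final Holder<byte[]> valueHolder = new Holder<>();
if (db.keyMayExist(key, valueHolder)) {
  final byte[] value = valueHolder.getValue() != null
      ? valueHolder.getValue() // found in memory, no further lookup needed
      : db.get(key);           // may still be null: false positives are allowed
} else {
  // definitely absent: keyMayExist never returns a false negative
}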
    * This check is potentially lighter-weight than invoking * {@link #get(byte[])}. One way to make this lighter weight is to avoid * doing any IOs. @@ -2451,10 +2448,10 @@ public class RocksDB extends RocksObject { * returns false, otherwise it returns true if the key might exist. * That is to say that this method is probabilistic and may return false * positives, but never a false negative. - * + *
<p>
    * If the caller wants to obtain value when the key * is found in memory, then {@code valueHolder} must be set. - * + *
<p>
    * This check is potentially lighter-weight than invoking * {@link #get(byte[], int, int)}. One way to make this lighter weight is to * avoid doing any IOs. @@ -2482,10 +2479,10 @@ public class RocksDB extends RocksObject { * returns false, otherwise it returns true if the key might exist. * That is to say that this method is probabilistic and may return false * positives, but never a false negative. - * + *
<p>
    * If the caller wants to obtain value when the key * is found in memory, then {@code valueHolder} must be set. - * + *
<p>
    * This check is potentially lighter-weight than invoking * {@link #get(ColumnFamilyHandle,byte[])}. One way to make this lighter * weight is to avoid doing any IOs. @@ -2511,10 +2508,10 @@ public class RocksDB extends RocksObject { * returns false, otherwise it returns true if the key might exist. * That is to say that this method is probabilistic and may return false * positives, but never a false negative. - * + *
<p>
    * If the caller wants to obtain value when the key * is found in memory, then {@code valueHolder} must be set. - * + *
<p>
    * This check is potentially lighter-weight than invoking * {@link #get(ColumnFamilyHandle, byte[], int, int)}. One way to make this * lighter weight is to avoid doing any IOs. @@ -2532,9 +2529,8 @@ public class RocksDB extends RocksObject { * @return false if the key definitely does not exist in the database, * otherwise true. */ - public boolean keyMayExist( - final ColumnFamilyHandle columnFamilyHandle, - final byte[] key, int offset, int len, + public boolean keyMayExist(final ColumnFamilyHandle columnFamilyHandle, final byte[] key, + final int offset, final int len, /* @Nullable */ final Holder valueHolder) { return keyMayExist(columnFamilyHandle, null, key, offset, len, valueHolder); @@ -2545,10 +2541,10 @@ public class RocksDB extends RocksObject { * returns false, otherwise it returns true if the key might exist. * That is to say that this method is probabilistic and may return false * positives, but never a true negative. - * + *
<p>
    * If the caller wants to obtain value when the key * is found in memory, then {@code valueHolder} must be set. - * + *
<p>
    * This check is potentially lighter-weight than invoking * {@link #get(ReadOptions, byte[])}. One way to make this * lighter weight is to avoid doing any IOs. @@ -2574,10 +2570,10 @@ public class RocksDB extends RocksObject { * returns false, otherwise it returns true if the key might exist. * That is to say that this method is probabilistic and may return false * positives, but never a true negative. - * + *
<p>
    * If the caller wants to obtain value when the key * is found in memory, then {@code valueHolder} must be set. - * + *
<p>
    * This check is potentially lighter-weight than invoking * {@link #get(ReadOptions, byte[], int, int)}. One way to make this * lighter weight is to avoid doing any IOs. @@ -2608,10 +2604,10 @@ public class RocksDB extends RocksObject { * returns false, otherwise it returns true if the key might exist. * That is to say that this method is probabilistic and may return false * positives, but never a true negative. - * + *
<p>
    * If the caller wants to obtain value when the key * is found in memory, then {@code valueHolder} must be set. - * + *
<p>
    * This check is potentially lighter-weight than invoking * {@link #get(ColumnFamilyHandle, ReadOptions, byte[])}. One way to make this * lighter weight is to avoid doing any IOs. @@ -2639,10 +2635,10 @@ public class RocksDB extends RocksObject { * returns false, otherwise it returns true if the key might exist. * That is to say that this method is probabilistic and may return false * positives, but never a false negative. - * + *
<p>
    * If the caller wants to obtain value when the key * is found in memory, then {@code valueHolder} must be set. - * + *
<p>
    * This check is potentially lighter-weight than invoking * {@link #get(ColumnFamilyHandle, ReadOptions, byte[], int, int)}. * One way to make this lighter weight is to avoid doing any IOs. @@ -2985,7 +2981,7 @@ public class RocksDB extends RocksObject { * @return Snapshot {@link Snapshot} instance */ public Snapshot getSnapshot() { - long snapshotHandle = getSnapshot(nativeHandle_); + final long snapshotHandle = getSnapshot(nativeHandle_); if (snapshotHandle != 0) { return new Snapshot(snapshotHandle); } @@ -2994,7 +2990,7 @@ public class RocksDB extends RocksObject { /** * Release a previously acquired snapshot. - * + *
<p>
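A minimal sketch of the snapshot lifecycle (assuming {@code db} and {@code key} are in scope):

final Snapshot snapshot = db.getSnapshot();
try (final ReadOptions readOptions = new ReadOptions().setSnapshot(snapshot)) {
  final byte[] valueAtSnapshot = db.get(readOptions, key);
} finally {
  db.releaseSnapshot(snapshot); // the snapshot must not be used after this call
}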
    * The caller must not use "snapshot" after this call. * * @param snapshot {@link Snapshot} instance @@ -3161,7 +3157,7 @@ public class RocksDB extends RocksObject { /** * Reset internal stats for DB and all column families. - * + *
<p>
    * Note this doesn't reset {@link Options#statistics()} as it is not * owned by DB. * @@ -3200,11 +3196,11 @@ public class RocksDB extends RocksObject { /** * Get the approximate file system space used by keys in each range. - * + *
<p>
    * Note that the returned sizes measure file system space usage, so * if the user data compresses by a factor of ten, the returned * sizes will be one-tenth the size of the corresponding user data size. - * + *
<p>
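For illustration, a sketch with hypothetical range bounds (assuming the usual {@code java.util} and {@code java.nio.charset} imports):

final List<Range> ranges = Arrays.asList(
    new Range(new Slice("a".getBytes(StandardCharsets.UTF_8)),
        new Slice("m".getBytes(StandardCharsets.UTF_8))));
final long[] sizes = db.getApproximateSizes(ranges,
    SizeApproximationFlag.INCLUDE_FILES, SizeApproximationFlag.INCLUDE_MEMTABLES);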
    * If {@code sizeApproximationFlags} defines whether the returned size * should include the recently written data in the mem-tables (if * the mem-table type supports it), data serialized to disk, or both. @@ -3236,11 +3232,11 @@ public class RocksDB extends RocksObject { /** * Get the approximate file system space used by keys in each range for * the default column family. - * + *
<p>
    * Note that the returned sizes measure file system space usage, so * if the user data compresses by a factor of ten, the returned * sizes will be one-tenth the size of the corresponding user data size. - * + *
<p>
    * If {@code sizeApproximationFlags} defines whether the returned size * should include the recently written data in the mem-tables (if * the mem-table type supports it), data serialized to disk, or both. @@ -3450,7 +3446,7 @@ public class RocksDB extends RocksObject { */ public MutableColumnFamilyOptions.MutableColumnFamilyOptionsBuilder getOptions( /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle) throws RocksDBException { - String optionsString = getOptions( + final String optionsString = getOptions( nativeHandle_, columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_); return MutableColumnFamilyOptions.parse(optionsString, true); } @@ -3477,7 +3473,7 @@ public class RocksDB extends RocksObject { * resulting options string into options */ public MutableDBOptions.MutableDBOptionsBuilder getDBOptions() throws RocksDBException { - String optionsString = getDBOptions(nativeHandle_); + final String optionsString = getDBOptions(nativeHandle_); return MutableDBOptions.parse(optionsString, true); } @@ -3511,7 +3507,7 @@ public class RocksDB extends RocksObject { /** * Takes a list of files specified by file names and * compacts them to the specified level. - * + *
<p>
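A sketch under assumed inputs (the file names are placeholders and must name live SST files of the column family):

try (final CompactionOptions compactionOptions = new CompactionOptions()) {
  db.compactFiles(compactionOptions, Arrays.asList("000012.sst", "000015.sst"),
      1 /* outputLevel */, -1 /* outputPathId: let RocksDB choose */,
      null /* no CompactionJobInfo requested */);
}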
    * Note that the behavior is different from * {@link #compactRange(ColumnFamilyHandle, byte[], byte[])} * in that CompactFiles() performs the compaction job using the CURRENT @@ -3543,7 +3539,7 @@ public class RocksDB extends RocksObject { /** * Takes a list of files specified by file names and * compacts them to the specified level. - * + *
<p>
    * Note that the behavior is different from * {@link #compactRange(ColumnFamilyHandle, byte[], byte[])} * in that CompactFiles() performs the compaction job using the CURRENT @@ -3586,7 +3582,7 @@ public class RocksDB extends RocksObject { * returning. * */ - public void cancelAllBackgroundWork(boolean wait) { + public void cancelAllBackgroundWork(final boolean wait) { cancelAllBackgroundWork(nativeHandle_, wait); } @@ -3614,11 +3610,11 @@ public class RocksDB extends RocksObject { /** * Enable automatic compactions for the given column * families if they were previously disabled. - * + *
<p>
    * The function will first set the * {@link ColumnFamilyOptions#disableAutoCompactions()} option for each * column family to false, after which it will schedule a flush/compaction. - * + *
<p>
    * NOTE: Setting disableAutoCompactions to 'false' through * {@link #setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)} * does NOT schedule a flush/compaction afterwards, and only changes the @@ -3761,15 +3757,15 @@ public class RocksDB extends RocksObject { /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle) throws RocksDBException { flush(flushOptions, - columnFamilyHandle == null ? null : Arrays.asList(columnFamilyHandle)); + columnFamilyHandle == null ? null : Collections.singletonList(columnFamilyHandle)); } /** * Flushes multiple column families. - * + *
<p>
    * If atomic flush is not enabled, this is equivalent to calling * {@link #flush(FlushOptions, ColumnFamilyHandle)} multiple times. - * + *
<p>
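A sketch of the atomic case described below (assumes the DB was opened with {@code DBOptions#setAtomicFlush(true)} and that {@code cfHandleA}/{@code cfHandleB} are open column family handles):

try (final FlushOptions flushOptions = new FlushOptions().setWaitForFlush(true)) {
  db.flush(flushOptions, Arrays.asList(cfHandleA, cfHandleB));
}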
    * If atomic flush is enabled, this will flush all column families * specified up to the latest sequence number at the time when flush is * requested. @@ -3800,13 +3796,13 @@ public class RocksDB extends RocksObject { /** * Sync the WAL. - * + *
<p>
    * Note that {@link #write(WriteOptions, WriteBatch)} followed by - * {@link #syncWal()} is not exactly the same as + * {@code #syncWal()} is not exactly the same as * {@link #write(WriteOptions, WriteBatch)} with * {@link WriteOptions#sync()} set to true; In the latter case the changes * won't be visible until the sync is done. - * + *
<p>
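A sketch of the distinction (assuming {@code writeBatch} is a populated WriteBatch):

try (final WriteOptions writeOptions = new WriteOptions().setSync(false)) {
  db.write(writeOptions, writeBatch); // changes are visible to readers here
}
db.syncWal(); // afterwards the WAL is durable as well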
    * Currently only works if {@link Options#allowMmapWrites()} is set to false. * * @throws RocksDBException if an error occurs whilst syncing @@ -3884,7 +3880,7 @@ public class RocksDB extends RocksObject { /** * Retrieve the list of all files in the database after flushing the memtable. - * + *
<p>
    * See {@link #getLiveFiles(boolean)}. * * @return the live files @@ -3898,14 +3894,14 @@ public class RocksDB extends RocksObject { /** * Retrieve the list of all files in the database. - * + *
<p>
    * In case you have multiple column families, even if {@code flushMemtable} * is true, you still need to call {@link #getSortedWalFiles()} - * after {@link #getLiveFiles(boolean)} to compensate for new data that + * after {@code #getLiveFiles(boolean)} to compensate for new data that * arrived to already-flushed column families while other column families * were flushing. - * - * NOTE: Calling {@link #getLiveFiles(boolean)} followed by + *
<p>
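A sketch of combining the two calls, per the backup note that follows:

final RocksDB.LiveFiles liveFiles = db.getLiveFiles(true /* flushMemtable */);
final List<LogFile> walFiles = db.getSortedWalFiles();
// copy liveFiles.files (paths relative to the DB directory), truncating the
// current MANIFEST to liveFiles.manifestFileSize, then copy the WAL files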
    + * NOTE: Calling {@code #getLiveFiles(boolean)} followed by * {@link #getSortedWalFiles()} can generate a lossless backup. * * @param flushMemtable set to true to flush before recoding the live @@ -4016,7 +4012,7 @@ public class RocksDB extends RocksObject { * ingest the file into this level (2). A file that have a key range that * overlap with the memtable key range will require us to Flush the memtable * first before ingesting the file. - * + *
<p>
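A sketch of the ingestion call itself (the file path and option values are assumptions):

try (final IngestExternalFileOptions ingestOptions =
         new IngestExternalFileOptions().setMoveFiles(true)) {
  db.ingestExternalFile(Arrays.asList("/tmp/bulk-000001.sst"), ingestOptions);
}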
    * (1) External SST files can be created using {@link SstFileWriter} * (2) We will try to ingest the files to the lowest possible level * even if the file compression doesn't match the level compression @@ -4041,7 +4037,7 @@ public class RocksDB extends RocksObject { * ingest the file into this level (2). A file that have a key range that * overlap with the memtable key range will require us to Flush the memtable * first before ingesting the file. - * + *
<p>
    * (1) External SST files can be created using {@link SstFileWriter} * (2) We will try to ingest the files to the lowest possible level * even if the file compression doesn't match the level compression @@ -4207,7 +4203,7 @@ public class RocksDB extends RocksObject { /** * Trace DB operations. - * + *
<p>
    * Use {@link #endTrace()} to stop tracing. * * @param traceOptions the options @@ -4219,7 +4215,7 @@ public class RocksDB extends RocksObject { final AbstractTraceWriter traceWriter) throws RocksDBException { startTrace(nativeHandle_, traceOptions.getMaxTraceFileSize(), traceWriter.nativeHandle_); - /** + /* * NOTE: {@link #startTrace(long, long, long) transfers the ownership * from Java to C++, so we must disown the native handle here. */ @@ -4228,7 +4224,7 @@ public class RocksDB extends RocksObject { /** * Stop tracing DB operations. - * + *
<p>
    * See {@link #startTrace(TraceOptions, AbstractTraceWriter)} * * @throws RocksDBException if an error occurs whilst ending the trace @@ -4314,7 +4310,7 @@ public class RocksDB extends RocksObject { } private static long[] toRangeSliceHandles(final List ranges) { - final long rangeSliceHandles[] = new long [ranges.size() * 2]; + final long[] rangeSliceHandles = new long[ranges.size() * 2]; for (int i = 0, j = 0; i < ranges.size(); i++) { final Range range = ranges.get(i); rangeSliceHandles[j++] = range.start.getNativeHandle(); @@ -4323,11 +4319,11 @@ public class RocksDB extends RocksObject { return rangeSliceHandles; } - protected void storeOptionsInstance(DBOptionsInterface options) { + protected void storeOptionsInstance(final DBOptionsInterface options) { options_ = options; } - private static void checkBounds(int offset, int len, int size) { + private static void checkBounds(final int offset, final int len, final int size) { if ((offset | len | (offset + len) | (size - (offset + len))) < 0) { throw new IndexOutOfBoundsException(String.format("offset(%d), len(%d), size(%d)", offset, len, size)); } @@ -4340,8 +4336,8 @@ public class RocksDB extends RocksObject { } // native methods - private native static long open(final long optionsHandle, - final String path) throws RocksDBException; + private static native long open(final long optionsHandle, final String path) + throws RocksDBException; /** * @param optionsHandle Native handle pointing to an Options object @@ -4355,11 +4351,10 @@ public class RocksDB extends RocksObject { * * @throws RocksDBException thrown if the database could not be opened */ - private native static long[] open(final long optionsHandle, - final String path, final byte[][] columnFamilyNames, - final long[] columnFamilyOptions) throws RocksDBException; + private static native long[] open(final long optionsHandle, final String path, + final byte[][] columnFamilyNames, final long[] columnFamilyOptions) throws RocksDBException; - private native static long openROnly(final long optionsHandle, final String path, + private static native long openROnly(final long optionsHandle, final String path, final boolean errorIfWalFileExists) throws RocksDBException; /** @@ -4374,31 +4369,30 @@ public class RocksDB extends RocksObject { * * @throws RocksDBException thrown if the database could not be opened */ - private native static long[] openROnly(final long optionsHandle, final String path, + private static native long[] openROnly(final long optionsHandle, final String path, final byte[][] columnFamilyNames, final long[] columnFamilyOptions, final boolean errorIfWalFileExists) throws RocksDBException; - private native static long openAsSecondary(final long optionsHandle, final String path, + private static native long openAsSecondary(final long optionsHandle, final String path, final String secondaryPath) throws RocksDBException; - private native static long[] openAsSecondary(final long optionsHandle, final String path, + private static native long[] openAsSecondary(final long optionsHandle, final String path, final String secondaryPath, final byte[][] columnFamilyNames, final long[] columnFamilyOptions) throws RocksDBException; @Override protected native void disposeInternal(final long handle); - private native static void closeDatabase(final long handle) + private static native void closeDatabase(final long handle) throws RocksDBException; + private static native byte[][] listColumnFamilies(final long optionsHandle, final String path) throws RocksDBException; - private 
native static byte[][] listColumnFamilies(final long optionsHandle, - final String path) throws RocksDBException; private native long createColumnFamily(final long handle, final byte[] columnFamilyName, final int columnFamilyNamelen, final long columnFamilyOptions) throws RocksDBException; private native long[] createColumnFamilies(final long handle, final long columnFamilyOptionsHandle, final byte[][] columnFamilyNames) throws RocksDBException; - private native long[] createColumnFamilies(final long handle, - final long columnFamilyOptionsHandles[], final byte[][] columnFamilyNames) + private native long[] createColumnFamilies( + final long handle, final long[] columnFamilyOptionsHandles, final byte[][] columnFamilyNames) throws RocksDBException; private native void dropColumnFamily( final long handle, final long cfHandle) throws RocksDBException; @@ -4645,10 +4639,10 @@ public class RocksDB extends RocksObject { private native void deleteFilesInRanges(long handle, long cfHandle, final byte[][] ranges, boolean include_end) throws RocksDBException; - private native static void destroyDB(final String path, - final long optionsHandle) throws RocksDBException; + private static native void destroyDB(final String path, final long optionsHandle) + throws RocksDBException; - private native static int version(); + private static native int version(); protected DBOptionsInterface options_; private static Version version; diff --git a/java/src/main/java/org/rocksdb/RocksEnv.java b/java/src/main/java/org/rocksdb/RocksEnv.java index b3681d77d..ca010c9f9 100644 --- a/java/src/main/java/org/rocksdb/RocksEnv.java +++ b/java/src/main/java/org/rocksdb/RocksEnv.java @@ -27,6 +27,5 @@ public class RocksEnv extends Env { super(handle); } - @Override - protected native final void disposeInternal(final long handle); + @Override protected final native void disposeInternal(final long handle); } diff --git a/java/src/main/java/org/rocksdb/RocksMutableObject.java b/java/src/main/java/org/rocksdb/RocksMutableObject.java index e92289dc0..eb3215290 100644 --- a/java/src/main/java/org/rocksdb/RocksMutableObject.java +++ b/java/src/main/java/org/rocksdb/RocksMutableObject.java @@ -71,7 +71,7 @@ public abstract class RocksMutableObject extends AbstractNativeReference { } @Override - public synchronized final void close() { + public final synchronized void close() { if (isOwningHandle()) { disposeInternal(); this.owningHandle_ = false; diff --git a/java/src/main/java/org/rocksdb/Slice.java b/java/src/main/java/org/rocksdb/Slice.java index 50d9f7652..6a01374d6 100644 --- a/java/src/main/java/org/rocksdb/Slice.java +++ b/java/src/main/java/org/rocksdb/Slice.java @@ -125,9 +125,8 @@ public class Slice extends AbstractSlice { } @Override protected final native byte[] data0(long handle); - private native static long createNewSlice0(final byte[] data, - final int length); - private native static long createNewSlice1(final byte[] data); + private static native long createNewSlice0(final byte[] data, final int length); + private static native long createNewSlice1(final byte[] data); private native void clear0(long handle, boolean internalBuffer, long internalBufferOffset); private native void removePrefix0(long handle, int length); diff --git a/java/src/main/java/org/rocksdb/Snapshot.java b/java/src/main/java/org/rocksdb/Snapshot.java index 39cdf0c2d..1f471bd31 100644 --- a/java/src/main/java/org/rocksdb/Snapshot.java +++ b/java/src/main/java/org/rocksdb/Snapshot.java @@ -29,7 +29,7 @@ public class Snapshot extends RocksObject { 
@Override protected final void disposeInternal(final long handle) { - /** + /* * Nothing to release, we never own the pointer for a * Snapshot. The pointer * to the snapshot is released by the database diff --git a/java/src/main/java/org/rocksdb/SstFileManager.java b/java/src/main/java/org/rocksdb/SstFileManager.java index 8805410aa..0b9a60061 100644 --- a/java/src/main/java/org/rocksdb/SstFileManager.java +++ b/java/src/main/java/org/rocksdb/SstFileManager.java @@ -10,9 +10,9 @@ import java.util.Map; /** * SstFileManager is used to track SST files in the DB and control their * deletion rate. - * + *
<p>
    * All SstFileManager public functions are thread-safe. - * + *
<p>
    * SstFileManager is not extensible. */ //@ThreadSafe @@ -55,7 +55,7 @@ public final class SstFileManager extends RocksObject { * * @param env the environment. * @param logger if not null, the logger will be used to log errors. - * + *
<p>
    * == Deletion rate limiting specific arguments == * @param rateBytesPerSec how many bytes should be deleted per second, If * this value is set to 1024 (1 Kb / sec) and we deleted a file of size @@ -75,7 +75,7 @@ public final class SstFileManager extends RocksObject { * * @param env the environment. * @param logger if not null, the logger will be used to log errors. - * + *
<p>
    * == Deletion rate limiting specific arguments == * @param rateBytesPerSec how many bytes should be deleted per second, If * this value is set to 1024 (1 Kb / sec) and we deleted a file of size @@ -100,7 +100,7 @@ public final class SstFileManager extends RocksObject { * * @param env the environment. * @param logger if not null, the logger will be used to log errors. - * + *
<p>
    * == Deletion rate limiting specific arguments == * @param rateBytesPerSec how many bytes should be deleted per second, If * this value is set to 1024 (1 Kb / sec) and we deleted a file of size @@ -123,12 +123,11 @@ public final class SstFileManager extends RocksObject { rateBytesPerSec, maxTrashDbRatio, bytesMaxDeleteChunk)); } - /** * Update the maximum allowed space that should be used by RocksDB, if * the total size of the SST files exceeds {@code maxAllowedSpace}, writes to * RocksDB will fail. - * + *
<p>
    * Setting {@code maxAllowedSpace} to 0 will disable this feature; * maximum allowed space will be infinite (Default value). * @@ -202,7 +201,7 @@ public final class SstFileManager extends RocksObject { /** * Set the delete rate limit. - * + *
<p>
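A configuration sketch (the limits shown are arbitrary examples):

final SstFileManager sstFileManager = new SstFileManager(Env.getDefault());
sstFileManager.setMaxAllowedSpaceUsage(64L * 1024 * 1024 * 1024); // 64 GiB cap
sstFileManager.setDeleteRateBytesPerSecond(1024 * 1024); // ~1 MiB/s
// hand the manager to DBOptions#setSstFileManager(...) before opening the DB,
// and keep it alive (and close it) only after the DB itself is closed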
    * Zero means disable delete rate limiting and delete files immediately. * * @param deleteRate the delete rate limit (in bytes per second). @@ -229,9 +228,8 @@ public final class SstFileManager extends RocksObject { setMaxTrashDBRatio(nativeHandle_, ratio); } - private native static long newSstFileManager(final long handle, - final long logger_handle, final long rateBytesPerSec, - final double maxTrashDbRatio, final long bytesMaxDeleteChunk) + private static native long newSstFileManager(final long handle, final long logger_handle, + final long rateBytesPerSec, final double maxTrashDbRatio, final long bytesMaxDeleteChunk) throws RocksDBException; private native void setMaxAllowedSpaceUsage(final long handle, final long maxAllowedSpace); @@ -247,5 +245,5 @@ public final class SstFileManager extends RocksObject { final long deleteRate); private native double getMaxTrashDBRatio(final long handle); private native void setMaxTrashDBRatio(final long handle, final double ratio); - @Override protected final native void disposeInternal(final long handle); + @Override protected native void disposeInternal(final long handle); } diff --git a/java/src/main/java/org/rocksdb/SstFileReader.java b/java/src/main/java/org/rocksdb/SstFileReader.java index bb1e94ee0..678c3519c 100644 --- a/java/src/main/java/org/rocksdb/SstFileReader.java +++ b/java/src/main/java/org/rocksdb/SstFileReader.java @@ -18,12 +18,12 @@ public class SstFileReader extends RocksObject { * Returns an iterator that will iterate on all keys in the default * column family including both keys in the DB and uncommitted keys in this * transaction. - * + *
<p>
    * Setting {@link ReadOptions#setSnapshot(Snapshot)} will affect what is read * from the DB but will NOT change which keys are read from this transaction * (the keys in this transaction do not yet belong to any snapshot and will be * fetched regardless). - * + *
<p>
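A read-only scan sketch (the file path is an assumption):

try (final Options options = new Options();
     final SstFileReader reader = new SstFileReader(options)) {
  reader.open("/path/to/file.sst");
  try (final ReadOptions readOptions = new ReadOptions();
       final SstFileReaderIterator it = reader.newIterator(readOptions)) {
    for (it.seekToFirst(); it.isValid(); it.next()) {
      // consume it.key() / it.value()
    }
  }
}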
    * Caller is responsible for deleting the returned Iterator. * * @param readOptions Read options. @@ -32,7 +32,7 @@ public class SstFileReader extends RocksObject { */ public SstFileReaderIterator newIterator(final ReadOptions readOptions) { assert (isOwningHandle()); - long iter = newIterator(nativeHandle_, readOptions.nativeHandle_); + final long iter = newIterator(nativeHandle_, readOptions.nativeHandle_); return new SstFileReaderIterator(this, iter); } @@ -75,7 +75,7 @@ public class SstFileReader extends RocksObject { private native void open(final long handle, final String filePath) throws RocksDBException; - private native static long newSstFileReader(final long optionsHandle); + private static native long newSstFileReader(final long optionsHandle); private native void verifyChecksum(final long handle) throws RocksDBException; private native TableProperties getTableProperties(final long handle) throws RocksDBException; diff --git a/java/src/main/java/org/rocksdb/SstFileWriter.java b/java/src/main/java/org/rocksdb/SstFileWriter.java index fe00c1a12..5dd0b6dd5 100644 --- a/java/src/main/java/org/rocksdb/SstFileWriter.java +++ b/java/src/main/java/org/rocksdb/SstFileWriter.java @@ -199,12 +199,11 @@ public class SstFileWriter extends RocksObject { return fileSize(nativeHandle_); } - private native static long newSstFileWriter( - final long envOptionsHandle, final long optionsHandle, + private static native long newSstFileWriter(final long envOptionsHandle, final long optionsHandle, final long userComparatorHandle, final byte comparatorType); - private native static long newSstFileWriter(final long envOptionsHandle, - final long optionsHandle); + private static native long newSstFileWriter( + final long envOptionsHandle, final long optionsHandle); private native void open(final long handle, final String filePath) throws RocksDBException; diff --git a/java/src/main/java/org/rocksdb/SstPartitionerFixedPrefixFactory.java b/java/src/main/java/org/rocksdb/SstPartitionerFixedPrefixFactory.java index d513c5f15..b1ccf08c1 100644 --- a/java/src/main/java/org/rocksdb/SstPartitionerFixedPrefixFactory.java +++ b/java/src/main/java/org/rocksdb/SstPartitionerFixedPrefixFactory.java @@ -9,11 +9,11 @@ package org.rocksdb; * Fixed prefix factory. It partitions SST files using fixed prefix of the key. */ public class SstPartitionerFixedPrefixFactory extends SstPartitionerFactory { - public SstPartitionerFixedPrefixFactory(long prefixLength) { + public SstPartitionerFixedPrefixFactory(final long prefixLength) { super(newSstPartitionerFixedPrefixFactory0(prefixLength)); } - private native static long newSstPartitionerFixedPrefixFactory0(long prefixLength); + private static native long newSstPartitionerFixedPrefixFactory0(long prefixLength); @Override protected final native void disposeInternal(final long handle); } diff --git a/java/src/main/java/org/rocksdb/StateType.java b/java/src/main/java/org/rocksdb/StateType.java index 803456bb2..803fa37d9 100644 --- a/java/src/main/java/org/rocksdb/StateType.java +++ b/java/src/main/java/org/rocksdb/StateType.java @@ -7,7 +7,7 @@ package org.rocksdb; /** * The type used to refer to a thread state. - * + *
<p>
    * A state describes lower-level action of a thread * such as reading / writing a file or waiting for a mutex. */ diff --git a/java/src/main/java/org/rocksdb/Statistics.java b/java/src/main/java/org/rocksdb/Statistics.java index 0938a6d58..9f3c9a62c 100644 --- a/java/src/main/java/org/rocksdb/Statistics.java +++ b/java/src/main/java/org/rocksdb/Statistics.java @@ -31,7 +31,7 @@ public class Statistics extends RocksObject { /** * Intentionally package-private. - * + *
<p>
    * Used from {@link DBOptions#statistics()} * * @param existingStatisticsHandle The C++ pointer to an existing statistics object @@ -134,10 +134,11 @@ public class Statistics extends RocksObject { return toString(nativeHandle_); } - private native static long newStatistics(); - private native static long newStatistics(final long otherStatisticsHandle); - private native static long newStatistics(final byte[] ignoreHistograms); - private native static long newStatistics(final byte[] ignoreHistograms, final long otherStatisticsHandle); + private static native long newStatistics(); + private static native long newStatistics(final long otherStatisticsHandle); + private static native long newStatistics(final byte[] ignoreHistograms); + private static native long newStatistics( + final byte[] ignoreHistograms, final long otherStatisticsHandle); @Override protected final native void disposeInternal(final long handle); diff --git a/java/src/main/java/org/rocksdb/StatisticsCollector.java b/java/src/main/java/org/rocksdb/StatisticsCollector.java index fb3f57150..fd00f85b2 100644 --- a/java/src/main/java/org/rocksdb/StatisticsCollector.java +++ b/java/src/main/java/org/rocksdb/StatisticsCollector.java @@ -62,48 +62,39 @@ public class StatisticsCollector { } private Runnable collectStatistics() { - return new Runnable() { - - @Override - public void run() { - while (_isRunning) { - try { - if(Thread.currentThread().isInterrupted()) { - break; - } - for(final StatsCollectorInput statsCollectorInput : - _statsCollectorInputList) { - Statistics statistics = statsCollectorInput.getStatistics(); - StatisticsCollectorCallback statsCallback = - statsCollectorInput.getCallback(); + return () -> { + while (_isRunning) { + try { + if (Thread.currentThread().isInterrupted()) { + break; + } + for (final StatsCollectorInput statsCollectorInput : _statsCollectorInputList) { + final Statistics statistics = statsCollectorInput.getStatistics(); + final StatisticsCollectorCallback statsCallback = statsCollectorInput.getCallback(); - // Collect ticker data - for(final TickerType ticker : TickerType.values()) { - if(ticker != TickerType.TICKER_ENUM_MAX) { - final long tickerValue = statistics.getTickerCount(ticker); - statsCallback.tickerCallback(ticker, tickerValue); - } + // Collect ticker data + for (final TickerType ticker : TickerType.values()) { + if (ticker != TickerType.TICKER_ENUM_MAX) { + final long tickerValue = statistics.getTickerCount(ticker); + statsCallback.tickerCallback(ticker, tickerValue); } + } - // Collect histogram data - for(final HistogramType histogramType : HistogramType.values()) { - if(histogramType != HistogramType.HISTOGRAM_ENUM_MAX) { - final HistogramData histogramData = - statistics.getHistogramData(histogramType); - statsCallback.histogramCallback(histogramType, histogramData); - } + // Collect histogram data + for (final HistogramType histogramType : HistogramType.values()) { + if (histogramType != HistogramType.HISTOGRAM_ENUM_MAX) { + final HistogramData histogramData = statistics.getHistogramData(histogramType); + statsCallback.histogramCallback(histogramType, histogramData); } } - - Thread.sleep(_statsCollectionInterval); - } - catch (final InterruptedException e) { - Thread.currentThread().interrupt(); - break; - } - catch (final Exception e) { - throw new RuntimeException("Error while calculating statistics", e); } + + Thread.sleep(_statsCollectionInterval); + } catch (final InterruptedException e) { + Thread.currentThread().interrupt(); + break; + } catch (final Exception e) { 
+ throw new RuntimeException("Error while calculating statistics", e); } } }; diff --git a/java/src/main/java/org/rocksdb/StatisticsCollectorCallback.java b/java/src/main/java/org/rocksdb/StatisticsCollectorCallback.java index f3785b15f..bed7828e0 100644 --- a/java/src/main/java/org/rocksdb/StatisticsCollectorCallback.java +++ b/java/src/main/java/org/rocksdb/StatisticsCollectorCallback.java @@ -7,7 +7,7 @@ package org.rocksdb; /** * Callback interface provided to StatisticsCollector. - * + *
<p>
    * Thread safety: * StatisticsCollector doesn't make any guarantees about thread safety. * If the same reference of StatisticsCollectorCallback is passed to multiple diff --git a/java/src/main/java/org/rocksdb/StatsLevel.java b/java/src/main/java/org/rocksdb/StatsLevel.java index 58504b84a..8190e503a 100644 --- a/java/src/main/java/org/rocksdb/StatsLevel.java +++ b/java/src/main/java/org/rocksdb/StatsLevel.java @@ -23,7 +23,7 @@ public enum StatsLevel { /** * Collect all stats, including measuring duration of mutex operations. - * + *
<p>
    * If getting time is expensive on the platform to run, it can * reduce scalability to more threads, especially for writes. */ diff --git a/java/src/main/java/org/rocksdb/Status.java b/java/src/main/java/org/rocksdb/Status.java index 033ed3ea1..5c50e700f 100644 --- a/java/src/main/java/org/rocksdb/Status.java +++ b/java/src/main/java/org/rocksdb/Status.java @@ -9,7 +9,7 @@ import java.util.Objects; /** * Represents the status returned by a function call in RocksDB. - * + *
<p>
    * Currently only used with {@link RocksDBException} when the * status is not {@link Code#Ok} */ @@ -139,12 +139,12 @@ public class Status { } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - Status status = (Status) o; + final Status status = (Status) o; return code == status.code && subCode == status.subCode && Objects.equals(state, status.state); } diff --git a/java/src/main/java/org/rocksdb/StringAppendOperator.java b/java/src/main/java/org/rocksdb/StringAppendOperator.java index ddbccff46..547371e7c 100644 --- a/java/src/main/java/org/rocksdb/StringAppendOperator.java +++ b/java/src/main/java/org/rocksdb/StringAppendOperator.java @@ -11,19 +11,19 @@ package org.rocksdb; * two strings. */ public class StringAppendOperator extends MergeOperator { - public StringAppendOperator() { - this(','); - } + public StringAppendOperator() { + this(','); + } - public StringAppendOperator(char delim) { - super(newSharedStringAppendOperator(delim)); - } + public StringAppendOperator(final char delim) { + super(newSharedStringAppendOperator(delim)); + } - public StringAppendOperator(String delim) { - super(newSharedStringAppendOperator(delim)); - } + public StringAppendOperator(final String delim) { + super(newSharedStringAppendOperator(delim)); + } - private native static long newSharedStringAppendOperator(final char delim); - private native static long newSharedStringAppendOperator(final String delim); - @Override protected final native void disposeInternal(final long handle); + private static native long newSharedStringAppendOperator(final char delim); + private static native long newSharedStringAppendOperator(final String delim); + @Override protected final native void disposeInternal(final long handle); } diff --git a/java/src/main/java/org/rocksdb/TableFileCreationBriefInfo.java b/java/src/main/java/org/rocksdb/TableFileCreationBriefInfo.java index 5a383ade4..8dc56796a 100644 --- a/java/src/main/java/org/rocksdb/TableFileCreationBriefInfo.java +++ b/java/src/main/java/org/rocksdb/TableFileCreationBriefInfo.java @@ -82,12 +82,12 @@ public class TableFileCreationBriefInfo { } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - TableFileCreationBriefInfo that = (TableFileCreationBriefInfo) o; + final TableFileCreationBriefInfo that = (TableFileCreationBriefInfo) o; return jobId == that.jobId && Objects.equals(dbName, that.dbName) && Objects.equals(columnFamilyName, that.columnFamilyName) && Objects.equals(filePath, that.filePath) && reason == that.reason; diff --git a/java/src/main/java/org/rocksdb/TableFileCreationInfo.java b/java/src/main/java/org/rocksdb/TableFileCreationInfo.java index 7742f32f1..5654603c3 100644 --- a/java/src/main/java/org/rocksdb/TableFileCreationInfo.java +++ b/java/src/main/java/org/rocksdb/TableFileCreationInfo.java @@ -62,12 +62,12 @@ public class TableFileCreationInfo extends TableFileCreationBriefInfo { } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - TableFileCreationInfo that = (TableFileCreationInfo) o; + final TableFileCreationInfo that = (TableFileCreationInfo) o; return fileSize == that.fileSize && Objects.equals(tableProperties, that.tableProperties) && Objects.equals(status, 
that.status); } diff --git a/java/src/main/java/org/rocksdb/TableFileDeletionInfo.java b/java/src/main/java/org/rocksdb/TableFileDeletionInfo.java index 8aad03ae8..9a777e333 100644 --- a/java/src/main/java/org/rocksdb/TableFileDeletionInfo.java +++ b/java/src/main/java/org/rocksdb/TableFileDeletionInfo.java @@ -62,12 +62,12 @@ public class TableFileDeletionInfo { } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - TableFileDeletionInfo that = (TableFileDeletionInfo) o; + final TableFileDeletionInfo that = (TableFileDeletionInfo) o; return jobId == that.jobId && Objects.equals(dbName, that.dbName) && Objects.equals(filePath, that.filePath) && Objects.equals(status, that.status); } diff --git a/java/src/main/java/org/rocksdb/TableFormatConfig.java b/java/src/main/java/org/rocksdb/TableFormatConfig.java index dbe524c42..726c6f122 100644 --- a/java/src/main/java/org/rocksdb/TableFormatConfig.java +++ b/java/src/main/java/org/rocksdb/TableFormatConfig.java @@ -18,5 +18,5 @@ public abstract class TableFormatConfig { * * @return native handle address to native table instance. */ - abstract protected long newTableFactoryHandle(); + protected abstract long newTableFactoryHandle(); } diff --git a/java/src/main/java/org/rocksdb/TableProperties.java b/java/src/main/java/org/rocksdb/TableProperties.java index 096341a4c..02b95608e 100644 --- a/java/src/main/java/org/rocksdb/TableProperties.java +++ b/java/src/main/java/org/rocksdb/TableProperties.java @@ -380,12 +380,12 @@ public class TableProperties { } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - TableProperties that = (TableProperties) o; + final TableProperties that = (TableProperties) o; return dataSize == that.dataSize && indexSize == that.indexSize && indexPartitions == that.indexPartitions && topLevelIndexSize == that.topLevelIndexSize && indexKeyIsUserKey == that.indexKeyIsUserKey diff --git a/java/src/main/java/org/rocksdb/ThreadStatus.java b/java/src/main/java/org/rocksdb/ThreadStatus.java index 062df5889..38e7fad9c 100644 --- a/java/src/main/java/org/rocksdb/ThreadStatus.java +++ b/java/src/main/java/org/rocksdb/ThreadStatus.java @@ -15,7 +15,7 @@ public class ThreadStatus { private final OperationType operationType; private final long operationElapsedTime; // microseconds private final OperationStage operationStage; - private final long operationProperties[]; + private final long[] operationProperties; private final StateType stateType; /** @@ -113,7 +113,7 @@ public class ThreadStatus { /** * Get the list of properties that describe some details about the current * operation. - * + *
<p>
    * Each field in might have different meanings for different operations. * * @return the properties diff --git a/java/src/main/java/org/rocksdb/TickerType.java b/java/src/main/java/org/rocksdb/TickerType.java index f100bb277..c167f74c4 100644 --- a/java/src/main/java/org/rocksdb/TickerType.java +++ b/java/src/main/java/org/rocksdb/TickerType.java @@ -7,7 +7,7 @@ package org.rocksdb; /** * The logical mapping of tickers defined in rocksdb::Tickers. - * + *
<p>
    * Java byte value mappings don't align 1:1 to the c++ values. c++ rocksdb::Tickers enumeration type * is uint32_t and java org.rocksdb.TickerType is byte, this causes mapping issues when * rocksdb::Tickers value is greater then 127 (0x7F) for jbyte jni interface as range greater is not diff --git a/java/src/main/java/org/rocksdb/Transaction.java b/java/src/main/java/org/rocksdb/Transaction.java index b2cc8a932..7d61a208e 100644 --- a/java/src/main/java/org/rocksdb/Transaction.java +++ b/java/src/main/java/org/rocksdb/Transaction.java @@ -11,7 +11,7 @@ import java.util.List; /** * Provides BEGIN/COMMIT/ROLLBACK transactions. - * + *
<p>
    * To use transactions, you must first create either an * {@link OptimisticTransactionDB} or a {@link TransactionDB} * @@ -20,7 +20,7 @@ import java.util.List; * {@link TransactionDB#beginTransaction(org.rocksdb.WriteOptions)} * * It is up to the caller to synchronize access to this object. - * + *
<p>
    * See samples/src/main/java/OptimisticTransactionSample.java and * samples/src/main/java/TransactionSample.java for some simple * examples. @@ -50,22 +50,22 @@ public class Transaction extends RocksObject { * any keys successfully written (or fetched via {@link #getForUpdate}) have * not been modified outside of this transaction since the time the snapshot * was set. - * + *
<p>
    * If a snapshot has not been set, the transaction guarantees that keys have * not been modified since the time each key was first written (or fetched via * {@link #getForUpdate}). - * - * Using {@link #setSnapshot()} will provide stricter isolation guarantees + *
<p>
    + * Using {@code #setSnapshot()} will provide stricter isolation guarantees * at the expense of potentially more transaction failures due to conflicts * with other writes. - * - * Calling {@link #setSnapshot()} has no effect on keys written before this + *
<p>
    + * Calling {@code #setSnapshot()} has no effect on keys written before this * function has been called. - * - * {@link #setSnapshot()} may be called multiple times if you would like to + *
<p>
    + * {@code #setSnapshot()} may be called multiple times if you would like to * change the snapshot used for different operations in this transaction. - * - * Calling {@link #setSnapshot()} will not affect the version of Data returned + *
<p>
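A sketch of the stricter-isolation pattern (assuming {@code txnDb} is a TransactionDB and {@code writeOptions}, {@code key} and {@code newValue} are in scope):

try (final Transaction txn = txnDb.beginTransaction(writeOptions)) {
  txn.setSnapshot(); // conflict checks now run against this point in time
  txn.put(key, newValue);
  txn.commit(); // fails if another writer changed key after the snapshot
}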
    + * Calling {@code #setSnapshot()} will not affect the version of Data returned * by get(...) methods. See {@link #get} for more details. */ public void setSnapshot() { @@ -79,19 +79,19 @@ public class Transaction extends RocksObject { * By calling this function, the transaction will essentially call * {@link #setSnapshot()} for you right before performing the next * write/getForUpdate. - * - * Calling {@link #setSnapshotOnNextOperation()} will not affect what + *
<p>
    + * Calling {@code #setSnapshotOnNextOperation()} will not affect what * snapshot is returned by {@link #getSnapshot} until the next * write/getForUpdate is executed. - * + *
<p>
    * When the snapshot is created the notifier's snapshotCreated method will * be called so that the caller can get access to the snapshot. - * + *
<p>
    * This is an optimization to reduce the likelihood of conflicts that * could occur in between the time {@link #setSnapshot()} is called and the * first write/getForUpdate operation. i.e. this prevents the following * race-condition: - * + *
<p>
    * txn1->setSnapshot(); * txn2->put("A", ...); * txn2->commit(); @@ -108,20 +108,20 @@ public class Transaction extends RocksObject { * By calling this function, the transaction will essentially call * {@link #setSnapshot()} for you right before performing the next * write/getForUpdate. - * + *
<p>
    * Calling {@link #setSnapshotOnNextOperation()} will not affect what * snapshot is returned by {@link #getSnapshot} until the next * write/getForUpdate is executed. - * + *
<p>
    * When the snapshot is created the * {@link AbstractTransactionNotifier#snapshotCreated(Snapshot)} method will * be called so that the caller can get access to the snapshot. - * + *
<p>
    * This is an optimization to reduce the likelihood of conflicts that * could occur in between the time {@link #setSnapshot()} is called and the * first write/getForUpdate operation. i.e. this prevents the following * race-condition: - * + *
<p>
    * txn1->setSnapshot(); * txn2->put("A", ...); * txn2->commit(); @@ -137,38 +137,37 @@ public class Transaction extends RocksObject { setSnapshotOnNextOperation(nativeHandle_, transactionNotifier.nativeHandle_); } - /** - * Returns the Snapshot created by the last call to {@link #setSnapshot()}. - * - * REQUIRED: The returned Snapshot is only valid up until the next time - * {@link #setSnapshot()}/{@link #setSnapshotOnNextOperation()} is called, - * {@link #clearSnapshot()} is called, or the Transaction is deleted. - * - * @return The snapshot or null if there is no snapshot - */ + /** + * Returns the Snapshot created by the last call to {@link #setSnapshot()}. + *
<p>
    + * REQUIRED: The returned Snapshot is only valid up until the next time + * {@link #setSnapshot()}/{@link #setSnapshotOnNextOperation()} is called, + * {@link #clearSnapshot()} is called, or the Transaction is deleted. + * + * @return The snapshot or null if there is no snapshot + */ public Snapshot getSnapshot() { assert(isOwningHandle()); final long snapshotNativeHandle = getSnapshot(nativeHandle_); if(snapshotNativeHandle == 0) { return null; } else { - final Snapshot snapshot = new Snapshot(snapshotNativeHandle); - return snapshot; + return new Snapshot(snapshotNativeHandle); } } /** * Clears the current snapshot (i.e. no snapshot will be 'set') - * + *
<p>
    * This removes any snapshot that currently exists or is set to be created * on the next update operation ({@link #setSnapshotOnNextOperation()}). - * - * Calling {@link #clearSnapshot()} has no effect on keys written before this + *
<p>
    + * Calling {@code #clearSnapshot()} has no effect on keys written before this * function has been called. - * + *
<p>
    * If a reference to a snapshot was retrieved via {@link #getSnapshot()}, it * will no longer be valid and should be discarded after a call to - * {@link #clearSnapshot()}. + * {@code #clearSnapshot()}. */ public void clearSnapshot() { assert(isOwningHandle()); @@ -186,17 +185,17 @@ public class Transaction extends RocksObject { /** * Write all batched keys to the db atomically. - * + *
<p>
    * Returns OK on success. - * + *
<p>
    * May return any error status that could be returned by DB:Write(). - * + *
<p>
    * If this transaction was created by an {@link OptimisticTransactionDB} * Status::Busy() may be returned if the transaction could not guarantee * that there are no write conflicts. Status::TryAgain() may be returned * if the memtable history size is not large enough * (See max_write_buffer_number_to_maintain). - * + *
<p>
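A sketch of handling such a conflict (assuming {@code txn} is in scope and the surrounding method declares {@code throws RocksDBException}):

try {
  txn.commit();
} catch (final RocksDBException e) {
  final Status status = e.getStatus();
  if (status != null && status.getCode() == Status.Code.Busy) {
    txn.rollback(); // write conflict: roll back and retry the transaction
  } else {
    throw e;
  }
}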
    * If this transaction was created by a {@link TransactionDB}, * Status::Expired() may be returned if this transaction has lived for * longer than {@link TransactionOptions#getExpiration()}. @@ -221,7 +220,7 @@ public class Transaction extends RocksObject { /** * Records the state of the transaction for future calls to * {@link #rollbackToSavePoint()}. - * + *
<p>
    * May be called multiple times to set multiple save points. * * @throws RocksDBException if an error occurs whilst setting a save point @@ -235,7 +234,7 @@ public class Transaction extends RocksObject { * Undo all operations in this transaction (put, merge, delete, putLogData) * since the most recent call to {@link #setSavePoint()} and removes the most * recent {@link #setSavePoint()}. - * + *
<p>
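A save point sketch (keys and values are placeholders):

txn.put(keyA, valueA);
txn.setSavePoint();
txn.put(keyB, valueB);
txn.rollbackToSavePoint(); // undoes only the write of keyB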
    * If there is no previous call to {@link #setSavePoint()}, * returns Status::NotFound() * @@ -252,11 +251,11 @@ public class Transaction extends RocksObject { * also read pending changes in this transaction. * Currently, this function will return Status::MergeInProgress if the most * recent write to the queried key in this batch is a Merge. - * + *
<p>
    * If {@link ReadOptions#snapshot()} is not set, the current version of the * key will be read. Calling {@link #setSnapshot()} does not affect the * version of the data returned. - * + *
<p>
    * Note that setting {@link ReadOptions#setSnapshot(Snapshot)} will affect * what is read from the DB but will NOT change which keys are read from this * transaction (the keys in this transaction do not yet belong to any snapshot @@ -285,11 +284,11 @@ public class Transaction extends RocksObject { * also read pending changes in this transaction. * Currently, this function will return Status::MergeInProgress if the most * recent write to the queried key in this batch is a Merge. - * + *
<p>
    * If {@link ReadOptions#snapshot()} is not set, the current version of the * key will be read. Calling {@link #setSnapshot()} does not affect the * version of the data returned. - * + *
<p>
    * Note that setting {@link ReadOptions#setSnapshot(Snapshot)} will affect * what is read from the DB but will NOT change which keys are read from this * transaction (the keys in this transaction do not yet belong to any snapshot @@ -316,11 +315,11 @@ public class Transaction extends RocksObject { * also read pending changes in this transaction. * Currently, this function will return Status::MergeInProgress if the most * recent write to the queried key in this batch is a Merge. - * + *
<p>
    * If {@link ReadOptions#snapshot()} is not set, the current version of the * key will be read. Calling {@link #setSnapshot()} does not affect the * version of the data returned. - * + *
<p>
    * Note that setting {@link ReadOptions#setSnapshot(Snapshot)} will affect * what is read from the DB but will NOT change which keys are read from this * transaction (the keys in this transaction do not yet belong to any snapshot @@ -367,11 +366,11 @@ public class Transaction extends RocksObject { * also read pending changes in this transaction. * Currently, this function will return Status::MergeInProgress if the most * recent write to the queried key in this batch is a Merge. - * + *
<p>
    * If {@link ReadOptions#snapshot()} is not set, the current version of the * key will be read. Calling {@link #setSnapshot()} does not affect the * version of the data returned. - * + *
<p>
    * Note that setting {@link ReadOptions#setSnapshot(Snapshot)} will affect * what is read from the DB but will NOT change which keys are read from this * transaction (the keys in this transaction do not yet belong to any snapshot @@ -417,11 +416,11 @@ public class Transaction extends RocksObject { * also read pending changes in this transaction. * Currently, this function will return Status::MergeInProgress if the most * recent write to the queried key in this batch is a Merge. - * + *
<p>
    * If {@link ReadOptions#snapshot()} is not set, the current version of the * key will be read. Calling {@link #setSnapshot()} does not affect the * version of the data returned. - * + *
<p>
    * Note that setting {@link ReadOptions#setSnapshot(Snapshot)} will affect * what is read from the DB but will NOT change which keys are read from this * transaction (the keys in this transaction do not yet belong to any snapshot @@ -454,11 +453,11 @@ public class Transaction extends RocksObject { * also read pending changes in this transaction. * Currently, this function will return Status::MergeInProgress if the most * recent write to the queried key in this batch is a Merge. - * + *
<p>
    * If {@link ReadOptions#snapshot()} is not set, the current version of the * key will be read. Calling {@link #setSnapshot()} does not affect the * version of the data returned. - * + *
<p>
    * Note that setting {@link ReadOptions#setSnapshot(Snapshot)} will affect * what is read from the DB but will NOT change which keys are read from this * transaction (the keys in this transaction do not yet belong to any snapshot @@ -489,22 +488,22 @@ public class Transaction extends RocksObject { * transaction after it has first been read (or after the snapshot if a * snapshot is set in this transaction). The transaction behavior is the * same regardless of whether the key exists or not. - * + *
<p>
    * Note: Currently, this function will return Status::MergeInProgress * if the most recent write to the queried key in this batch is a Merge. - * + *
<p>
    * The values returned by this function are similar to * {@link RocksDB#get(ColumnFamilyHandle, ReadOptions, byte[])}. * If value==nullptr, then this function will not read any data, but will * still ensure that this key cannot be written to by outside of this * transaction. - * + *
<p>
    * If this transaction was created by an {@link OptimisticTransactionDB}, * {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)} * could cause {@link #commit()} to fail. Otherwise, it could return any error * that could be returned by * {@link RocksDB#get(ColumnFamilyHandle, ReadOptions, byte[])}. - * + *
<p>
    * If this transaction was created on a {@link TransactionDB}, an * {@link RocksDBException} may be thrown with an accompanying {@link Status} * when: @@ -570,22 +569,22 @@ public class Transaction extends RocksObject { * transaction after it has first been read (or after the snapshot if a * snapshot is set in this transaction). The transaction behavior is the * same regardless of whether the key exists or not. - * + *

    * Note: Currently, this function will return Status::MergeInProgress * if the most recent write to the queried key in this batch is a Merge. - * + *

    * The values returned by this function are similar to * {@link RocksDB#get(ReadOptions, byte[])}. * If value==nullptr, then this function will not read any data, but will * still ensure that this key cannot be written to from outside of this * transaction. - * + *

    * If this transaction was created on an {@link OptimisticTransactionDB}, * {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)} * could cause {@link #commit()} to fail. Otherwise, it could return any error * that could be returned by * {@link RocksDB#get(ReadOptions, byte[])}. - * + *

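A minimal sketch of the getForUpdate flow documented above (illustrative only, not part of this patch; path, the UTF_8 static import, and the increment(...) helper are assumptions):

    try (final Options options = new Options().setCreateIfMissing(true);
         final TransactionDBOptions txnDbOptions = new TransactionDBOptions();
         final TransactionDB txnDb = TransactionDB.open(options, txnDbOptions, path);
         final WriteOptions writeOptions = new WriteOptions();
         final ReadOptions readOptions = new ReadOptions();
         final Transaction txn = txnDb.beginTransaction(writeOptions)) {
      final byte[] key = "counter".getBytes(UTF_8);
      // Locks the key on a TransactionDB; on an OptimisticTransactionDB it is
      // instead recorded for validation at commit().
      final byte[] current = txn.getForUpdate(readOptions, key, true /* exclusive */);
      txn.put(key, increment(current)); // increment(...) is a hypothetical helper
      txn.commit();
    }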
    * If this transaction was created on a {@link TransactionDB}, an * {@link RocksDBException} may be thrown with an accompanying {@link Status} * when: @@ -618,7 +617,7 @@ public class Transaction extends RocksObject { /** * A multi-key version of * {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)}. - * + *

    * * @param readOptions Read options. * @param columnFamilyHandles {@link org.rocksdb.ColumnFamilyHandle} @@ -655,7 +654,7 @@ public class Transaction extends RocksObject { /** * A multi-key version of * {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)}. - * + *

    * * @param readOptions Read options. * @param columnFamilyHandles {@link org.rocksdb.ColumnFamilyHandle} @@ -691,7 +690,7 @@ public class Transaction extends RocksObject { /** * A multi-key version of {@link #getForUpdate(ReadOptions, byte[], boolean)}. - * + *

    * * @param readOptions Read options. * @param keys the keys to retrieve the values for. @@ -715,7 +714,7 @@ public class Transaction extends RocksObject { /** * A multi-key version of {@link #getForUpdate(ReadOptions, byte[], boolean)}. - * + *

    * * @param readOptions Read options. * @param keys the keys to retrieve the values for. @@ -741,14 +740,14 @@ public class Transaction extends RocksObject { * Returns an iterator that will iterate on all keys in the default * column family including both keys in the DB and uncommitted keys in this * transaction. - * + *

    * Setting {@link ReadOptions#setSnapshot(Snapshot)} will affect what is read * from the DB but will NOT change which keys are read from this transaction * (the keys in this transaction do not yet belong to any snapshot and will be * fetched regardless). - * + *

    * Caller is responsible for deleting the returned Iterator. - * + *

    * The returned iterator is only valid until {@link #commit()}, * {@link #rollback()}, or {@link #rollbackToSavePoint()} is called. * @@ -766,15 +765,15 @@ public class Transaction extends RocksObject { * Returns an iterator that will iterate on all keys in the column family * specified by {@code columnFamilyHandle} including both keys in the DB * and uncommitted keys in this transaction. - * + *

    * Setting {@link ReadOptions#setSnapshot(Snapshot)} will affect what is read * from the DB but will NOT change which keys are read from this transaction * (the keys in this transaction do not yet belong to any snapshot and will be * fetched regardless). - * + *

    * Caller is responsible for calling {@link RocksIterator#close()} on * the returned Iterator. - * + *

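The iterator contract above, sketched (illustrative; txn is an open Transaction and process(...) is a hypothetical callback):

    try (final ReadOptions readOptions = new ReadOptions();
         final RocksIterator it = txn.getIterator(readOptions)) {
      for (it.seekToFirst(); it.isValid(); it.next()) {
        process(it.key(), it.value()); // sees DB state plus this transaction's pending writes
      }
    } // iterator closed here, before any commit()/rollback()/rollbackToSavePoint()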
    * The returned iterator is only valid until {@link #commit()}, * {@link #rollback()}, or {@link #rollbackToSavePoint()} is called. * * @param... @@ -794,10 +793,10 @@ /** * Similar to {@link RocksDB#put(ColumnFamilyHandle, byte[], byte[])}, but * will also perform conflict checking on the keys to be written. - * + *

    * If this Transaction was created on an {@link OptimisticTransactionDB}, * these functions should always succeed. - * + *

    * If this Transaction was created on a {@link TransactionDB}, an * {@link RocksDBException} may be thrown with an accompanying {@link Status} * when: @@ -829,12 +828,12 @@ public class Transaction extends RocksObject { /** * Similar to {@link #put(ColumnFamilyHandle, byte[], byte[], boolean)} * but with {@code assumeTracked = false}. - * + *

    * Will also perform conflict checking on the keys to be written. - * + *

    * If this Transaction was created on an {@link OptimisticTransactionDB}, * these functions should always succeed. - * + *

    * If this Transaction was created on a {@link TransactionDB}, an * {@link RocksDBException} may be thrown with an accompanying {@link Status} * when: @@ -861,10 +860,10 @@ public class Transaction extends RocksObject { /** * Similar to {@link RocksDB#put(byte[], byte[])}, but * will also perform conflict checking on the keys to be written. - * + *

    * If this Transaction was created on an {@link OptimisticTransactionDB}, * these functions should always succeed. - * + *

    * If this Transaction was created on a {@link TransactionDB}, an * {@link RocksDBException} may be thrown with an accompanying {@link Status} * when: @@ -915,7 +914,7 @@ public class Transaction extends RocksObject { /** * Similar to {@link #put(ColumnFamilyHandle, byte[][], byte[][], boolean)} * but with {@code assumeTracked = false}. - * + *

    * Allows you to specify the key and value in several parts that will be * concatenated together. * * @@ -956,10 +955,10 @@ public class Transaction extends RocksObject { /** * Similar to {@link RocksDB#merge(ColumnFamilyHandle, byte[], byte[])}, but * will also perform conflict checking on the keys to be written. - * + *

    * If this Transaction was created on an {@link OptimisticTransactionDB}, * these functions should always succeed. - * + *

    * If this Transaction was created on a {@link TransactionDB}, an * {@link RocksDBException} may be thrown with an accompanying {@link Status} * when: @@ -992,12 +991,12 @@ public class Transaction extends RocksObject { /** * Similar to {@link #merge(ColumnFamilyHandle, byte[], byte[], boolean)} * but with {@code assumeTracked = false}. - * + *

    * Will also perform conflict checking on the keys to be written. - * + *

    * If this Transaction was created on an {@link OptimisticTransactionDB}, * these functions should always succeed. - * + *

    * If this Transaction was created on a {@link TransactionDB}, an * {@link RocksDBException} may be thrown with an accompanying {@link Status} * when: @@ -1024,10 +1023,10 @@ public class Transaction extends RocksObject { /** * Similar to {@link RocksDB#merge(byte[], byte[])}, but * will also perform conflict checking on the keys to be written. - * + *

    * If this Transaction was created on an {@link OptimisticTransactionDB}, * these functions should always succeed. - * + *

    * If this Transaction was created on a {@link TransactionDB}, an * {@link RocksDBException} may be thrown with an accompanying {@link Status} * when: @@ -1052,10 +1051,10 @@ public class Transaction extends RocksObject { /** * Similar to {@link RocksDB#delete(ColumnFamilyHandle, byte[])}, but * will also perform conflict checking on the keys to be written. - * + *

    * If this Transaction was created on an {@link OptimisticTransactionDB}, * these functions should always succeed. - * + *

    * If this Transaction was created on a {@link TransactionDB}, an * {@link RocksDBException} may be thrown with an accompanying {@link Status} * when: @@ -1086,12 +1085,12 @@ public class Transaction extends RocksObject { /** * Similar to {@link #delete(ColumnFamilyHandle, byte[], boolean)} * but with {@code assumeTracked = false}. - * + *

    * Will also perform conflict checking on the keys to be written. - * + *

    * If this Transaction was created on an {@link OptimisticTransactionDB}, * these functions should always succeed. - * + *

    * If this Transaction was created on a {@link TransactionDB}, an * {@link RocksDBException} may be thrown with an accompanying {@link Status} * when: @@ -1117,10 +1116,10 @@ public class Transaction extends RocksObject { /** * Similar to {@link RocksDB#delete(byte[])}, but * will also perform conflict checking on the keys to be written. - * + *

    * If this Transaction was created on an {@link OptimisticTransactionDB}, * these functions should always succeed. - * + *

    * If this Transaction was created on a {@link TransactionDB}, an * {@link RocksDBException} may be thrown with an accompanying {@link Status} * when: @@ -1168,7 +1167,7 @@ public class Transaction extends RocksObject { /** * Similar to {@link #delete(ColumnFamilyHandle, byte[][], boolean)} * but with {@code assumeTracked = false}. - * + *

    * Allows you to specify the key in several parts that will be * concatenated together. * * @@ -1204,10 +1203,10 @@ public class Transaction extends RocksObject { /** * Similar to {@link RocksDB#singleDelete(ColumnFamilyHandle, byte[])}, but * will also perform conflict checking on the keys to be written. - * + *

    * If this Transaction was created on an {@link OptimisticTransactionDB}, * these functions should always succeed. - * + *

    * If this Transaction was created on a {@link TransactionDB}, an * {@link RocksDBException} may be thrown with an accompanying {@link Status} * when: @@ -1239,12 +1238,12 @@ public class Transaction extends RocksObject { /** * Similar to {@link #singleDelete(ColumnFamilyHandle, byte[], boolean)} * but with {@code assumeTracked = false}. - * + *

    * Will also perform conflict checking on the keys to be written. - * + *

    * If this Transaction was created on an {@link OptimisticTransactionDB}, * these functions should always succeed. - * + *

    * If this Transaction was created on a {@link TransactionDB}, an * {@link RocksDBException} may be thrown with an accompanying {@link Status} * when: @@ -1271,10 +1270,10 @@ public class Transaction extends RocksObject { /** * Similar to {@link RocksDB#singleDelete(byte[])}, but * will also perform conflict checking on the keys to be written. - * + *

    * If this Transaction was created on an {@link OptimisticTransactionDB}, * these functions should always succeed. - * + *

    * If this Transaction was created on a {@link TransactionDB}, an * {@link RocksDBException} may be thrown with an accompanying {@link Status} * when: @@ -1324,7 +1323,7 @@ public class Transaction extends RocksObject { /** * Similar to {@link #singleDelete(ColumnFamilyHandle, byte[][], boolean)} * but with {@code assumeTracked = false}. - * + *

    * Allows you to specify the key in several parts that will be * concatenated together. * * @@ -1363,10 +1362,10 @@ public class Transaction extends RocksObject { * Similar to {@link RocksDB#put(ColumnFamilyHandle, byte[], byte[])}, * but operates on the transaction's write batch. This write will only happen * if this transaction gets committed successfully. - * + *

    * Unlike {@link #put(ColumnFamilyHandle, byte[], byte[])} no conflict * checking will be performed for this key. - * + *

    * If this Transaction was created on a {@link TransactionDB}, this function * will still acquire locks necessary to make sure this write doesn't cause * conflicts in other transactions; This may cause a {@link RocksDBException} * @@ -1390,10 +1389,10 @@ public class Transaction extends RocksObject { * Similar to {@link RocksDB#put(byte[], byte[])}, * but operates on the transaction's write batch. This write will only happen * if this transaction gets committed successfully. - * + *

    * Unlike {@link #put(byte[], byte[])} no conflict * checking will be performed for this key. - * + *

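To contrast put with putUntracked as described above, a sketch (txn, newBalance and logEntry are assumptions):

    txn.put("balance".getBytes(UTF_8), newBalance);      // tracked: conflict-checked at commit
    txn.putUntracked("audit".getBytes(UTF_8), logEntry); // untracked: skips conflict checking,
                                                         // but still locked on a TransactionDB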
    * If this Transaction was created on a {@link TransactionDB}, this function * will still acquire locks necessary to make sure this write doesn't cause * conflicts in other transactions; This may cause a {@link RocksDBException} * @@ -1455,10 +1454,10 @@ public class Transaction extends RocksObject { * Similar to {@link RocksDB#merge(ColumnFamilyHandle, byte[], byte[])}, * but operates on the transaction's write batch. This write will only happen * if this transaction gets committed successfully. - * + *

    * Unlike {@link #merge(ColumnFamilyHandle, byte[], byte[])} no conflict * checking will be performed for this key. - * + *

    * If this Transaction was created on a {@link TransactionDB}, this function * will still acquire locks necessary to make sure this write doesn't cause * conflicts in other transactions; This may cause a {@link RocksDBException} * @@ -1481,10 +1480,10 @@ public class Transaction extends RocksObject { * Similar to {@link RocksDB#merge(byte[], byte[])}, * but operates on the transaction's write batch. This write will only happen * if this transaction gets committed successfully. - * + *

    * Unlike {@link #merge(byte[], byte[])} no conflict * checking will be performed for this key. - * + *

    * If this Transaction was created on a {@link TransactionDB}, this function * will still acquire locks necessary to make sure this write doesn't cause * conflicts in other transactions; This may cause a {@link RocksDBException} * @@ -1506,10 +1505,10 @@ public class Transaction extends RocksObject { * Similar to {@link RocksDB#delete(ColumnFamilyHandle, byte[])}, * but operates on the transaction's write batch. This write will only happen * if this transaction gets committed successfully. - * + *

    * Unlike {@link #delete(ColumnFamilyHandle, byte[])} no conflict * checking will be performed for this key. - * + *

    * If this Transaction was created on a {@link TransactionDB}, this function * will still acquire locks necessary to make sure this write doesn't cause * conflicts in other transactions; This may cause a {@link RocksDBException} * @@ -1532,10 +1531,10 @@ public class Transaction extends RocksObject { * Similar to {@link RocksDB#delete(byte[])}, * but operates on the transaction's write batch. This write will only happen * if this transaction gets committed successfully. - * + *

    * Unlike {@link #delete(byte[])} no conflict * checking will be performed for this key. - * + *

    * If this Transaction was created on a {@link TransactionDB}, this function * will still acquire locks necessary to make sure this write doesn't cause * conflicts in other transactions; This may cause a {@link RocksDBException} @@ -1600,13 +1599,13 @@ public class Transaction extends RocksObject { * By default, all put/merge/delete operations will be indexed in the * transaction so that get/getForUpdate/getIterator can search for these * keys. - * + *

    * If the caller does not want to fetch the keys about to be written, * they may want to avoid indexing as a performance optimization. - * Calling {@link #disableIndexing()} will turn off indexing for all future + * Calling {@code #disableIndexing()} will turn off indexing for all future * put/merge/delete operations until {@link #enableIndexing()} is called. - * - * If a key is put/merge/deleted after {@link #disableIndexing()} is called + *

    + * If a key is put/merge/deleted after {@code #disableIndexing()} is called * and then is fetched via get/getForUpdate/getIterator, the result of the * fetch is undefined. */ @@ -1684,7 +1683,7 @@ public class Transaction extends RocksObject { /** * Fetch the underlying write batch that contains all pending changes to be * committed. - * + *

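A sketch of the indexing toggle described above (txn and the kvs collection are assumptions):

    txn.disableIndexing();      // subsequent writes are not added to the lookup index
    for (final byte[][] kv : kvs) {
      txn.put(kv[0], kv[1]);    // do not read these keys back before re-enabling
    }
    txn.enableIndexing();       // get/getForUpdate/getIterator are reliable again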
    * Note: You should not write or delete anything from the batch directly and * should only use the functions in the {@link Transaction} class to * write to this transaction. @@ -1693,15 +1692,13 @@ public class Transaction extends RocksObject { */ public WriteBatchWithIndex getWriteBatch() { assert(isOwningHandle()); - final WriteBatchWithIndex writeBatchWithIndex = - new WriteBatchWithIndex(getWriteBatch(nativeHandle_)); - return writeBatchWithIndex; + return new WriteBatchWithIndex(getWriteBatch(nativeHandle_)); } /** * Change the value of {@link TransactionOptions#getLockTimeout()} * (in milliseconds) for this transaction. - * + *

    * Has no effect on OptimisticTransactions. * * @param lockTimeout the timeout (in milliseconds) for locks used by this @@ -1719,9 +1716,7 @@ public class Transaction extends RocksObject { */ public WriteOptions getWriteOptions() { assert(isOwningHandle()); - final WriteOptions writeOptions = - new WriteOptions(getWriteOptions(nativeHandle_)); - return writeOptions; + return new WriteOptions(getWriteOptions(nativeHandle_)); } /** @@ -1738,28 +1733,28 @@ public class Transaction extends RocksObject { * If this key was previously fetched in this transaction using * {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)}/ * {@link #multiGetForUpdate(ReadOptions, List, byte[][])}, calling - * {@link #undoGetForUpdate(ColumnFamilyHandle, byte[])} will tell + * {@code #undoGetForUpdate(ColumnFamilyHandle, byte[])} will tell * the transaction that it no longer needs to do any conflict checking * for this key. - * + *

    * If a key has been fetched N times via * {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)}/ * {@link #multiGetForUpdate(ReadOptions, List, byte[][])}, then - * {@link #undoGetForUpdate(ColumnFamilyHandle, byte[])} will only have an + * {@code #undoGetForUpdate(ColumnFamilyHandle, byte[])} will only have an * effect if it is also called N times. If this key has been written to in - * this transaction, {@link #undoGetForUpdate(ColumnFamilyHandle, byte[])} + * this transaction, {@code #undoGetForUpdate(ColumnFamilyHandle, byte[])} * will have no effect. - * + *

    * If {@link #setSavePoint()} has been called after the * {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)}, - * {@link #undoGetForUpdate(ColumnFamilyHandle, byte[])} will not have any + * {@code #undoGetForUpdate(ColumnFamilyHandle, byte[])} will not have any * effect. - * + *

    * If this Transaction was created by an {@link OptimisticTransactionDB}, - * calling {@link #undoGetForUpdate(ColumnFamilyHandle, byte[])} can affect + * calling {@code #undoGetForUpdate(ColumnFamilyHandle, byte[])} can affect * whether this key is conflict checked at commit time. * If this Transaction was created by a {@link TransactionDB}, - * calling {@link #undoGetForUpdate(ColumnFamilyHandle, byte[])} may release + * calling {@code #undoGetForUpdate(ColumnFamilyHandle, byte[])} may release * any held locks for this key. * * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} @@ -1776,28 +1771,28 @@ public class Transaction extends RocksObject { * If this key was previously fetched in this transaction using * {@link #getForUpdate(ReadOptions, byte[], boolean)}/ * {@link #multiGetForUpdate(ReadOptions, List, byte[][])}, calling - * {@link #undoGetForUpdate(byte[])} will tell + * {@code #undoGetForUpdate(byte[])} will tell * the transaction that it no longer needs to do any conflict checking * for this key. - * + *

    * If a key has been fetched N times via * {@link #getForUpdate(ReadOptions, byte[], boolean)}/ * {@link #multiGetForUpdate(ReadOptions, List, byte[][])}, then - * {@link #undoGetForUpdate(byte[])} will only have an + * {@code #undoGetForUpdate(byte[])} will only have an * effect if it is also called N times. If this key has been written to in - * this transaction, {@link #undoGetForUpdate(byte[])} + * this transaction, {@code #undoGetForUpdate(byte[])} * will have no effect. - * + *

    * If {@link #setSavePoint()} has been called after the * {@link #getForUpdate(ReadOptions, byte[], boolean)}, - * {@link #undoGetForUpdate(byte[])} will not have any + * {@code #undoGetForUpdate(byte[])} will not have any * effect. - * + *

    * If this Transaction was created by an {@link OptimisticTransactionDB}, - * calling {@link #undoGetForUpdate(byte[])} can affect + * calling {@code #undoGetForUpdate(byte[])} can affect * whether this key is conflict checked at commit time. * If this Transaction was created by a {@link TransactionDB}, - * calling {@link #undoGetForUpdate(byte[])} may release + * calling {@code #undoGetForUpdate(byte[])} may release * any held locks for this key. * * @param key the key to retrieve the value for. @@ -1828,9 +1823,7 @@ public class Transaction extends RocksObject { */ public WriteBatch getCommitTimeWriteBatch() { assert(isOwningHandle()); - final WriteBatch writeBatch = - new WriteBatch(getCommitTimeWriteBatch(nativeHandle_)); - return writeBatch; + return new WriteBatch(getCommitTimeWriteBatch(nativeHandle_)); } /** @@ -1908,7 +1901,7 @@ public class Transaction extends RocksObject { /** * Get the execution status of the transaction. - * + *

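A sketch of undoGetForUpdate as described above (txn, readOptions, key and the needsChange(...) predicate are assumptions):

    final byte[] value = txn.getForUpdate(readOptions, key, true);
    if (!needsChange(value)) {
      txn.undoGetForUpdate(key); // stop conflict checking; may release the lock on a TransactionDB
    }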
    * NOTE: The execution status of an Optimistic Transaction * never changes. This is only useful for non-optimistic transactions! * @@ -2045,11 +2038,10 @@ public class Transaction extends RocksObject { private native void setSavePoint(final long handle) throws RocksDBException; private native void rollbackToSavePoint(final long handle) throws RocksDBException; - private native byte[] get(final long handle, final long readOptionsHandle, - final byte key[], final int keyLength, final long columnFamilyHandle) - throws RocksDBException; - private native byte[] get(final long handle, final long readOptionsHandle, - final byte key[], final int keyLen) throws RocksDBException; + private native byte[] get(final long handle, final long readOptionsHandle, final byte[] key, + final int keyLength, final long columnFamilyHandle) throws RocksDBException; + private native byte[] get(final long handle, final long readOptionsHandle, final byte[] key, + final int keyLen) throws RocksDBException; private native byte[][] multiGet(final long handle, final long readOptionsHandle, final byte[][] keys, final long[] columnFamilyHandles) throws RocksDBException; @@ -2057,10 +2049,10 @@ public class Transaction extends RocksObject { final long readOptionsHandle, final byte[][] keys) throws RocksDBException; private native byte[] getForUpdate(final long handle, final long readOptionsHandle, - final byte key[], final int keyLength, final long columnFamilyHandle, final boolean exclusive, + final byte[] key, final int keyLength, final long columnFamilyHandle, final boolean exclusive, final boolean doValidate) throws RocksDBException; private native byte[] getForUpdate(final long handle, final long readOptionsHandle, - final byte key[], final int keyLen, final boolean exclusive, final boolean doValidate) + final byte[] key, final int keyLen, final boolean exclusive, final boolean doValidate) throws RocksDBException; private native byte[][] multiGetForUpdate(final long handle, final long readOptionsHandle, final byte[][] keys, diff --git a/java/src/main/java/org/rocksdb/TransactionDB.java b/java/src/main/java/org/rocksdb/TransactionDB.java index 86f25fe15..105f4eff0 100644 --- a/java/src/main/java/org/rocksdb/TransactionDB.java +++ b/java/src/main/java/org/rocksdb/TransactionDB.java @@ -106,12 +106,12 @@ public class TransactionDB extends RocksDB /** * This is similar to {@link #close()} except that it * throws an exception if any error occurs. - * + *

    * This will not fsync the WAL files. * If syncing is required, the caller must first call {@link #syncWal()} * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch * with {@link WriteOptions#setSync(boolean)} set to true. - * + *

    * See also {@link #close()}. * * @throws RocksDBException if an error occurs whilst closing. @@ -129,12 +129,12 @@ public class TransactionDB extends RocksDB /** * This is similar to {@link #closeE()} except that it * silently ignores any errors. - * + *

    * This will not fsync the WAL files. * If syncing is required, the caller must first call {@link #syncWal()} * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch * with {@link WriteOptions#setSync(boolean)} set to true. - * + *

    * See also {@link #close()}. */ @Override @@ -233,8 +233,7 @@ public class TransactionDB extends RocksDB private final long[] transactionIDs; private final boolean exclusive; - public KeyLockInfo(final String key, final long transactionIDs[], - final boolean exclusive) { + public KeyLockInfo(final String key, final long[] transactionIDs, final boolean exclusive) { this.key = key; this.transactionIDs = transactionIDs; this.exclusive = exclusive; @@ -381,8 +380,7 @@ public class TransactionDB extends RocksDB private static native long[] open(final long dbOptionsHandle, final long transactionDbOptionsHandle, final String path, final byte[][] columnFamilyNames, final long[] columnFamilyOptions); - private native static void closeDatabase(final long handle) - throws RocksDBException; + private static native void closeDatabase(final long handle) throws RocksDBException; private native long beginTransaction(final long handle, final long writeOptionsHandle); private native long beginTransaction(final long handle, diff --git a/java/src/main/java/org/rocksdb/TransactionDBOptions.java b/java/src/main/java/org/rocksdb/TransactionDBOptions.java index 7f4296a7c..391025d6a 100644 --- a/java/src/main/java/org/rocksdb/TransactionDBOptions.java +++ b/java/src/main/java/org/rocksdb/TransactionDBOptions.java @@ -14,8 +14,8 @@ public class TransactionDBOptions extends RocksObject { /** * Specifies the maximum number of keys that can be locked at the same time * per column family. - * - * If the number of locked keys is greater than {@link #getMaxNumLocks()}, + *

    + * If the number of locked keys is greater than {@code #getMaxNumLocks()}, * transaction writes (or GetForUpdate) will return an error. * * @return The maximum number of keys that can be locked @@ -28,7 +28,7 @@ public class TransactionDBOptions extends RocksObject { /** * Specifies the maximum number of keys that can be locked at the same time * per column family. - * + *

    * If the number of locked keys is greater than {@link #getMaxNumLocks()}, * transaction writes (or GetForUpdate) will return an error. * @@ -57,7 +57,7 @@ public class TransactionDBOptions extends RocksObject { * Increasing this value will increase the concurrency by dividing the lock * table (per column family) into more sub-tables, each with their own * separate mutex. - * + *

    * Default: 16 * * @param numStripes The number of sub-tables @@ -94,7 +94,7 @@ public class TransactionDBOptions extends RocksObject { * If negative, there is no timeout. Not using a timeout is not recommended * as it can lead to deadlocks. Currently, there is no deadlock-detection to * recover from a deadlock. - * + *

    * Default: 1000 * * @param transactionLockTimeout the default wait timeout in milliseconds @@ -113,7 +113,7 @@ public class TransactionDBOptions extends RocksObject { * OUTSIDE of a transaction (ie by calling {@link RocksDB#put}, * {@link RocksDB#merge}, {@link RocksDB#delete} or {@link RocksDB#write} * directly). - * + *

    * If 0, no waiting is done if a lock cannot instantly be acquired. * If negative, there is no timeout and will block indefinitely when acquiring * a lock. @@ -131,29 +131,28 @@ public class TransactionDBOptions extends RocksObject { * OUTSIDE of a transaction (ie by calling {@link RocksDB#put}, * {@link RocksDB#merge}, {@link RocksDB#delete} or {@link RocksDB#write} * directly). - * + *

    * If 0, no waiting is done if a lock cannot instantly be acquired. * If negative, there is no timeout and will block indefinitely when acquiring * a lock. - * + *

    * Not using a timeout can lead to deadlocks. Currently, there * is no deadlock-detection to recover from a deadlock. While DB writes * cannot deadlock with other DB writes, they can deadlock with a transaction. * A negative timeout should only be used if all transactions have a small * expiration set. - * + *

    * Default: 1000 * * @param defaultLockTimeout the timeout in milliseconds when writing a key * OUTSIDE of a transaction * @return this TransactionDBOptions instance */ - public TransactionDBOptions setDefaultLockTimeout( - final long defaultLockTimeout) { - assert(isOwningHandle()); - setDefaultLockTimeout(nativeHandle_, defaultLockTimeout); - return this; - } + public TransactionDBOptions setDefaultLockTimeout(final long defaultLockTimeout) { + assert (isOwningHandle()); + setDefaultLockTimeout(nativeHandle_, defaultLockTimeout); + return this; + } // /** // * If set, the {@link TransactionDB} will use this implementation of a mutex @@ -199,7 +198,7 @@ public class TransactionDBOptions extends RocksObject { return this; } - private native static long newTransactionDBOptions(); + private static native long newTransactionDBOptions(); private native long getMaxNumLocks(final long handle); private native void setMaxNumLocks(final long handle, final long maxNumLocks); diff --git a/java/src/main/java/org/rocksdb/TransactionOptions.java b/java/src/main/java/org/rocksdb/TransactionOptions.java index 195fc85e4..f93d3cb3c 100644 --- a/java/src/main/java/org/rocksdb/TransactionOptions.java +++ b/java/src/main/java/org/rocksdb/TransactionOptions.java @@ -54,7 +54,7 @@ public class TransactionOptions extends RocksObject /** * The wait timeout in milliseconds when a transaction attempts to lock a key. - * + *

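The TransactionDBOptions settings above, gathered into one illustrative configuration (the values are examples, not recommendations):

    try (final TransactionDBOptions txnDbOptions = new TransactionDBOptions()
             .setMaxNumLocks(10_000)            // cap on locked keys per column family
             .setNumStripes(16)                 // lock-table sub-tables; default 16
             .setTransactionLockTimeout(1_000)  // ms, per-key wait inside transactions
             .setDefaultLockTimeout(1_000)) {   // ms, for writes outside any transaction
      // pass to TransactionDB.open(...)
    }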
    * If 0, no waiting is done if a lock cannot instantly be acquired. * If negative, {@link TransactionDBOptions#getTransactionLockTimeout(long)} * will be used @@ -69,11 +69,11 @@ public class TransactionOptions extends RocksObject /** * If positive, specifies the wait timeout in milliseconds when * a transaction attempts to lock a key. - * + *

    * If 0, no waiting is done if a lock cannot instantly be acquired. * If negative, {@link TransactionDBOptions#getTransactionLockTimeout(long)} * will be used - * + *

    * Default: -1 * * @param lockTimeout the lock timeout in milliseconds @@ -88,7 +88,7 @@ public class TransactionOptions extends RocksObject /** * Expiration duration in milliseconds. - * + *

    * If non-negative, transactions that last longer than this many milliseconds * will fail to commit. If not set, a forgotten transaction that is never * committed, rolled back, or deleted will never relinquish any locks it @@ -103,12 +103,12 @@ public class TransactionOptions extends RocksObject /** * Expiration duration in milliseconds. - * + *

    * If non-negative, transactions that last longer than this many milliseconds * will fail to commit. If not set, a forgotten transaction that is never * committed, rolled back, or deleted will never relinquish any locks it * holds. This could prevent keys from being written by other writers. - * + *

    * Default: -1 * * @param expiration the expiration duration in milliseconds @@ -133,7 +133,7 @@ public class TransactionOptions extends RocksObject /** * Sets the number of traversals to make during deadlock detection. - * + *

    * Default: 50 * * @param deadlockDetectDepth the number of traversals to make during @@ -168,7 +168,7 @@ public class TransactionOptions extends RocksObject return this; } - private native static long newTransactionOptions(); + private static native long newTransactionOptions(); private native boolean isSetSnapshot(final long handle); private native void setSetSnapshot(final long handle, final boolean setSnapshot); diff --git a/java/src/main/java/org/rocksdb/TransactionalDB.java b/java/src/main/java/org/rocksdb/TransactionalDB.java index 740181989..1ba955496 100644 --- a/java/src/main/java/org/rocksdb/TransactionalDB.java +++ b/java/src/main/java/org/rocksdb/TransactionalDB.java @@ -8,7 +8,7 @@ package org.rocksdb; interface TransactionalDB> extends AutoCloseable { /** * Starts a new Transaction. - * + *

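The TransactionOptions knobs above combined with beginTransaction, sketched (txnDb and writeOptions are assumed; the values are examples):

    try (final TransactionOptions txnOptions = new TransactionOptions()
             .setSetSnapshot(true)         // as if Transaction#setSnapshot() were called
             .setLockTimeout(500)          // ms; -1 defers to the TransactionDB-wide default
             .setExpiration(30_000)        // ms; -1 means the transaction never expires
             .setDeadlockDetect(true)
             .setDeadlockDetectDepth(50);  // traversal limit; default 50
         final Transaction txn = txnDb.beginTransaction(writeOptions, txnOptions)) {
      // ...
    }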
    * Caller is responsible for calling {@link #close()} on the returned * transaction when it is no longer needed. * @@ -19,7 +19,7 @@ interface TransactionalDB> extends AutoCloseab /** * Starts a new Transaction. - * + *

    * Caller is responsible for calling {@link #close()} on the returned * transaction when it is no longer needed. * @@ -32,7 +32,7 @@ interface TransactionalDB> extends AutoCloseab /** * Starts a new Transaction. - * + *

    * Caller is responsible for calling {@link #close()} on the returned * transaction when it is no longer needed. * @@ -48,7 +48,7 @@ interface TransactionalDB> extends AutoCloseab /** * Starts a new Transaction. - * + *

    * Caller is responsible for calling {@link #close()} on the returned * transaction when it is no longer needed. * diff --git a/java/src/main/java/org/rocksdb/TransactionalOptions.java b/java/src/main/java/org/rocksdb/TransactionalOptions.java index d55ee900c..2175693fd 100644 --- a/java/src/main/java/org/rocksdb/TransactionalOptions.java +++ b/java/src/main/java/org/rocksdb/TransactionalOptions.java @@ -20,7 +20,7 @@ interface TransactionalOptions> /** * Setting the setSnapshot to true is the same as calling * {@link Transaction#setSnapshot()}. - * + *

    * Default: false * * @param setSnapshot Whether to set a snapshot diff --git a/java/src/main/java/org/rocksdb/TtlDB.java b/java/src/main/java/org/rocksdb/TtlDB.java index a7adaf4b2..2bb0c4333 100644 --- a/java/src/main/java/org/rocksdb/TtlDB.java +++ b/java/src/main/java/org/rocksdb/TtlDB.java @@ -125,7 +125,7 @@ public class TtlDB extends RocksDB { cfOptionHandles[i] = cfDescriptor.getOptions().nativeHandle_; } - final int ttlVals[] = new int[ttlValues.size()]; + final int[] ttlVals = new int[ttlValues.size()]; for(int i = 0; i < ttlValues.size(); i++) { ttlVals[i] = ttlValues.get(i); } @@ -144,12 +144,12 @@ public class TtlDB extends RocksDB { * * This is similar to {@link #close()} except that it * throws an exception if any error occurs. - * + *

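A sketch of opening the TtlDB whose close semantics are documented above (path and token are assumptions; error handling elided):

    try (final Options options = new Options().setCreateIfMissing(true);
         final TtlDB db = TtlDB.open(options, path, 3600 /* TTL seconds */, false /* readOnly */)) {
      db.put("session".getBytes(UTF_8), token); // eligible for removal on compaction
    }                                           // once older than 3600 seconds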
    * This will not fsync the WAL files. * If syncing is required, the caller must first call {@link #syncWal()} * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch * with {@link WriteOptions#setSync(boolean)} set to true. - * + *

    * See also {@link #close()}. * * @throws RocksDBException if an error occurs whilst closing. @@ -172,7 +172,7 @@ public class TtlDB extends RocksDB { * If syncing is required, the caller must first call {@link #syncWal()} * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch * with {@link WriteOptions#setSync(boolean)} set to true. - * + *

    * See also {@link #close()}. */ @Override @@ -230,16 +230,13 @@ public class TtlDB extends RocksDB { @Override protected native void disposeInternal(final long handle); - private native static long open(final long optionsHandle, - final String db_path, final int ttl, final boolean readOnly) - throws RocksDBException; - private native static long[] openCF(final long optionsHandle, - final String db_path, final byte[][] columnFamilyNames, - final long[] columnFamilyOptions, final int[] ttlValues, + private static native long open(final long optionsHandle, final String db_path, final int ttl, + final boolean readOnly) throws RocksDBException; + private static native long[] openCF(final long optionsHandle, final String db_path, + final byte[][] columnFamilyNames, final long[] columnFamilyOptions, final int[] ttlValues, final boolean readOnly) throws RocksDBException; private native long createColumnFamilyWithTtl(final long handle, final byte[] columnFamilyName, final long columnFamilyOptions, int ttl) throws RocksDBException; - private native static void closeDatabase(final long handle) - throws RocksDBException; + private static native void closeDatabase(final long handle) throws RocksDBException; } diff --git a/java/src/main/java/org/rocksdb/TxnDBWritePolicy.java b/java/src/main/java/org/rocksdb/TxnDBWritePolicy.java index 837ce6157..28cb8556b 100644 --- a/java/src/main/java/org/rocksdb/TxnDBWritePolicy.java +++ b/java/src/main/java/org/rocksdb/TxnDBWritePolicy.java @@ -23,7 +23,7 @@ public enum TxnDBWritePolicy { */ WRITE_UNPREPARED((byte)0x2); - private byte value; + private final byte value; TxnDBWritePolicy(final byte value) { this.value = value; diff --git a/java/src/main/java/org/rocksdb/UInt64AddOperator.java b/java/src/main/java/org/rocksdb/UInt64AddOperator.java index cce9b298d..0cffdce8c 100644 --- a/java/src/main/java/org/rocksdb/UInt64AddOperator.java +++ b/java/src/main/java/org/rocksdb/UInt64AddOperator.java @@ -14,6 +14,6 @@ public class UInt64AddOperator extends MergeOperator { super(newSharedUInt64AddOperator()); } - private native static long newSharedUInt64AddOperator(); + private static native long newSharedUInt64AddOperator(); @Override protected final native void disposeInternal(final long handle); } diff --git a/java/src/main/java/org/rocksdb/WALRecoveryMode.java b/java/src/main/java/org/rocksdb/WALRecoveryMode.java index d8b9eeced..b8c098f94 100644 --- a/java/src/main/java/org/rocksdb/WALRecoveryMode.java +++ b/java/src/main/java/org/rocksdb/WALRecoveryMode.java @@ -9,10 +9,9 @@ package org.rocksdb; * The WAL Recover Mode */ public enum WALRecoveryMode { - /** * Original levelDB recovery - * + *

    * We tolerate incomplete records in trailing data on all logs * Use case: This is legacy behavior (default) */ TolerateCorruptedTailRecords((byte)0x00), /** * Recover from clean shutdown - * + *

    * We don't expect to find any corruption in the WAL * Use case: This is ideal for unit tests and rare applications that * can require high consistency guarantee @@ -44,7 +43,7 @@ public enum WALRecoveryMode { */ SkipAnyCorruptedRecords((byte)0x03); - private byte value; + private final byte value; WALRecoveryMode(final byte value) { this.value = value; diff --git a/java/src/main/java/org/rocksdb/WBWIRocksIterator.java b/java/src/main/java/org/rocksdb/WBWIRocksIterator.java index ce146eb3f..e0b99b1b5 100644 --- a/java/src/main/java/org/rocksdb/WBWIRocksIterator.java +++ b/java/src/main/java/org/rocksdb/WBWIRocksIterator.java @@ -18,12 +18,12 @@ public class WBWIRocksIterator /** * Get the current entry - * + *

    * The WriteEntry is only valid * until the iterator is repositioned. * If you want to keep the WriteEntry across iterator * movements, you must make a copy of its data! - * + *

    * Note - This method is not thread-safe with respect to the WriteEntry * as it performs a non-atomic update across the fields of the WriteEntry * diff --git a/java/src/main/java/org/rocksdb/WalFilter.java b/java/src/main/java/org/rocksdb/WalFilter.java index 37e36213a..a2836634a 100644 --- a/java/src/main/java/org/rocksdb/WalFilter.java +++ b/java/src/main/java/org/rocksdb/WalFilter.java @@ -12,13 +12,12 @@ import java.util.Map; * records or modify their processing on recovery. */ public interface WalFilter { - /** * Provide ColumnFamily->LogNumber map to filter * so that filter can determine whether a log number applies to a given * column family (i.e. that log hasn't been flushed to SST already for the * column family). - * + *

    * We also pass in the name->id map, as only the name is known during * recovery (as handles are opened post-recovery), * while write batch callbacks happen in terms of column family id. diff --git a/java/src/main/java/org/rocksdb/WalProcessingOption.java b/java/src/main/java/org/rocksdb/WalProcessingOption.java index 889602edc..3a9c2be0e 100644 --- a/java/src/main/java/org/rocksdb/WalProcessingOption.java +++ b/java/src/main/java/org/rocksdb/WalProcessingOption.java @@ -6,7 +6,7 @@ package org.rocksdb; public enum WalProcessingOption { - /** + /* * Continue processing as usual. */ CONTINUE_PROCESSING((byte)0x0), diff --git a/java/src/main/java/org/rocksdb/WriteBatch.java b/java/src/main/java/org/rocksdb/WriteBatch.java index 9b46108d0..49e1f7f20 100644 --- a/java/src/main/java/org/rocksdb/WriteBatch.java +++ b/java/src/main/java/org/rocksdb/WriteBatch.java @@ -9,16 +9,16 @@ import java.nio.ByteBuffer; /** * WriteBatch holds a collection of updates to apply atomically to a DB. - * + *

    * The updates are applied in the order in which they are added * to the WriteBatch. For example, the value of "key" will be "v3" * after the following batch is written: - * + *

    * batch.put("key", "v1"); * batch.remove("key"); * batch.put("key", "v2"); * batch.put("key", "v3"); - * + *

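The batch example above, as it looks through the Java API (db is assumed open; the C++ remove() is delete() here):

    try (final WriteBatch batch = new WriteBatch();
         final WriteOptions writeOptions = new WriteOptions()) {
      batch.put("key".getBytes(UTF_8), "v1".getBytes(UTF_8));
      batch.delete("key".getBytes(UTF_8));
      batch.put("key".getBytes(UTF_8), "v2".getBytes(UTF_8));
      batch.put("key".getBytes(UTF_8), "v3".getBytes(UTF_8));
      db.write(writeOptions, batch); // applied atomically; "key" ends up as "v3"
    }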
    * Multiple threads can invoke const methods on a WriteBatch without * external synchronization, but if any of the threads may call a * non-const method, all threads accessing the same WriteBatch must use @@ -180,7 +180,7 @@ public class WriteBatch extends AbstractWriteBatch { /** * Gets the WAL termination point. - * + *

    * See {@link #markWalTerminationPoint()} * * @return the WAL termination point @@ -260,9 +260,8 @@ public class WriteBatch extends AbstractWriteBatch { @Override final native void setMaxBytes(final long nativeHandle, final long maxBytes); - private native static long newWriteBatch(final int reserved_bytes); - private native static long newWriteBatch(final byte[] serialized, - final int serializedLength); + private static native long newWriteBatch(final int reserved_bytes); + private static native long newWriteBatch(final byte[] serialized, final int serializedLength); private native void iterate(final long handle, final long handlerHandle) throws RocksDBException; private native byte[] data(final long nativeHandle) throws RocksDBException; @@ -282,10 +281,9 @@ public class WriteBatch extends AbstractWriteBatch { /** * Handler callback for iterating over the contents of a batch. */ - public static abstract class Handler - extends RocksCallbackObject { + public abstract static class Handler extends RocksCallbackObject { public Handler() { - super(null); + super(0L); } @Override diff --git a/java/src/main/java/org/rocksdb/WriteBatchInterface.java b/java/src/main/java/org/rocksdb/WriteBatchInterface.java index 92caa22b3..32cd8d1e7 100644 --- a/java/src/main/java/org/rocksdb/WriteBatchInterface.java +++ b/java/src/main/java/org/rocksdb/WriteBatchInterface.java @@ -136,12 +136,12 @@ public interface WriteBatchInterface { * Remove the database entry for {@code key}. Requires that the key exists * and was not overwritten. It is not an error if the key did not exist * in the database. - * + *

    * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple * times), then the result of calling SingleDelete() on this key is undefined. * SingleDelete() only behaves correctly if there has been only one Put() * for this key since the previous call to SingleDelete() for this key. - * + *

    * This feature is currently an experimental performance optimization * for a very specific workload. It is up to the caller to ensure that * SingleDelete is only used for a key that is not deleted using Delete() or @@ -160,12 +160,12 @@ public interface WriteBatchInterface { * Remove the database entry for {@code key}. Requires that the key exists * and was not overwritten. It is not an error if the key did not exist * in the database. - * + *

    * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple * times), then the result of calling SingleDelete() on this key is undefined. * SingleDelete() only behaves correctly if there has been only one Put() * for this key since the previous call to SingleDelete() for this key. - * + *

    * This feature is currently an experimental performance optimization * for a very specific workload. It is up to the caller to ensure that * SingleDelete is only used for a key that is not deleted using Delete() or * @@ -186,7 +185,7 @@ public interface WriteBatchInterface { * Removes the database entries in the range ["beginKey", "endKey"), i.e., * including "beginKey" and excluding "endKey". Returns a non-OK status on error. It * is not an error if no keys exist in the range ["beginKey", "endKey"). - * + *

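The SingleDelete contract above in miniature (batch, key and value are assumed):

    batch.put(key, value);   // exactly one Put since the last SingleDelete for this key
    batch.singleDelete(key); // well-defined: the entry is removed
    // put(key, v1); put(key, v2); singleDelete(key); -> result is undefined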
    * Delete the database entry (if any) for "key". Returns OK on success, and a * non-OK status on error. It is not an error if "key" did not exist in the * database. @@ -203,7 +202,7 @@ public interface WriteBatchInterface { * Removes the database entries in the range ["beginKey", "endKey"), i.e., * including "beginKey" and excluding "endKey". Returns a non-OK status on error. It * is not an error if no keys exist in the range ["beginKey", "endKey"). - * + *

    * Delete the database entry (if any) for "key". Returns OK on success, and a * non-OK status on error. It is not an error if "key" did not exist in the * database. @@ -224,9 +224,9 @@ public interface WriteBatchInterface { * it will not be persisted to the SST files. When iterating over this * WriteBatch, WriteBatch::Handler::LogData will be called with the contents * of the blob as it is encountered. Blobs, puts, deletes, and merges will be - * encountered in the same order in thich they were inserted. The blob will + * encountered in the same order in which they were inserted. The blob will * NOT consume sequence number(s) and will NOT increase the count of the batch - * + *

    * Example application: add timestamps to the transaction log for use in * replication. * @@ -257,7 +257,7 @@ public interface WriteBatchInterface { /** * Pop the most recent save point. - * + *

    * That is to say that it removes the last save point, * which was set by {@link #setSavePoint()}. * diff --git a/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java b/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java index d85b8e3f7..d41be5856 100644 --- a/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java +++ b/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java @@ -10,10 +10,10 @@ import java.nio.ByteBuffer; /** * Similar to {@link org.rocksdb.WriteBatch} but with a binary searchable * index built for all the keys inserted. - * + *

    * Calling put, merge, remove or putLogData calls the same function * as with {@link org.rocksdb.WriteBatch} whilst also building an index. - * + *

    * A user can call {@link org.rocksdb.WriteBatchWithIndex#newIterator()} to * create an iterator over the write batch or * {@link org.rocksdb.WriteBatchWithIndex#newIteratorWithBase(org.rocksdb.RocksIterator)} @@ -22,7 +22,7 @@ import java.nio.ByteBuffer; public class WriteBatchWithIndex extends AbstractWriteBatch { /** * Creates a WriteBatchWithIndex where no bytes - * are reserved up-front, bytewise comparison is + * are reserved up-front, byte wise comparison is * used for fallback key comparisons, * and duplicate keys operations are retained */ @@ -30,10 +30,9 @@ public class WriteBatchWithIndex extends AbstractWriteBatch { super(newWriteBatchWithIndex()); } - /** * Creates a WriteBatchWithIndex where no bytes - * are reserved up-front, bytewise comparison is + * are reserved up-front, byte wise comparison is * used for fallback key comparisons, and duplicate key * assignment is determined by the constructor argument * @@ -48,9 +47,9 @@ public class WriteBatchWithIndex extends AbstractWriteBatch { /** * Creates a WriteBatchWithIndex * - * @param fallbackIndexComparator We fallback to this comparator + * @param fallbackIndexComparator We fall back to this comparator * to compare keys within a column family if we cannot determine - * the column family and so look up it's comparator. + * the column family and so look up its comparator. * * @param reservedBytes reserved bytes in underlying WriteBatch * @@ -115,7 +114,7 @@ public class WriteBatchWithIndex extends AbstractWriteBatch { * Provides Read-Your-Own-Writes like functionality by * creating a new Iterator that will use {@link org.rocksdb.WBWIRocksIterator} * as a delta and baseIterator as a base - * + *

    * Updating write batch with the current key of the iterator is not safe. * We strongly recommend users not to do it. It will invalidate the current * key() and value() of the iterator. This invalidation happens even before @@ -138,7 +137,7 @@ public class WriteBatchWithIndex extends AbstractWriteBatch { * Provides Read-Your-Own-Writes like functionality by * creating a new Iterator that will use {@link org.rocksdb.WBWIRocksIterator} * as a delta and baseIterator as a base - * + *

    * Updating write batch with the current key of the iterator is not safe. * We strongly recommend users not to do it. It will invalidate the current * key() and value() of the iterator. This invalidation happens even before @@ -173,7 +172,7 @@ public class WriteBatchWithIndex extends AbstractWriteBatch { * @param baseIterator The base iterator, * e.g. {@link org.rocksdb.RocksDB#newIterator()} * @return An iterator which shows a view comprised of both the database - * point-in-timefrom baseIterator and modifications made in this write batch. + * point-in-time from baseIterator and modifications made in this write batch. */ public RocksIterator newIteratorWithBase(final RocksIterator baseIterator) { return newIteratorWithBase(baseIterator.parent_.getDefaultColumnFamily(), baseIterator, null); @@ -189,7 +188,7 @@ public class WriteBatchWithIndex extends AbstractWriteBatch { * e.g. {@link org.rocksdb.RocksDB#newIterator()} * @param readOptions the read options, or null * @return An iterator which shows a view comprised of both the database - * point-in-timefrom baseIterator and modifications made in this write batch. + * point-in-time from baseIterator and modifications made in this write batch. */ public RocksIterator newIteratorWithBase(final RocksIterator baseIterator, /* @Nullable */ final ReadOptions readOptions) { @@ -238,11 +237,11 @@ public class WriteBatchWithIndex extends AbstractWriteBatch { /** * Similar to {@link RocksDB#get(ColumnFamilyHandle, byte[])} but will also * read writes from this batch. - * + *

    * This function will query both this batch and the DB and then merge * the results using the DB's merge operator (if the batch contains any * merge requests). - * + *

    * Setting {@link ReadOptions#setSnapshot(Snapshot)} will affect what is * read from the DB but will NOT change which keys are read from the batch * (the keys in this batch do not yet belong to any snapshot and will be @@ -268,11 +267,11 @@ public class WriteBatchWithIndex extends AbstractWriteBatch { /** * Similar to {@link RocksDB#get(byte[])} but will also * read writes from this batch. - * + *

    * This function will query both this batch and the DB and then merge * the results using the DB's merge operator (if the batch contains any * merge requests). - * + *

    * Setting {@link ReadOptions#setSnapshot(Snapshot)} will affect what is * read from the DB but will NOT change which keys are read from the batch * (the keys in this batch do not yet belong to any snapshot and will be @@ -338,12 +337,10 @@ public class WriteBatchWithIndex extends AbstractWriteBatch { final long maxBytes); @Override final native WriteBatch getWriteBatch(final long handle); - private native static long newWriteBatchWithIndex(); - private native static long newWriteBatchWithIndex(final boolean overwriteKey); - private native static long newWriteBatchWithIndex( - final long fallbackIndexComparatorHandle, - final byte comparatorType, final int reservedBytes, - final boolean overwriteKey); + private static native long newWriteBatchWithIndex(); + private static native long newWriteBatchWithIndex(final boolean overwriteKey); + private static native long newWriteBatchWithIndex(final long fallbackIndexComparatorHandle, + final byte comparatorType, final int reservedBytes, final boolean overwriteKey); private native long iterator0(final long handle); private native long iterator1(final long handle, final long cfHandle); private native long iteratorWithBase(final long handle, final long cfHandle, diff --git a/java/src/main/java/org/rocksdb/WriteBufferManager.java b/java/src/main/java/org/rocksdb/WriteBufferManager.java index 8ec963958..3364d6eab 100644 --- a/java/src/main/java/org/rocksdb/WriteBufferManager.java +++ b/java/src/main/java/org/rocksdb/WriteBufferManager.java @@ -15,7 +15,7 @@ public class WriteBufferManager extends RocksObject { /** * Construct a new instance of WriteBufferManager. - * + *

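A sketch of the read-your-own-writes behaviour described above (db is assumed open):

    try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true /* overwriteKey */);
         final ReadOptions readOptions = new ReadOptions()) {
      wbwi.put("k".getBytes(UTF_8), "pending".getBytes(UTF_8));
      // Merges the batch entry with the DB's committed state:
      final byte[] value = wbwi.getFromBatchAndDB(db, readOptions, "k".getBytes(UTF_8));
      try (final RocksIterator base = db.newIterator();
           final RocksIterator it = wbwi.newIteratorWithBase(base)) {
        for (it.seekToFirst(); it.isValid(); it.next()) {
          // iterates the batch overlaid on the base iterator
        }
      }
    }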
    * Check * https://github.com/facebook/rocksdb/wiki/Write-Buffer-Manager * for more details on when to use it @@ -40,11 +40,11 @@ public class WriteBufferManager extends RocksObject { return allowStall_; } - private native static long newWriteBufferManager( + private static native long newWriteBufferManager( final long bufferSizeBytes, final long cacheHandle, final boolean allowStall); @Override protected native void disposeInternal(final long handle); - private boolean allowStall_; + private final boolean allowStall_; } diff --git a/java/src/main/java/org/rocksdb/WriteOptions.java b/java/src/main/java/org/rocksdb/WriteOptions.java index 5a3ffa6c5..7c184b094 100644 --- a/java/src/main/java/org/rocksdb/WriteOptions.java +++ b/java/src/main/java/org/rocksdb/WriteOptions.java @@ -7,7 +7,7 @@ package org.rocksdb; /** * Options that control write operations. - * + *

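A sketch of wiring up the WriteBufferManager mentioned above (sizes are arbitrary examples):

    try (final Cache cache = new LRUCache(128L << 20);
         final WriteBufferManager writeBufferManager = new WriteBufferManager(64L << 20, cache);
         final Options options = new Options()
             .setCreateIfMissing(true)
             .setWriteBufferManager(writeBufferManager)) {
      // open the RocksDB instance(s) with these options; memtable memory is
      // charged against the 64 MiB budget and the shared cache
    }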
    * Note that developers should call WriteOptions.dispose() to release the * c++ side memory before a WriteOptions instance runs out of scope. */ @@ -28,33 +28,32 @@ public class WriteOptions extends RocksObject { /** * Copy constructor for WriteOptions. - * + *

    * NOTE: This does a shallow copy, which means comparator, merge_operator, compaction_filter, * compaction_filter_factory and other pointers will be cloned! * * @param other The WriteOptions to copy. */ - public WriteOptions(WriteOptions other) { + public WriteOptions(final WriteOptions other) { super(copyWriteOptions(other.nativeHandle_)); } - /** * If true, the write will be flushed from the operating system * buffer cache (by calling WritableFile::Sync()) before the write * is considered complete. If this flag is true, writes will be * slower. - * + *

    * If this flag is false, and the machine crashes, some recent * writes may be lost. Note that if it is just the process that * crashes (i.e., the machine does not reboot), no writes will be * lost even if sync==false. - * + *

    * In other words, a DB write with sync==false has similar * crash semantics as the "write()" system call. A DB write * with sync==true has similar crash semantics to a "write()" * system call followed by "fdatasync()". - * + *

    * Default: false * * @param flag a boolean flag to indicate whether a write @@ -71,12 +70,12 @@ public class WriteOptions extends RocksObject { * buffer cache (by calling WritableFile::Sync()) before the write * is considered complete. If this flag is true, writes will be * slower. - * + *

    * If this flag is false, and the machine crashes, some recent * writes may be lost. Note that if it is just the process that * crashes (i.e., the machine does not reboot), no writes will be * lost even if sync==false. - * + *

    * In other words, a DB write with sync==false has similar * crash semantics as the "write()" system call. A DB write * with sync==true has similar crash semantics to a "write()" @@ -121,7 +120,7 @@ public class WriteOptions extends RocksObject { * If true and if user is trying to write to column families that don't exist * (they were dropped), ignore the write (don't return an error). If there * are multiple writes in a WriteBatch, other writes will succeed. - * + *

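The sync semantics above, in code (db, key and value are assumed):

    try (final WriteOptions writeOptions = new WriteOptions().setSync(true)) {
      db.put(writeOptions, key, value); // survives a machine crash, at roughly
    }                                   // write() + fdatasync() cost per write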
    * Default: false * * @param ignoreMissingColumnFamilies true to ignore writes to column families @@ -138,7 +137,7 @@ public class WriteOptions extends RocksObject { * If true and if user is trying to write to column families that don't exist * (they were dropped), ignore the write (don't return an error). If there * are multiple writes in a WriteBatch, other writes will succeed. - * + *

    * Default: false * * @return true if writes to column families which don't exist are ignored @@ -175,7 +174,7 @@ public class WriteOptions extends RocksObject { * will be cancelled immediately with {@link Status.Code#Incomplete} returned. * Otherwise, it will be slowed down. The slowdown value is determined by * RocksDB to guarantee it introduces minimum impacts to high priority writes. - * + *

    * Default: false * * @param lowPri true if the write request should be of lower priority than @@ -191,7 +190,7 @@ public class WriteOptions extends RocksObject { /** * Returns true if this write request is of lower priority if compaction is * behind. - * + *

    * See {@link #setLowPri(boolean)}. * * @return true if this write request is of lower priority, false otherwise. @@ -206,7 +205,7 @@ public class WriteOptions extends RocksObject { * in concurrent writes if keys in one writebatch are sequential. In * non-concurrent writes (when {@code concurrent_memtable_writes} is false) this * option will be ignored. - * + *

    * Default: false * * @return true if writebatch will maintain the last insert positions of each memtable as hints in @@ -222,7 +221,7 @@ public class WriteOptions extends RocksObject { * in concurrent writes if keys in one writebatch are sequential. In * non-concurrent writes (when {@code concurrent_memtable_writes} is false) this * option will be ignored. - * + *

    * Default: false * * @param memtableInsertHintPerBatch true if writebatch should maintain the last insert positions @@ -234,8 +233,8 @@ public class WriteOptions extends RocksObject { return this; } - private native static long newWriteOptions(); - private native static long copyWriteOptions(long handle); + private static native long newWriteOptions(); + private static native long copyWriteOptions(long handle); @Override protected final native void disposeInternal(final long handle); private native void setSync(long handle, boolean flag); diff --git a/java/src/main/java/org/rocksdb/WriteStallInfo.java b/java/src/main/java/org/rocksdb/WriteStallInfo.java index 4aef0eda9..1cade0acb 100644 --- a/java/src/main/java/org/rocksdb/WriteStallInfo.java +++ b/java/src/main/java/org/rocksdb/WriteStallInfo.java @@ -51,12 +51,12 @@ public class WriteStallInfo { } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - WriteStallInfo that = (WriteStallInfo) o; + final WriteStallInfo that = (WriteStallInfo) o; return Objects.equals(columnFamilyName, that.columnFamilyName) && currentCondition == that.currentCondition && previousCondition == that.previousCondition; } diff --git a/java/src/test/java/org/rocksdb/AbstractTransactionTest.java b/java/src/test/java/org/rocksdb/AbstractTransactionTest.java index 46685f9fd..d57258009 100644 --- a/java/src/test/java/org/rocksdb/AbstractTransactionTest.java +++ b/java/src/test/java/org/rocksdb/AbstractTransactionTest.java @@ -5,26 +5,22 @@ package org.rocksdb; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.assertj.core.api.Assertions.assertThat; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Random; - -import static java.nio.charset.StandardCharsets.UTF_8; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; /** * Base class of {@link TransactionTest} and {@link OptimisticTransactionTest} */ public abstract class AbstractTransactionTest { - - protected final static byte[] TXN_TEST_COLUMN_FAMILY = "txn_test_cf" - .getBytes(); + protected static final byte[] TXN_TEST_COLUMN_FAMILY = "txn_test_cf".getBytes(); protected static final Random rand = PlatformRandomHelper. 
diff --git a/java/src/test/java/org/rocksdb/AbstractTransactionTest.java b/java/src/test/java/org/rocksdb/AbstractTransactionTest.java
index 46685f9fd..d57258009 100644
--- a/java/src/test/java/org/rocksdb/AbstractTransactionTest.java
+++ b/java/src/test/java/org/rocksdb/AbstractTransactionTest.java
@@ -5,26 +5,22 @@
 
 package org.rocksdb;
 
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.assertj.core.api.Assertions.assertThat;
 
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Random;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.assertj.core.api.Assertions.fail;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
 
 /**
  * Base class of {@link TransactionTest} and {@link OptimisticTransactionTest}
  */
 public abstract class AbstractTransactionTest {
-
-  protected final static byte[] TXN_TEST_COLUMN_FAMILY = "txn_test_cf"
-      .getBytes();
+  protected static final byte[] TXN_TEST_COLUMN_FAMILY = "txn_test_cf".getBytes();
 
   protected static final Random rand = PlatformRandomHelper.
       getPlatformSpecificRandomFactory();
@@ -107,8 +103,8 @@ public abstract class AbstractTransactionTest {
 
   @Test
   public void commit() throws RocksDBException {
-    final byte k1[] = "rollback-key1".getBytes(UTF_8);
-    final byte v1[] = "rollback-value1".getBytes(UTF_8);
+    final byte[] k1 = "rollback-key1".getBytes(UTF_8);
+    final byte[] v1 = "rollback-value1".getBytes(UTF_8);
     try(final DBContainer dbContainer = startDb()) {
       try(final Transaction txn = dbContainer.beginTransaction()) {
         txn.put(k1, v1);
@@ -124,8 +120,8 @@ public abstract class AbstractTransactionTest {
 
   @Test
   public void rollback() throws RocksDBException {
-    final byte k1[] = "rollback-key1".getBytes(UTF_8);
-    final byte v1[] = "rollback-value1".getBytes(UTF_8);
+    final byte[] k1 = "rollback-key1".getBytes(UTF_8);
+    final byte[] v1 = "rollback-value1".getBytes(UTF_8);
     try(final DBContainer dbContainer = startDb()) {
       try(final Transaction txn = dbContainer.beginTransaction()) {
         txn.put(k1, v1);
@@ -141,10 +137,10 @@ public abstract class AbstractTransactionTest {
 
   @Test
   public void savePoint() throws RocksDBException {
-    final byte k1[] = "savePoint-key1".getBytes(UTF_8);
-    final byte v1[] = "savePoint-value1".getBytes(UTF_8);
-    final byte k2[] = "savePoint-key2".getBytes(UTF_8);
-    final byte v2[] = "savePoint-value2".getBytes(UTF_8);
+    final byte[] k1 = "savePoint-key1".getBytes(UTF_8);
+    final byte[] v1 = "savePoint-value1".getBytes(UTF_8);
+    final byte[] k2 = "savePoint-key2".getBytes(UTF_8);
+    final byte[] v2 = "savePoint-value2".getBytes(UTF_8);
 
     try(final DBContainer dbContainer = startDb();
         final ReadOptions readOptions = new ReadOptions()) {
@@ -179,8 +175,8 @@ public abstract class AbstractTransactionTest {
 
   @Test
   public void getPut_cf() throws RocksDBException {
-    final byte k1[] = "key1".getBytes(UTF_8);
-    final byte v1[] = "value1".getBytes(UTF_8);
+    final byte[] k1 = "key1".getBytes(UTF_8);
+    final byte[] v1 = "value1".getBytes(UTF_8);
     try(final DBContainer dbContainer = startDb();
         final ReadOptions readOptions = new ReadOptions();
         final Transaction txn = dbContainer.beginTransaction()) {
@@ -193,8 +189,8 @@ public abstract class AbstractTransactionTest {
 
   @Test
   public void getPut() throws RocksDBException {
-    final byte k1[] = "key1".getBytes(UTF_8);
-    final byte v1[] = "value1".getBytes(UTF_8);
+    final byte[] k1 = "key1".getBytes(UTF_8);
+    final byte[] v1 = "value1".getBytes(UTF_8);
     try(final DBContainer dbContainer = startDb();
         final ReadOptions readOptions = new ReadOptions();
         final Transaction txn = dbContainer.beginTransaction()) {
@@ -279,8 +275,8 @@ public abstract class AbstractTransactionTest {
 
   @Test
   public void getForUpdate_cf() throws RocksDBException {
-    final byte k1[] = "key1".getBytes(UTF_8);
-    final byte v1[] = "value1".getBytes(UTF_8);
+    final byte[] k1 = "key1".getBytes(UTF_8);
+    final byte[] v1 = "value1".getBytes(UTF_8);
     try(final DBContainer dbContainer = startDb();
         final ReadOptions readOptions = new ReadOptions();
         final Transaction txn = dbContainer.beginTransaction()) {
@@ -293,8 +289,8 @@ public abstract class AbstractTransactionTest {
 
   @Test
   public void getForUpdate() throws RocksDBException {
-    final byte k1[] = "key1".getBytes(UTF_8);
-    final byte v1[] = "value1".getBytes(UTF_8);
+    final byte[] k1 = "key1".getBytes(UTF_8);
+    final byte[] v1 = "value1".getBytes(UTF_8);
     try(final DBContainer dbContainer = startDb();
         final ReadOptions readOptions = new ReadOptions();
         final Transaction txn = dbContainer.beginTransaction()) {
@@ -306,12 +302,8 @@ public abstract class AbstractTransactionTest {
 
   @Test
   public void multiGetForUpdate_cf() throws RocksDBException {
-    final byte keys[][] = new byte[][] {
-        "key1".getBytes(UTF_8),
-        "key2".getBytes(UTF_8)};
-    final byte values[][] = new byte[][] {
-        "value1".getBytes(UTF_8),
-        "value2".getBytes(UTF_8)};
+    final byte[][] keys = new byte[][] {"key1".getBytes(UTF_8), "key2".getBytes(UTF_8)};
+    final byte[][] values = new byte[][] {"value1".getBytes(UTF_8), "value2".getBytes(UTF_8)};
 
     try(final DBContainer dbContainer = startDb();
         final ReadOptions readOptions = new ReadOptions();
@@ -331,12 +323,8 @@ public abstract class AbstractTransactionTest {
 
   @Test
   public void multiGetForUpdate() throws RocksDBException {
-    final byte keys[][] = new byte[][]{
-        "key1".getBytes(UTF_8),
-        "key2".getBytes(UTF_8)};
-    final byte values[][] = new byte[][]{
-        "value1".getBytes(UTF_8),
-        "value2".getBytes(UTF_8)};
+    final byte[][] keys = new byte[][] {"key1".getBytes(UTF_8), "key2".getBytes(UTF_8)};
+    final byte[][] values = new byte[][] {"value1".getBytes(UTF_8), "value2".getBytes(UTF_8)};
 
     try (final DBContainer dbContainer = startDb();
          final ReadOptions readOptions = new ReadOptions();
@@ -349,6 +337,53 @@ public abstract class AbstractTransactionTest {
     }
   }
 
+  @Test
+  public void multiGetForUpdateAsList_cf() throws RocksDBException {
+    final List<byte[]> keys = Arrays.asList("key1".getBytes(UTF_8), "key2".getBytes(UTF_8));
+    final List<byte[]> values = Arrays.asList("value1".getBytes(UTF_8), "value2".getBytes(UTF_8));
+
+    try (final DBContainer dbContainer = startDb();
+         final ReadOptions readOptions = new ReadOptions();
+         final Transaction txn = dbContainer.beginTransaction()) {
+      final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily();
+      final List<ColumnFamilyHandle> cfList = Arrays.asList(testCf, testCf);
+
+      assertThat(txn.multiGetForUpdateAsList(readOptions, cfList, keys))
+          .isEqualTo(Arrays.asList(null, null));
+
+      txn.put(testCf, keys.get(0), values.get(0));
+      txn.put(testCf, keys.get(1), values.get(1));
+      final List<byte[]> result = txn.multiGetForUpdateAsList(readOptions, cfList, keys);
+      assertThat(result.size()).isEqualTo(values.size());
+      for (int i = 0; i < result.size(); i++) {
+        assertThat(result.get(i)).isEqualTo(values.get(i));
+      }
+    }
+  }
+
+  @Test
+  public void multiGetForUpdateAsList() throws RocksDBException {
+    final List<byte[]> keys = Arrays.asList("key1".getBytes(UTF_8), "key2".getBytes(UTF_8));
+    final List<byte[]> values = Arrays.asList("value1".getBytes(UTF_8), "value2".getBytes(UTF_8));
+
+    try (final DBContainer dbContainer = startDb();
+         final ReadOptions readOptions = new ReadOptions();
+         final Transaction txn = dbContainer.beginTransaction()) {
+      final List<byte[]> nulls = new ArrayList<>();
+      nulls.add(null);
+      nulls.add(null);
+      assertThat(txn.multiGetForUpdateAsList(readOptions, keys)).isEqualTo(nulls);
+
+      txn.put(keys.get(0), values.get(0));
+      txn.put(keys.get(1), values.get(1));
+      final List<byte[]> result = txn.multiGetForUpdateAsList(readOptions, keys);
+      assertThat(result.size()).isEqualTo(values.size());
+      for (int i = 0; i < result.size(); i++) {
+        assertThat(result.get(i)).isEqualTo(values.get(i));
+      }
+    }
+  }
+
   @Test
   public void getIterator() throws RocksDBException {
     try(final DBContainer dbContainer = startDb();
@@ -449,12 +484,8 @@ public abstract class AbstractTransactionTest {
 
   @Test
   public void delete_parts_cf() throws RocksDBException {
-    final byte keyParts[][] = new byte[][] {
-        "ke".getBytes(UTF_8),
-        "y1".getBytes(UTF_8)};
-    final byte valueParts[][] = new byte[][] {
-        "val".getBytes(UTF_8),
-        "ue1".getBytes(UTF_8)};
+    final byte[][] keyParts = new byte[][] {"ke".getBytes(UTF_8), "y1".getBytes(UTF_8)};
+    final byte[][] valueParts = new byte[][] {"val".getBytes(UTF_8), "ue1".getBytes(UTF_8)};
     final byte[] key = concat(keyParts);
     final byte[] value = concat(valueParts);
 
@@ -474,12 +505,8 @@ public abstract class AbstractTransactionTest {
 
   @Test
   public void delete_parts() throws RocksDBException {
-    final byte keyParts[][] = new byte[][] {
-        "ke".getBytes(UTF_8),
-        "y1".getBytes(UTF_8)};
-    final byte valueParts[][] = new byte[][] {
-        "val".getBytes(UTF_8),
-        "ue1".getBytes(UTF_8)};
+    final byte[][] keyParts = new byte[][] {"ke".getBytes(UTF_8), "y1".getBytes(UTF_8)};
+    final byte[][] valueParts = new byte[][] {"val".getBytes(UTF_8), "ue1".getBytes(UTF_8)};
     final byte[] key = concat(keyParts);
     final byte[] value = concat(valueParts);
 
@@ -499,8 +526,8 @@ public abstract class AbstractTransactionTest {
 
   @Test
   public void getPutUntracked_cf() throws RocksDBException {
-    final byte k1[] = "key1".getBytes(UTF_8);
-    final byte v1[] = "value1".getBytes(UTF_8);
+    final byte[] k1 = "key1".getBytes(UTF_8);
+    final byte[] v1 = "value1".getBytes(UTF_8);
     try(final DBContainer dbContainer = startDb();
         final ReadOptions readOptions = new ReadOptions();
         final Transaction txn = dbContainer.beginTransaction()) {
@@ -513,8 +540,8 @@ public abstract class AbstractTransactionTest {
 
   @Test
   public void getPutUntracked() throws RocksDBException {
-    final byte k1[] = "key1".getBytes(UTF_8);
-    final byte v1[] = "value1".getBytes(UTF_8);
+    final byte[] k1 = "key1".getBytes(UTF_8);
+    final byte[] v1 = "value1".getBytes(UTF_8);
     try(final DBContainer dbContainer = startDb();
         final ReadOptions readOptions = new ReadOptions();
         final Transaction txn = dbContainer.beginTransaction()) {
@@ -527,12 +554,8 @@ public abstract class AbstractTransactionTest {
   @Deprecated
   @Test
   public void multiGetPutUntracked_cf() throws RocksDBException {
-    final byte keys[][] = new byte[][] {
-        "key1".getBytes(UTF_8),
-        "key2".getBytes(UTF_8)};
-    final byte values[][] = new byte[][] {
-        "value1".getBytes(UTF_8),
-        "value2".getBytes(UTF_8)};
+    final byte[][] keys = new byte[][] {"key1".getBytes(UTF_8), "key2".getBytes(UTF_8)};
+    final byte[][] values = new byte[][] {"value1".getBytes(UTF_8), "value2".getBytes(UTF_8)};
 
     try(final DBContainer dbContainer = startDb();
         final ReadOptions readOptions = new ReadOptions();
@@ -659,12 +682,8 @@ public abstract class AbstractTransactionTest {
 
   @Test
   public void deleteUntracked_parts_cf() throws RocksDBException {
-    final byte keyParts[][] = new byte[][] {
-        "ke".getBytes(UTF_8),
-        "y1".getBytes(UTF_8)};
-    final byte valueParts[][] = new byte[][] {
-        "val".getBytes(UTF_8),
-        "ue1".getBytes(UTF_8)};
+    final byte[][] keyParts = new byte[][] {"ke".getBytes(UTF_8), "y1".getBytes(UTF_8)};
+    final byte[][] valueParts = new byte[][] {"val".getBytes(UTF_8), "ue1".getBytes(UTF_8)};
     final byte[] key = concat(keyParts);
     final byte[] value = concat(valueParts);
 
@@ -682,12 +701,8 @@ public abstract class AbstractTransactionTest {
 
   @Test
   public void deleteUntracked_parts() throws RocksDBException {
-    final byte keyParts[][] = new byte[][] {
-        "ke".getBytes(UTF_8),
-        "y1".getBytes(UTF_8)};
-    final byte valueParts[][] = new byte[][] {
-        "val".getBytes(UTF_8),
-        "ue1".getBytes(UTF_8)};
+    final byte[][] keyParts = new byte[][] {"ke".getBytes(UTF_8), "y1".getBytes(UTF_8)};
+    final byte[][] valueParts = new byte[][] {"val".getBytes(UTF_8), "ue1".getBytes(UTF_8)};
     final byte[] key = concat(keyParts);
     final byte[] value = concat(valueParts);
 
@@ -724,12 +739,12 @@ public abstract class AbstractTransactionTest {
 
   @Test
   public void numKeys() throws RocksDBException {
-    final byte k1[] = "key1".getBytes(UTF_8);
-    final byte v1[] = "value1".getBytes(UTF_8);
-    final byte k2[] = "key2".getBytes(UTF_8);
-    final byte v2[] = "value2".getBytes(UTF_8);
-    final byte k3[] = "key3".getBytes(UTF_8);
-    final byte v3[] = "value3".getBytes(UTF_8);
+    final byte[] k1 = "key1".getBytes(UTF_8);
+    final byte[] v1 = "value1".getBytes(UTF_8);
+    final byte[] k2 = "key2".getBytes(UTF_8);
+    final byte[] v2 = "value2".getBytes(UTF_8);
+    final byte[] k3 = "key3".getBytes(UTF_8);
+    final byte[] v3 = "value3".getBytes(UTF_8);
 
     try(final DBContainer dbContainer = startDb();
         final Transaction txn = dbContainer.beginTransaction()) {
@@ -761,8 +776,8 @@ public abstract class AbstractTransactionTest {
 
   @Test
   public void getWriteBatch() throws RocksDBException {
-    final byte k1[] = "key1".getBytes(UTF_8);
-    final byte v1[] = "value1".getBytes(UTF_8);
+    final byte[] k1 = "key1".getBytes(UTF_8);
+    final byte[] v1 = "value1".getBytes(UTF_8);
 
     try(final DBContainer dbContainer = startDb();
         final Transaction txn = dbContainer.beginTransaction()) {
@@ -786,8 +801,8 @@ public abstract class AbstractTransactionTest {
 
   @Test
   public void writeOptions() throws RocksDBException {
-    final byte k1[] = "key1".getBytes(UTF_8);
-    final byte v1[] = "value1".getBytes(UTF_8);
+    final byte[] k1 = "key1".getBytes(UTF_8);
+    final byte[] v1 = "value1".getBytes(UTF_8);
 
     try(final DBContainer dbContainer = startDb();
        final WriteOptions writeOptions = new WriteOptions()
@@ -816,8 +831,8 @@ public abstract class AbstractTransactionTest {
 
   @Test
   public void undoGetForUpdate_cf() throws RocksDBException {
-    final byte k1[] = "key1".getBytes(UTF_8);
-    final byte v1[] = "value1".getBytes(UTF_8);
+    final byte[] k1 = "key1".getBytes(UTF_8);
+    final byte[] v1 = "value1".getBytes(UTF_8);
     try(final DBContainer dbContainer = startDb();
        final ReadOptions readOptions = new ReadOptions();
        final Transaction txn = dbContainer.beginTransaction()) {
@@ -831,8 +846,8 @@ public abstract class AbstractTransactionTest {
 
   @Test
   public void undoGetForUpdate() throws RocksDBException {
-    final byte k1[] = "key1".getBytes(UTF_8);
-    final byte v1[] = "value1".getBytes(UTF_8);
+    final byte[] k1 = "key1".getBytes(UTF_8);
+    final byte[] v1 = "value1".getBytes(UTF_8);
     try(final DBContainer dbContainer = startDb();
        final ReadOptions readOptions = new ReadOptions();
        final Transaction txn = dbContainer.beginTransaction()) {
@@ -845,12 +860,12 @@ public abstract class AbstractTransactionTest {
 
   @Test
   public void rebuildFromWriteBatch() throws RocksDBException {
-    final byte k1[] = "key1".getBytes(UTF_8);
-    final byte v1[] = "value1".getBytes(UTF_8);
-    final byte k2[] = "key2".getBytes(UTF_8);
-    final byte v2[] = "value2".getBytes(UTF_8);
-    final byte k3[] = "key3".getBytes(UTF_8);
-    final byte v3[] = "value3".getBytes(UTF_8);
+    final byte[] k1 = "key1".getBytes(UTF_8);
+    final byte[] v1 = "value1".getBytes(UTF_8);
+    final byte[] k2 = "key2".getBytes(UTF_8);
+    final byte[] v2 = "value2".getBytes(UTF_8);
+    final byte[] k3 = "key3".getBytes(UTF_8);
+    final byte[] v3 = "value3".getBytes(UTF_8);
 
     try(final DBContainer dbContainer = startDb();
         final ReadOptions readOptions = new ReadOptions();
@@ -876,8 +891,8 @@ public abstract class AbstractTransactionTest {
 
   @Test
   public void getCommitTimeWriteBatch() throws RocksDBException {
-    final byte k1[] = "key1".getBytes(UTF_8);
-    final byte v1[] = "value1".getBytes(UTF_8);
+    final byte[] k1 = "key1".getBytes(UTF_8);
+    final byte[] v1 = "value1".getBytes(UTF_8);
 
     try(final DBContainer dbContainer = startDb();
        final Transaction txn = dbContainer.beginTransaction()) {
@@ -933,8 +948,7 @@ public abstract class AbstractTransactionTest {
     }
   }
 
-  protected static abstract class DBContainer
-      implements AutoCloseable {
+  protected abstract static class DBContainer implements AutoCloseable {
    protected final WriteOptions writeOptions;
    protected final List<ColumnFamilyHandle> columnFamilyHandles;
    protected final ColumnFamilyOptions columnFamilyOptions;
diff --git a/java/src/test/java/org/rocksdb/BackupEngineOptionsTest.java b/java/src/test/java/org/rocksdb/BackupEngineOptionsTest.java
index 794bf04fb..b07f8d33c 100644
--- a/java/src/test/java/org/rocksdb/BackupEngineOptionsTest.java
+++ b/java/src/test/java/org/rocksdb/BackupEngineOptionsTest.java
@@ -15,8 +15,7 @@ import org.junit.Test;
 import org.junit.rules.ExpectedException;
 
 public class BackupEngineOptionsTest {
-  private final static String ARBITRARY_PATH =
-      System.getProperty("java.io.tmpdir");
+  private static final String ARBITRARY_PATH = System.getProperty("java.io.tmpdir");
 
   @ClassRule
   public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
@@ -61,13 +60,10 @@ public class BackupEngineOptionsTest {
     try (final BackupEngineOptions backupEngineOptions = new BackupEngineOptions(ARBITRARY_PATH)) {
       assertThat(backupEngineOptions.infoLog()).isNull();
 
-      try(final Options options = new Options();
-          final Logger logger = new Logger(options){
-            @Override
-            protected void log(InfoLogLevel infoLogLevel, String logMsg) {
-
-            }
-          }) {
+      try (final Options options = new Options(); final Logger logger = new Logger(options) {
+             @Override
+             protected void log(final InfoLogLevel infoLogLevel, final String logMsg) {}
+           }) {
         backupEngineOptions.setInfoLog(logger);
         assertThat(backupEngineOptions.infoLog()).isEqualTo(logger);
       }
@@ -85,7 +81,7 @@ public class BackupEngineOptionsTest {
 
   @Test
   public void destroyOldData() {
-    try (final BackupEngineOptions backupEngineOptions = new BackupEngineOptions(ARBITRARY_PATH);) {
+    try (final BackupEngineOptions backupEngineOptions = new BackupEngineOptions(ARBITRARY_PATH)) {
       final boolean value = rand.nextBoolean();
       backupEngineOptions.setDestroyOldData(value);
       assertThat(backupEngineOptions.destroyOldData()).isEqualTo(value);
@@ -154,7 +150,7 @@ public class BackupEngineOptionsTest {
   @Test
   public void shareFilesWithChecksum() {
     try (final BackupEngineOptions backupEngineOptions = new BackupEngineOptions(ARBITRARY_PATH)) {
-      boolean value = rand.nextBoolean();
+      final boolean value = rand.nextBoolean();
       backupEngineOptions.setShareFilesWithChecksum(value);
       assertThat(backupEngineOptions.shareFilesWithChecksum()).isEqualTo(value);
     }
@@ -181,7 +177,7 @@ public class BackupEngineOptionsTest {
   @Test
   public void failBackupDirIsNull() {
     exception.expect(IllegalArgumentException.class);
-    try (final BackupEngineOptions opts = new BackupEngineOptions(null)) {
+    try (final BackupEngineOptions ignored = new BackupEngineOptions(null)) {
       //no-op
     }
   }
@@ -202,7 +198,7 @@ public class BackupEngineOptionsTest {
 
   @Test
   public void failShareTableFilesIfDisposed() {
-    try (BackupEngineOptions options = setupUninitializedBackupEngineOptions(exception)) {
+    try (final BackupEngineOptions options = setupUninitializedBackupEngineOptions(exception)) {
       options.shareTableFiles();
     }
   }
@@ -291,7 +287,8 @@ public class BackupEngineOptionsTest {
     }
   }
 
-  private BackupEngineOptions setupUninitializedBackupEngineOptions(ExpectedException exception) {
+  private BackupEngineOptions setupUninitializedBackupEngineOptions(
+      final ExpectedException exception) {
     final BackupEngineOptions backupEngineOptions = new BackupEngineOptions(ARBITRARY_PATH);
     backupEngineOptions.close();
     exception.expect(AssertionError.class);
diff --git a/java/src/test/java/org/rocksdb/BlobOptionsTest.java b/java/src/test/java/org/rocksdb/BlobOptionsTest.java
index fe3d9b246..a0a2af84a 100644
--- a/java/src/test/java/org/rocksdb/BlobOptionsTest.java
+++ b/java/src/test/java/org/rocksdb/BlobOptionsTest.java
@@ -7,8 +7,6 @@ package org.rocksdb;
 import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.assertj.core.api.Assertions.assertThat;
 
-import java.io.File;
-import java.io.FilenameFilter;
 import java.util.*;
 import org.junit.ClassRule;
 import org.junit.Rule;
@@ -34,35 +32,29 @@ public class BlobOptionsTest {
    */
   @SuppressWarnings("CallToStringConcatCanBeReplacedByOperator")
   private int countDBFiles(final String endsWith) {
-    return Objects
-        .requireNonNull(dbFolder.getRoot().list(new FilenameFilter() {
-          @Override
-          public boolean accept(File dir, String name) {
-            return name.endsWith(endsWith);
-          }
-        }))
+    return Objects.requireNonNull(dbFolder.getRoot().list((dir, name) -> name.endsWith(endsWith)))
         .length;
   }
 
   @SuppressWarnings("SameParameterValue")
-  private byte[] small_key(String suffix) {
+  private byte[] small_key(final String suffix) {
     return ("small_key_" + suffix).getBytes(UTF_8);
   }
 
   @SuppressWarnings("SameParameterValue")
-  private byte[] small_value(String suffix) {
+  private byte[] small_value(final String suffix) {
     return ("small_value_" + suffix).getBytes(UTF_8);
   }
 
-  private byte[] large_key(String suffix) {
+  private byte[] large_key(final String suffix) {
     return ("large_key_" + suffix).getBytes(UTF_8);
   }
 
-  private byte[] large_value(String repeat) {
+  private byte[] large_value(final String repeat) {
     final byte[] large_value = ("" + repeat + "_" + largeBlobSize + "b").getBytes(UTF_8);
     final byte[] large_buffer = new byte[largeBlobSize];
     for (int pos = 0; pos < largeBlobSize; pos += large_value.length) {
-      int numBytes = Math.min(large_value.length, large_buffer.length - pos);
+      final int numBytes = Math.min(large_value.length, large_buffer.length - pos);
       System.arraycopy(large_value, 0, large_buffer, pos, numBytes);
     }
     return large_buffer;
@@ -232,14 +224,18 @@ public class BlobOptionsTest {
          final RocksDB db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath())) {
       db.put(small_key("default"), small_value("default"));
-      db.flush(new FlushOptions().setWaitForFlush(true));
+      try (final FlushOptions flushOptions = new FlushOptions().setWaitForFlush(true)) {
+        db.flush(flushOptions);
+      }
 
       // check there are no blobs in the database
       assertThat(countDBFiles(".sst")).isEqualTo(1);
       assertThat(countDBFiles(".blob")).isEqualTo(0);
 
       db.put(large_key("default"), large_value("default"));
-      db.flush(new FlushOptions().setWaitForFlush(true));
+      try (final FlushOptions flushOptions = new FlushOptions().setWaitForFlush(true)) {
+        db.flush(flushOptions);
+      }
 
       // wrote and flushed a value larger than the blobbing threshold
       // check there is a single blob in the database
@@ -277,7 +273,9 @@ public class BlobOptionsTest {
          final RocksDB db = RocksDB.open(dbOptions, dbFolder.getRoot().getAbsolutePath(),
              columnFamilyDescriptors, columnFamilyHandles)) {
       db.put(columnFamilyHandles.get(0), small_key("default"), small_value("default"));
-      db.flush(new FlushOptions().setWaitForFlush(true));
+      try (final FlushOptions flushOptions = new FlushOptions().setWaitForFlush(true)) {
+        db.flush(flushOptions);
+      }
 
       assertThat(countDBFiles(".blob")).isEqualTo(0);
@@ -338,12 +336,16 @@ public class BlobOptionsTest {
 
       db.put(columnFamilyHandles.get(1), large_key("column_family_1_k2"),
          large_value("column_family_1_k2"));
-      db.flush(new FlushOptions().setWaitForFlush(true), columnFamilyHandles.get(1));
+      try (final FlushOptions flushOptions = new FlushOptions().setWaitForFlush(true)) {
+        db.flush(flushOptions, columnFamilyHandles.get(1));
+      }
       assertThat(countDBFiles(".blob")).isEqualTo(1);
 
       db.put(columnFamilyHandles.get(2), large_key("column_family_2_k2"),
          large_value("column_family_2_k2"));
-      db.flush(new FlushOptions().setWaitForFlush(true), columnFamilyHandles.get(2));
+      try (final FlushOptions flushOptions = new FlushOptions().setWaitForFlush(true)) {
+        db.flush(flushOptions, columnFamilyHandles.get(2));
+      }
       assertThat(countDBFiles(".blob")).isEqualTo(1);
     }
   }
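
The recurring BlobOptionsTest change above replaces db.flush(new FlushOptions()...) with a try-with-resources block, so the FlushOptions native handle is released deterministically instead of lingering until finalization. The pattern in isolation, as a sketch (the path is hypothetical):

// Sketch: owning FlushOptions in try-with-resources around a flush.
import org.rocksdb.FlushOptions;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class FlushExample {
  static {
    RocksDB.loadLibrary();
  }

  public static void main(final String[] args) throws RocksDBException {
    try (final Options options = new Options().setCreateIfMissing(true);
         final RocksDB db = RocksDB.open(options, "/tmp/flush-example")) { // hypothetical path
      db.put("k".getBytes(), "v".getBytes());
      // The options object is closed as soon as the flush completes.
      try (final FlushOptions flushOptions = new FlushOptions().setWaitForFlush(true)) {
        db.flush(flushOptions);
      }
    }
  }
}
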
diff --git a/java/src/test/java/org/rocksdb/BlockBasedTableConfigTest.java b/java/src/test/java/org/rocksdb/BlockBasedTableConfigTest.java
index 005c8bc6d..13247d1e6 100644
--- a/java/src/test/java/org/rocksdb/BlockBasedTableConfigTest.java
+++ b/java/src/test/java/org/rocksdb/BlockBasedTableConfigTest.java
@@ -110,7 +110,7 @@ public class BlockBasedTableConfigTest {
     tableConfig.setDataBlockIndexType(DataBlockIndexType.kDataBlockBinarySearch);
     tableConfig.setChecksumType(ChecksumType.kNoChecksum);
     try (final Options options = new Options().setTableFormatConfig(tableConfig)) {
-      String opts = getOptionAsString(options);
+      final String opts = getOptionAsString(options);
       assertThat(opts).contains("index_type=kBinarySearch");
       assertThat(opts).contains("data_block_index_type=kDataBlockBinarySearch");
       assertThat(opts).contains("checksum=kNoChecksum");
@@ -121,7 +121,7 @@ public class BlockBasedTableConfigTest {
     tableConfig.setChecksumType(ChecksumType.kCRC32c);
     try (final Options options = new Options().setTableFormatConfig(tableConfig)) {
       options.useCappedPrefixExtractor(1); // Needed to use kHashSearch
-      String opts = getOptionAsString(options);
+      final String opts = getOptionAsString(options);
       assertThat(opts).contains("index_type=kHashSearch");
       assertThat(opts).contains("data_block_index_type=kDataBlockBinaryAndHash");
       assertThat(opts).contains("checksum=kCRC32c");
@@ -130,7 +130,7 @@ public class BlockBasedTableConfigTest {
     tableConfig.setIndexType(IndexType.kTwoLevelIndexSearch);
     tableConfig.setChecksumType(ChecksumType.kxxHash);
     try (final Options options = new Options().setTableFormatConfig(tableConfig)) {
-      String opts = getOptionAsString(options);
+      final String opts = getOptionAsString(options);
       assertThat(opts).contains("index_type=kTwoLevelIndexSearch");
       assertThat(opts).contains("checksum=kxxHash");
     }
@@ -138,30 +138,29 @@ public class BlockBasedTableConfigTest {
     tableConfig.setIndexType(IndexType.kBinarySearchWithFirstKey);
     tableConfig.setChecksumType(ChecksumType.kxxHash64);
     try (final Options options = new Options().setTableFormatConfig(tableConfig)) {
-      String opts = getOptionAsString(options);
+      final String opts = getOptionAsString(options);
       assertThat(opts).contains("index_type=kBinarySearchWithFirstKey");
       assertThat(opts).contains("checksum=kxxHash64");
     }
 
     tableConfig.setChecksumType(ChecksumType.kXXH3);
     try (final Options options = new Options().setTableFormatConfig(tableConfig)) {
-      String opts = getOptionAsString(options);
+      final String opts = getOptionAsString(options);
       assertThat(opts).contains("checksum=kXXH3");
     }
   }
 
-  private String getOptionAsString(Options options) throws Exception {
+  private String getOptionAsString(final Options options) throws Exception {
     options.setCreateIfMissing(true);
-    String dbPath = dbFolder.getRoot().getAbsolutePath();
-    String result;
-    try (final RocksDB db = RocksDB.open(options, dbPath);
+    final String dbPath = dbFolder.getRoot().getAbsolutePath();
+    final String result;
+    try (final RocksDB ignored = RocksDB.open(options, dbPath);
         final Stream<Path> pathStream = Files.walk(Paths.get(dbPath))) {
-      Path optionsPath =
-          pathStream
-              .filter(p -> p.getFileName().toString().startsWith("OPTIONS"))
+      final Path optionsPath =
+          pathStream.filter(p -> p.getFileName().toString().startsWith("OPTIONS"))
              .findAny()
              .orElseThrow(() -> new AssertionError("Missing options file"));
-      byte[] optionsData = Files.readAllBytes(optionsPath);
+      final byte[] optionsData = Files.readAllBytes(optionsPath);
       result = new String(optionsData, StandardCharsets.UTF_8);
     }
     RocksDB.destroyDB(dbPath, options);
@@ -357,7 +356,7 @@ public class BlockBasedTableConfigTest {
        new BlockBasedTableConfig().setFormatVersion(99999);
     try (final Options options = new Options().setTableFormatConfig(blockBasedTableConfig);
-         final RocksDB db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath())) {
+         final RocksDB ignored = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath())) {
       fail("Opening the database with an invalid format_version should have raised an exception");
     }
   }
diff --git a/java/src/test/java/org/rocksdb/BytewiseComparatorRegressionTest.java b/java/src/test/java/org/rocksdb/BytewiseComparatorRegressionTest.java
index fe950362b..13aa6c2bd 100644
--- a/java/src/test/java/org/rocksdb/BytewiseComparatorRegressionTest.java
+++ b/java/src/test/java/org/rocksdb/BytewiseComparatorRegressionTest.java
@@ -21,7 +21,7 @@ import org.rocksdb.util.BytewiseComparator;
  * by a change made between 6.2.2 and 6.22.1,
  * to wit {@link ...}
  * which as part of its effect, changed the Java bytewise comparators.
- *
+ * <p>
  * {@link ...}
  * {@link ...}
  */
@@ -34,8 +34,8 @@ public class BytewiseComparatorRegressionTest {
   @Rule public TemporaryFolder temporarySSTFolder = new TemporaryFolder();
 
-  private final static byte[][] testData = {{10, -11, 13}, {10, 11, 12}, {10, 11, 14}};
-  private final static byte[][] orderedData = {{10, 11, 12}, {10, 11, 14}, {10, -11, 13}};
+  private static final byte[][] testData = {{10, -11, 13}, {10, 11, 12}, {10, 11, 14}};
+  private static final byte[][] orderedData = {{10, 11, 12}, {10, 11, 14}, {10, -11, 13}};
 
   /**
    * {@link ...}
@@ -43,12 +43,16 @@ public class BytewiseComparatorRegressionTest {
   @Test
   public void testJavaComparator() throws RocksDBException {
     final BytewiseComparator comparator = new BytewiseComparator(new ComparatorOptions());
-    performTest(new Options().setCreateIfMissing(true).setComparator(comparator));
+    try (final Options options = new Options().setCreateIfMissing(true).setComparator(comparator)) {
+      performTest(options);
+    }
   }
 
   @Test
   public void testDefaultComparator() throws RocksDBException {
-    performTest(new Options().setCreateIfMissing(true));
+    try (final Options options = new Options().setCreateIfMissing(true)) {
+      performTest(options);
+    }
   }
 
   /**
@@ -56,8 +60,10 @@
    */
   @Test
   public void testCppComparator() throws RocksDBException {
-    performTest(new Options().setCreateIfMissing(true).setComparator(
-        BuiltinComparator.BYTEWISE_COMPARATOR));
+    try (final Options options = new Options().setCreateIfMissing(true).setComparator(
+             BuiltinComparator.BYTEWISE_COMPARATOR)) {
+      performTest(options);
+    }
   }
 
   private void performTest(final Options options) throws RocksDBException {
diff --git a/java/src/test/java/org/rocksdb/CheckPointTest.java b/java/src/test/java/org/rocksdb/CheckPointTest.java
index c2cc6fc62..2b3cc7a3b 100644
--- a/java/src/test/java/org/rocksdb/CheckPointTest.java
+++ b/java/src/test/java/org/rocksdb/CheckPointTest.java
@@ -59,8 +59,7 @@ public class CheckPointTest {
 
   @Test(expected = IllegalArgumentException.class)
   public void failIfDbIsNull() {
-    try (final Checkpoint checkpoint = Checkpoint.create(null)) {
-
+    try (final Checkpoint ignored = Checkpoint.create(null)) {
     }
   }
diff --git a/java/src/test/java/org/rocksdb/ClockCacheTest.java b/java/src/test/java/org/rocksdb/ClockCacheTest.java
index d1241ac75..718c24f70 100644
--- a/java/src/test/java/org/rocksdb/ClockCacheTest.java
+++ b/java/src/test/java/org/rocksdb/ClockCacheTest.java
@@ -18,8 +18,7 @@ public class ClockCacheTest {
     final long capacity = 1000;
     final int numShardBits = 16;
     final boolean strictCapacityLimit = true;
-    try(final Cache clockCache = new ClockCache(capacity,
-        numShardBits, strictCapacityLimit)) {
+    try (final Cache ignored = new ClockCache(capacity, numShardBits, strictCapacityLimit)) {
      //no op
    }
  }
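
The BlockBasedTableConfigTest hunks above verify that index, data-block-index, and checksum settings survive the round trip into the generated OPTIONS file. For orientation, a standalone sketch that wires the same kind of table config into a database (values mirror the test; the path is hypothetical):

// Sketch: attaching a BlockBasedTableConfig with explicit index/checksum choices.
import org.rocksdb.BlockBasedTableConfig;
import org.rocksdb.ChecksumType;
import org.rocksdb.DataBlockIndexType;
import org.rocksdb.IndexType;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class TableConfigExample {
  static {
    RocksDB.loadLibrary();
  }

  public static void main(final String[] args) throws RocksDBException {
    final BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
    tableConfig.setIndexType(IndexType.kTwoLevelIndexSearch);
    tableConfig.setDataBlockIndexType(DataBlockIndexType.kDataBlockBinarySearch);
    tableConfig.setChecksumType(ChecksumType.kXXH3);
    try (final Options options =
             new Options().setCreateIfMissing(true).setTableFormatConfig(tableConfig);
         final RocksDB db = RocksDB.open(options, "/tmp/table-config-example")) { // hypothetical
      db.put("k".getBytes(), "v".getBytes());
    }
  }
}
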
diff --git a/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java b/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java
index 7d7581048..a5fe8cef7 100644
--- a/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java
+++ b/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java
@@ -9,7 +9,6 @@ import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.Assert.assertEquals;
 
 import java.io.IOException;
-import java.nio.file.Files;
 import java.nio.file.Paths;
 import java.util.*;
 import org.junit.ClassRule;
@@ -27,11 +26,11 @@ public class ColumnFamilyOptionsTest {
 
   @Test
   public void copyConstructor() {
-    ColumnFamilyOptions origOpts = new ColumnFamilyOptions();
+    final ColumnFamilyOptions origOpts = new ColumnFamilyOptions();
     origOpts.setNumLevels(rand.nextInt(8));
     origOpts.setTargetFileSizeMultiplier(rand.nextInt(100));
     origOpts.setLevel0StopWritesTrigger(rand.nextInt(50));
-    ColumnFamilyOptions copyOpts = new ColumnFamilyOptions(origOpts);
+    final ColumnFamilyOptions copyOpts = new ColumnFamilyOptions(origOpts);
     assertThat(origOpts.numLevels()).isEqualTo(copyOpts.numLevels());
     assertThat(origOpts.targetFileSizeMultiplier()).isEqualTo(copyOpts.targetFileSizeMultiplier());
     assertThat(origOpts.level0StopWritesTrigger()).isEqualTo(copyOpts.level0StopWritesTrigger());
@@ -39,7 +38,7 @@ public class ColumnFamilyOptionsTest {
 
   @Test
   public void getColumnFamilyOptionsFromProps() {
-    Properties properties = new Properties();
+    final Properties properties = new Properties();
     properties.put("write_buffer_size", "112");
     properties.put("max_write_buffer_number", "13");
 
@@ -90,16 +89,15 @@ public class ColumnFamilyOptionsTest {
 
   @Test(expected = IllegalArgumentException.class)
   public void failColumnFamilyOptionsFromPropsWithNullValue() {
-    try (final ColumnFamilyOptions opt =
+    try (final ColumnFamilyOptions ignored =
             ColumnFamilyOptions.getColumnFamilyOptionsFromProps(null)) {
     }
   }
 
   @Test(expected = IllegalArgumentException.class)
   public void failColumnFamilyOptionsFromPropsWithEmptyProps() {
-    try (final ColumnFamilyOptions opt =
-        ColumnFamilyOptions.getColumnFamilyOptionsFromProps(
-            new Properties())) {
+    try (final ColumnFamilyOptions ignored =
+            ColumnFamilyOptions.getColumnFamilyOptionsFromProps(new Properties())) {
     }
   }
 
@@ -455,7 +453,7 @@ public class ColumnFamilyOptionsTest {
     }
     columnFamilyOptions.setCompressionPerLevel(compressionTypeList);
     compressionTypeList = columnFamilyOptions.compressionPerLevel();
-    for (CompressionType compressionType : compressionTypeList) {
+    for (final CompressionType compressionType : compressionTypeList) {
       assertThat(compressionType).isEqualTo(
           CompressionType.NO_COMPRESSION);
     }
diff --git a/java/src/test/java/org/rocksdb/ColumnFamilyTest.java b/java/src/test/java/org/rocksdb/ColumnFamilyTest.java
index e98327d93..fb8a45085 100644
--- a/java/src/test/java/org/rocksdb/ColumnFamilyTest.java
+++ b/java/src/test/java/org/rocksdb/ColumnFamilyTest.java
@@ -22,16 +22,14 @@ public class ColumnFamilyTest {
   public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
       new RocksNativeLibraryResource();
 
-  @Rule
-  public TemporaryFolder dbFolder = new TemporaryFolder();
+  @Rule public TemporaryFolder dbFolder = new TemporaryFolder();
 
   @Test
   public void columnFamilyDescriptorName() throws RocksDBException {
     final byte[] cfName = "some_name".getBytes(UTF_8);
 
     try(final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions()) {
-      final ColumnFamilyDescriptor cfDescriptor =
-          new ColumnFamilyDescriptor(cfName, cfOptions);
+      final ColumnFamilyDescriptor cfDescriptor = new ColumnFamilyDescriptor(cfName, cfOptions);
       assertThat(cfDescriptor.getName()).isEqualTo(cfName);
     }
   }
@@ -40,24 +38,23 @@ public class ColumnFamilyTest {
   public void columnFamilyDescriptorOptions() throws RocksDBException {
     final byte[] cfName = "some_name".getBytes(UTF_8);
 
-    try(final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions()
-        .setCompressionType(CompressionType.BZLIB2_COMPRESSION)) {
+    try (final ColumnFamilyOptions cfOptions =
+             new ColumnFamilyOptions().setCompressionType(CompressionType.BZLIB2_COMPRESSION)) {
       final ColumnFamilyDescriptor cfDescriptor = new ColumnFamilyDescriptor(cfName, cfOptions);
 
-      assertThat(cfDescriptor.getOptions().compressionType())
-          .isEqualTo(CompressionType.BZLIB2_COMPRESSION);
+      assertThat(cfDescriptor.getOptions().compressionType())
+          .isEqualTo(CompressionType.BZLIB2_COMPRESSION);
     }
   }
 
   @Test
   public void listColumnFamilies() throws RocksDBException {
     try (final Options options = new Options().setCreateIfMissing(true);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath())) {
+         final RocksDB ignored = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath())) {
       // Test listColumnFamilies
-      final List<byte[]> columnFamilyNames = RocksDB.listColumnFamilies(options,
-          dbFolder.getRoot().getAbsolutePath());
+      final List<byte[]> columnFamilyNames =
+          RocksDB.listColumnFamilies(options, dbFolder.getRoot().getAbsolutePath());
       assertThat(columnFamilyNames).isNotNull();
       assertThat(columnFamilyNames.size()).isGreaterThan(0);
       assertThat(columnFamilyNames.size()).isEqualTo(1);
@@ -70,8 +67,7 @@ public class ColumnFamilyTest {
     try (final Options options = new Options().setCreateIfMissing(true);
          final RocksDB db = RocksDB.open(options,
              dbFolder.getRoot().getAbsolutePath())) {
-      final ColumnFamilyHandle cfh = db.getDefaultColumnFamily();
-      try {
+      try (final ColumnFamilyHandle cfh = db.getDefaultColumnFamily()) {
         assertThat(cfh).isNotNull();
 
         assertThat(cfh.getName()).isEqualTo("default".getBytes(UTF_8));
@@ -87,8 +83,6 @@ public class ColumnFamilyTest {
 
         assertThat(cfh).isNotNull();
         assertThat(actualValue).isEqualTo(value);
-      } finally {
-        cfh.close();
       }
     }
   }
@@ -96,31 +90,25 @@ public class ColumnFamilyTest {
   @Test
   public void createColumnFamily() throws RocksDBException {
     final byte[] cfName = "new_cf".getBytes(UTF_8);
-    final ColumnFamilyDescriptor cfDescriptor = new ColumnFamilyDescriptor(cfName,
-        new ColumnFamilyOptions());
+    final ColumnFamilyDescriptor cfDescriptor =
+        new ColumnFamilyDescriptor(cfName, new ColumnFamilyOptions());
 
     try (final Options options = new Options().setCreateIfMissing(true);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath())) {
-
-      final ColumnFamilyHandle columnFamilyHandle = db.createColumnFamily(cfDescriptor);
-
-      try {
+         final RocksDB db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath())) {
+      try (final ColumnFamilyHandle columnFamilyHandle = db.createColumnFamily(cfDescriptor)) {
         assertThat(columnFamilyHandle.getName()).isEqualTo(cfName);
         assertThat(columnFamilyHandle.getID()).isEqualTo(1);
 
         final ColumnFamilyDescriptor latestDescriptor = columnFamilyHandle.getDescriptor();
         assertThat(latestDescriptor.getName()).isEqualTo(cfName);
 
-        final List<byte[]> columnFamilyNames = RocksDB.listColumnFamilies(
-            options, dbFolder.getRoot().getAbsolutePath());
+        final List<byte[]> columnFamilyNames =
+            RocksDB.listColumnFamilies(options, dbFolder.getRoot().getAbsolutePath());
         assertThat(columnFamilyNames).isNotNull();
         assertThat(columnFamilyNames.size()).isGreaterThan(0);
         assertThat(columnFamilyNames.size()).isEqualTo(2);
         assertThat(new String(columnFamilyNames.get(0))).isEqualTo("default");
         assertThat(new String(columnFamilyNames.get(1))).isEqualTo("new_cf");
-      } finally {
-        columnFamilyHandle.close();
       }
     }
   }
@@ -147,7 +135,8 @@ public class ColumnFamilyTest {
       db.put(columnFamilyHandleList.get(0), "dfkey2".getBytes(), "dfvalue".getBytes());
       db.put(columnFamilyHandleList.get(1), "newcfkey1".getBytes(), "newcfvalue".getBytes());
 
-      String retVal = new String(db.get(columnFamilyHandleList.get(1), "newcfkey1".getBytes()));
+      final String retVal =
+          new String(db.get(columnFamilyHandleList.get(1), "newcfkey1".getBytes()));
       assertThat(retVal).isEqualTo("newcfvalue");
       assertThat((db.get(columnFamilyHandleList.get(1), "dfkey1".getBytes()))).isNull();
       db.delete(columnFamilyHandleList.get(1), "newcfkey1".getBytes());
@@ -160,8 +149,8 @@ public class ColumnFamilyTest {
 
   @Test
   public void getWithOutValueAndCf() throws RocksDBException {
-    final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
-        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY));
+    final List<ColumnFamilyDescriptor> cfDescriptors =
+        Collections.singletonList(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY));
     final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
 
     // Test open database with column family names
@@ -202,7 +191,7 @@ public class ColumnFamilyTest {
          final RocksDB db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath(),
              cfDescriptors, columnFamilyHandleList)) {
-      ColumnFamilyHandle tmpColumnFamilyHandle;
+      final ColumnFamilyHandle tmpColumnFamilyHandle;
       tmpColumnFamilyHandle = db.createColumnFamily(
           new ColumnFamilyDescriptor("tmpCF".getBytes(), new ColumnFamilyOptions()));
       db.put(tmpColumnFamilyHandle, "key".getBytes(), "value".getBytes());
@@ -223,8 +212,8 @@ public class ColumnFamilyTest {
          final RocksDB db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath(),
              cfDescriptors, columnFamilyHandleList)) {
-      ColumnFamilyHandle tmpColumnFamilyHandle = null;
-      ColumnFamilyHandle tmpColumnFamilyHandle2 = null;
+      final ColumnFamilyHandle tmpColumnFamilyHandle;
+      final ColumnFamilyHandle tmpColumnFamilyHandle2;
       tmpColumnFamilyHandle = db.createColumnFamily(
           new ColumnFamilyDescriptor("tmpCF".getBytes(), new ColumnFamilyOptions()));
       tmpColumnFamilyHandle2 = db.createColumnFamily(
@@ -264,7 +253,7 @@ public class ColumnFamilyTest {
       writeBatch.delete(columnFamilyHandleList.get(1), "xyz".getBytes());
       db.write(writeOpt, writeBatch);
 
-      assertThat(db.get(columnFamilyHandleList.get(1), "xyz".getBytes()) == null);
+      assertThat(db.get(columnFamilyHandleList.get(1), "xyz".getBytes())).isNull();
       assertThat(new String(db.get(columnFamilyHandleList.get(1), "newcfkey".getBytes())))
           .isEqualTo("value");
       assertThat(new String(db.get(columnFamilyHandleList.get(1), "newcfkey2".getBytes())))
@@ -293,7 +282,7 @@ public class ColumnFamilyTest {
       db.put(columnFamilyHandleList.get(1), "newcfkey2".getBytes(), "value2".getBytes());
       try (final RocksIterator rocksIterator = db.newIterator(columnFamilyHandleList.get(1))) {
         rocksIterator.seekToFirst();
-        Map<String, String> refMap = new HashMap<>();
+        final Map<String, String> refMap = new HashMap<>();
         refMap.put("newcfkey", "value");
         refMap.put("newcfkey2", "value2");
         int i = 0;
@@ -323,8 +312,7 @@ public class ColumnFamilyTest {
       db.put(columnFamilyHandleList.get(0), "key".getBytes(), "value".getBytes());
       db.put(columnFamilyHandleList.get(1), "newcfkey".getBytes(), "value".getBytes());
 
-      final List<byte[]> keys =
-          Arrays.asList(new byte[][] {"key".getBytes(), "newcfkey".getBytes()});
+      final List<byte[]> keys = Arrays.asList("key".getBytes(), "newcfkey".getBytes());
       List<byte[]> retValues = db.multiGetAsList(columnFamilyHandleList, keys);
       assertThat(retValues.size()).isEqualTo(2);
@@ -352,8 +340,7 @@ public class ColumnFamilyTest {
       db.put(columnFamilyHandleList.get(0), "key".getBytes(), "value".getBytes());
       db.put(columnFamilyHandleList.get(1), "newcfkey".getBytes(), "value".getBytes());
 
-      final List<byte[]> keys =
-          Arrays.asList(new byte[][] {"key".getBytes(), "newcfkey".getBytes()});
+      final List<byte[]> keys = Arrays.asList("key".getBytes(), "newcfkey".getBytes());
       List<byte[]> retValues = db.multiGetAsList(columnFamilyHandleList, keys);
       assertThat(retValues.size()).isEqualTo(2);
       assertThat(new String(retValues.get(0))).isEqualTo("value");
@@ -528,15 +515,12 @@ public class ColumnFamilyTest {
 
   @Test
   public void testCFNamesWithZeroBytes() throws RocksDBException {
-    ColumnFamilyHandle cf1 = null, cf2 = null;
     try (final Options options = new Options().setCreateIfMissing(true);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath());
-    ) {
+         final RocksDB db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath())) {
       final byte[] b0 = new byte[] {0, 0};
       final byte[] b1 = new byte[] {0, 1};
-      cf1 = db.createColumnFamily(new ColumnFamilyDescriptor(b0));
-      cf2 = db.createColumnFamily(new ColumnFamilyDescriptor(b1));
+      db.createColumnFamily(new ColumnFamilyDescriptor(b0));
+      db.createColumnFamily(new ColumnFamilyDescriptor(b1));
       final List<byte[]> families =
           RocksDB.listColumnFamilies(options, dbFolder.getRoot().getAbsolutePath());
       assertThat(families).contains("default".getBytes(), b0, b1);
@@ -545,14 +529,10 @@ public class ColumnFamilyTest {
 
   @Test
   public void testCFNameSimplifiedChinese() throws RocksDBException {
-    ColumnFamilyHandle columnFamilyHandle = null;
     try (final Options options = new Options().setCreateIfMissing(true);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath());
-    ) {
+         final RocksDB db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath())) {
       final String simplifiedChinese = "\u7b80\u4f53\u5b57";
-      columnFamilyHandle =
-          db.createColumnFamily(new ColumnFamilyDescriptor(simplifiedChinese.getBytes()));
+      db.createColumnFamily(new ColumnFamilyDescriptor(simplifiedChinese.getBytes()));
 
       final List<byte[]> families =
           RocksDB.listColumnFamilies(options, dbFolder.getRoot().getAbsolutePath());
@@ -563,7 +543,7 @@ public class ColumnFamilyTest {
   @Test
   public void testDestroyColumnFamilyHandle() throws RocksDBException {
     try (final Options options = new Options().setCreateIfMissing(true);
-         final RocksDB db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath());) {
+         final RocksDB db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath())) {
       final byte[] name1 = "cf1".getBytes();
       final byte[] name2 = "cf2".getBytes();
       final ColumnFamilyDescriptor desc1 = new ColumnFamilyDescriptor(name1);
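
Several ColumnFamilyTest hunks above swap explicit close() in finally blocks for try-with-resources on the ColumnFamilyHandle. The pattern on its own, as a sketch (path and names hypothetical):

// Sketch: creating a column family and scoping its handle with try-with-resources.
import java.util.List;
import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class CreateColumnFamilyExample {
  static {
    RocksDB.loadLibrary();
  }

  public static void main(final String[] args) throws RocksDBException {
    final String path = "/tmp/cf-example"; // hypothetical path
    try (final Options options = new Options().setCreateIfMissing(true);
         final RocksDB db = RocksDB.open(options, path)) {
      // The handle owns a native resource; the try block replaces a manual
      // close() in finally, exactly as in the rewritten tests above.
      try (final ColumnFamilyHandle cf =
               db.createColumnFamily(new ColumnFamilyDescriptor("new_cf".getBytes()))) {
        db.put(cf, "key".getBytes(), "value".getBytes());
      }
      final List<byte[]> names = RocksDB.listColumnFamilies(options, path);
      System.out.println(names.size()); // "default" plus "new_cf"
    }
  }
}
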
diff --git a/java/src/test/java/org/rocksdb/CompactRangeOptionsTest.java b/java/src/test/java/org/rocksdb/CompactRangeOptionsTest.java
index 57bf22b57..4440d0a71 100644
--- a/java/src/test/java/org/rocksdb/CompactRangeOptionsTest.java
+++ b/java/src/test/java/org/rocksdb/CompactRangeOptionsTest.java
@@ -18,84 +18,85 @@ public class CompactRangeOptionsTest {
 
   @Test
   public void exclusiveManualCompaction() {
-    CompactRangeOptions opt = new CompactRangeOptions();
-    boolean value = false;
-    opt.setExclusiveManualCompaction(value);
-    assertThat(opt.exclusiveManualCompaction()).isEqualTo(value);
-    value = true;
-    opt.setExclusiveManualCompaction(value);
-    assertThat(opt.exclusiveManualCompaction()).isEqualTo(value);
+    try (final CompactRangeOptions opt = new CompactRangeOptions()) {
+      opt.setExclusiveManualCompaction(false);
+      assertThat(opt.exclusiveManualCompaction()).isEqualTo(false);
+      opt.setExclusiveManualCompaction(true);
+      assertThat(opt.exclusiveManualCompaction()).isEqualTo(true);
+    }
   }
 
   @Test
   public void bottommostLevelCompaction() {
-    CompactRangeOptions opt = new CompactRangeOptions();
-    BottommostLevelCompaction value = BottommostLevelCompaction.kSkip;
-    opt.setBottommostLevelCompaction(value);
-    assertThat(opt.bottommostLevelCompaction()).isEqualTo(value);
-    value = BottommostLevelCompaction.kForce;
-    opt.setBottommostLevelCompaction(value);
-    assertThat(opt.bottommostLevelCompaction()).isEqualTo(value);
-    value = BottommostLevelCompaction.kIfHaveCompactionFilter;
-    opt.setBottommostLevelCompaction(value);
-    assertThat(opt.bottommostLevelCompaction()).isEqualTo(value);
-    value = BottommostLevelCompaction.kForceOptimized;
-    opt.setBottommostLevelCompaction(value);
-    assertThat(opt.bottommostLevelCompaction()).isEqualTo(value);
+    try (final CompactRangeOptions opt = new CompactRangeOptions()) {
+      BottommostLevelCompaction value = BottommostLevelCompaction.kSkip;
+      opt.setBottommostLevelCompaction(value);
+      assertThat(opt.bottommostLevelCompaction()).isEqualTo(value);
+      value = BottommostLevelCompaction.kForce;
+      opt.setBottommostLevelCompaction(value);
+      assertThat(opt.bottommostLevelCompaction()).isEqualTo(value);
+      value = BottommostLevelCompaction.kIfHaveCompactionFilter;
+      opt.setBottommostLevelCompaction(value);
+      assertThat(opt.bottommostLevelCompaction()).isEqualTo(value);
+      value = BottommostLevelCompaction.kForceOptimized;
+      opt.setBottommostLevelCompaction(value);
+      assertThat(opt.bottommostLevelCompaction()).isEqualTo(value);
+    }
   }
 
   @Test
   public void changeLevel() {
-    CompactRangeOptions opt = new CompactRangeOptions();
-    boolean value = false;
-    opt.setChangeLevel(value);
-    assertThat(opt.changeLevel()).isEqualTo(value);
-    value = true;
-    opt.setChangeLevel(value);
-    assertThat(opt.changeLevel()).isEqualTo(value);
+    try (final CompactRangeOptions opt = new CompactRangeOptions()) {
+      opt.setChangeLevel(false);
+      assertThat(opt.changeLevel()).isEqualTo(false);
+      opt.setChangeLevel(true);
+      assertThat(opt.changeLevel()).isEqualTo(true);
+    }
   }
 
   @Test
   public void targetLevel() {
-    CompactRangeOptions opt = new CompactRangeOptions();
-    int value = 2;
-    opt.setTargetLevel(value);
-    assertThat(opt.targetLevel()).isEqualTo(value);
-    value = 3;
-    opt.setTargetLevel(value);
-    assertThat(opt.targetLevel()).isEqualTo(value);
+    try (final CompactRangeOptions opt = new CompactRangeOptions()) {
+      int value = 2;
+      opt.setTargetLevel(value);
+      assertThat(opt.targetLevel()).isEqualTo(value);
+      value = 3;
+      opt.setTargetLevel(value);
+      assertThat(opt.targetLevel()).isEqualTo(value);
+    }
   }
 
   @Test
   public void targetPathId() {
-    CompactRangeOptions opt = new CompactRangeOptions();
-    int value = 2;
-    opt.setTargetPathId(value);
-    assertThat(opt.targetPathId()).isEqualTo(value);
-    value = 3;
-    opt.setTargetPathId(value);
-    assertThat(opt.targetPathId()).isEqualTo(value);
+    try (final CompactRangeOptions opt = new CompactRangeOptions()) {
+      int value = 2;
+      opt.setTargetPathId(value);
+      assertThat(opt.targetPathId()).isEqualTo(value);
+      value = 3;
+      opt.setTargetPathId(value);
+      assertThat(opt.targetPathId()).isEqualTo(value);
+    }
   }
 
   @Test
   public void allowWriteStall() {
-    CompactRangeOptions opt = new CompactRangeOptions();
-    boolean value = false;
-    opt.setAllowWriteStall(value);
-    assertThat(opt.allowWriteStall()).isEqualTo(value);
-    value = true;
-    opt.setAllowWriteStall(value);
-    assertThat(opt.allowWriteStall()).isEqualTo(value);
+    try (final CompactRangeOptions opt = new CompactRangeOptions()) {
+      opt.setAllowWriteStall(false);
+      assertThat(opt.allowWriteStall()).isEqualTo(false);
+      opt.setAllowWriteStall(true);
+      assertThat(opt.allowWriteStall()).isEqualTo(true);
+    }
   }
 
   @Test
   public void maxSubcompactions() {
-    CompactRangeOptions opt = new CompactRangeOptions();
-    int value = 2;
-    opt.setMaxSubcompactions(value);
-    assertThat(opt.maxSubcompactions()).isEqualTo(value);
-    value = 3;
-    opt.setMaxSubcompactions(value);
-    assertThat(opt.maxSubcompactions()).isEqualTo(value);
+    try (final CompactRangeOptions opt = new CompactRangeOptions()) {
+      int value = 2;
+      opt.setMaxSubcompactions(value);
+      assertThat(opt.maxSubcompactions()).isEqualTo(value);
+      value = 3;
+      opt.setMaxSubcompactions(value);
+      assertThat(opt.maxSubcompactions()).isEqualTo(value);
+    }
   }
 }
diff --git a/java/src/test/java/org/rocksdb/CompressionTypesTest.java b/java/src/test/java/org/rocksdb/CompressionTypesTest.java
index e26cc0aca..a983f471a 100644
--- a/java/src/test/java/org/rocksdb/CompressionTypesTest.java
+++ b/java/src/test/java/org/rocksdb/CompressionTypesTest.java
@@ -5,16 +5,21 @@
 
 package org.rocksdb;
 
-import org.junit.Test;
+import static org.assertj.core.api.Assertions.assertThat;
 
+import org.junit.Test;
 
 public class CompressionTypesTest {
   @Test
   public void getCompressionType() {
     for (final CompressionType compressionType : CompressionType.values()) {
-      String libraryName = compressionType.getLibraryName();
-      compressionType.equals(CompressionType.getCompressionType(
-          libraryName));
+      final String libraryName = compressionType.getLibraryName();
+      if (compressionType == CompressionType.DISABLE_COMPRESSION_OPTION) {
+        assertThat(CompressionType.getCompressionType(libraryName))
+            .isEqualTo(CompressionType.NO_COMPRESSION);
+      } else {
+        assertThat(CompressionType.getCompressionType(libraryName)).isEqualTo(compressionType);
+      }
     }
   }
-}
+}
\ No newline at end of file
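
The rewritten CompressionTypesTest above makes a real assertion where the old test silently discarded the result of equals(), and it pins down an asymmetry: DISABLE_COMPRESSION_OPTION has no library name of its own, so looking that name up again yields NO_COMPRESSION. A tiny sketch of the round trip (pure enum logic, no native library required):

// Sketch: round-tripping every CompressionType through its library name.
import org.rocksdb.CompressionType;

public class CompressionRoundTrip {
  public static void main(final String[] args) {
    for (final CompressionType type : CompressionType.values()) {
      // DISABLE_COMPRESSION_OPTION maps back to NO_COMPRESSION here;
      // every other constant maps back to itself.
      final CompressionType roundTripped =
          CompressionType.getCompressionType(type.getLibraryName());
      System.out.println(type + " -> " + roundTripped);
    }
  }
}
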
diff --git a/java/src/test/java/org/rocksdb/DBOptionsTest.java b/java/src/test/java/org/rocksdb/DBOptionsTest.java
index d55ceebcf..882015f3e 100644
--- a/java/src/test/java/org/rocksdb/DBOptionsTest.java
+++ b/java/src/test/java/org/rocksdb/DBOptionsTest.java
@@ -27,11 +27,11 @@ public class DBOptionsTest {
 
   @Test
   public void copyConstructor() {
-    DBOptions origOpts = new DBOptions();
+    final DBOptions origOpts = new DBOptions();
     origOpts.setCreateIfMissing(rand.nextBoolean());
     origOpts.setAllow2pc(rand.nextBoolean());
     origOpts.setMaxBackgroundJobs(rand.nextInt(10));
-    DBOptions copyOpts = new DBOptions(origOpts);
+    final DBOptions copyOpts = new DBOptions(origOpts);
     assertThat(origOpts.createIfMissing()).isEqualTo(copyOpts.createIfMissing());
     assertThat(origOpts.allow2pc()).isEqualTo(copyOpts.allow2pc());
   }
@@ -437,9 +437,8 @@ public class DBOptionsTest {
 
   @Test
   public void setWriteBufferManager() throws RocksDBException {
-    try (final DBOptions opt = new DBOptions();
-         final Cache cache = new LRUCache(1 * 1024 * 1024);
-         final WriteBufferManager writeBufferManager = new WriteBufferManager(2000l, cache)) {
+    try (final DBOptions opt = new DBOptions(); final Cache cache = new LRUCache(1024 * 1024);
+         final WriteBufferManager writeBufferManager = new WriteBufferManager(2000L, cache)) {
       opt.setWriteBufferManager(writeBufferManager);
       assertThat(opt.writeBufferManager()).isEqualTo(writeBufferManager);
     }
@@ -447,9 +446,8 @@ public class DBOptionsTest {
 
   @Test
   public void setWriteBufferManagerWithZeroBufferSize() throws RocksDBException {
-    try (final DBOptions opt = new DBOptions();
-         final Cache cache = new LRUCache(1 * 1024 * 1024);
-         final WriteBufferManager writeBufferManager = new WriteBufferManager(0l, cache)) {
+    try (final DBOptions opt = new DBOptions(); final Cache cache = new LRUCache(1024 * 1024);
+         final WriteBufferManager writeBufferManager = new WriteBufferManager(0L, cache)) {
       opt.setWriteBufferManager(writeBufferManager);
       assertThat(opt.writeBufferManager()).isEqualTo(writeBufferManager);
     }
@@ -888,15 +886,15 @@ public class DBOptionsTest {
       }
     }) {
       assertThat(options.setListeners(Arrays.asList(el1, el2))).isEqualTo(options);
-      List<AbstractEventListener> listeners = options.listeners();
+      final List<AbstractEventListener> listeners = options.listeners();
       assertEquals(el1, listeners.get(0));
       assertEquals(el2, listeners.get(1));
-      options.setListeners(Collections.<AbstractEventListener>emptyList());
+      options.setListeners(Collections.emptyList());
       listeners.get(0).onTableFileDeleted(null);
       assertTrue(wasCalled1.get());
       listeners.get(1).onMemTableSealed(null);
       assertTrue(wasCalled2.get());
-      List<AbstractEventListener> listeners2 = options.listeners();
+      final List<AbstractEventListener> listeners2 = options.listeners();
       assertNotNull(listeners2);
       assertEquals(0, listeners2.size());
     }
diff --git a/java/src/test/java/org/rocksdb/EventListenerTest.java b/java/src/test/java/org/rocksdb/EventListenerTest.java
index 93ea19c2f..84be232f9 100644
--- a/java/src/test/java/org/rocksdb/EventListenerTest.java
+++ b/java/src/test/java/org/rocksdb/EventListenerTest.java
@@ -181,7 +181,7 @@ public class EventListenerTest {
       final byte[] value = new byte[24];
       rand.nextBytes(value);
       db.put("testKey".getBytes(), value);
-      ColumnFamilyHandle columnFamilyHandle = db.getDefaultColumnFamily();
+      final ColumnFamilyHandle columnFamilyHandle = db.getDefaultColumnFamily();
       columnFamilyHandle.close();
       assertThat(wasCbCalled.get()).isTrue();
     }
@@ -475,7 +475,7 @@ public class EventListenerTest {
 
   private static void assertNoCallbackErrors(
       final CapturingTestableEventListener capturingTestableEventListener) {
-    for (AssertionError error : capturingTestableEventListener.capturedAssertionErrors) {
+    for (final AssertionError error : capturingTestableEventListener.capturedAssertionErrors) {
       throw new Error("An assertion failed in callback", error);
     }
   }
@@ -565,16 +565,16 @@ public class EventListenerTest {
   private static class CapturingObjectAssert<T> extends ObjectAssert<T> {
     private final List<AssertionError> assertionErrors;
-    public CapturingObjectAssert(T t, List<AssertionError> assertionErrors) {
+    public CapturingObjectAssert(final T t, final List<AssertionError> assertionErrors) {
       super(t);
       this.assertionErrors = assertionErrors;
     }
 
     @Override
-    public ObjectAssert<T> isEqualTo(Object other) {
+    public ObjectAssert<T> isEqualTo(final Object other) {
       try {
         return super.isEqualTo(other);
-      } catch (AssertionError error) {
+      } catch (final AssertionError error) {
         assertionErrors.add(error);
         throw error;
       }
@@ -584,7 +584,7 @@ public class EventListenerTest {
     public ObjectAssert<T> isNotNull() {
       try {
         return super.isNotNull();
-      } catch (AssertionError error) {
+      } catch (final AssertionError error) {
         assertionErrors.add(error);
         throw error;
       }
@@ -596,8 +596,8 @@ public class EventListenerTest {
 
     final List<AssertionError> capturedAssertionErrors = new ArrayList<>();
 
-    protected <T> AbstractObjectAssert<?, T> assertThat(T actual) {
-      return new CapturingObjectAssert<T>(actual, capturedAssertionErrors);
+    protected <T> AbstractObjectAssert<?, T> assertThat(final T actual) {
+      return new CapturingObjectAssert<>(actual, capturedAssertionErrors);
     }
 
     public CapturingTestableEventListener() {}
diff --git a/java/src/test/java/org/rocksdb/InfoLogLevelTest.java b/java/src/test/java/org/rocksdb/InfoLogLevelTest.java
index 12ee537d9..90b0b4e2d 100644
--- a/java/src/test/java/org/rocksdb/InfoLogLevelTest.java
+++ b/java/src/test/java/org/rocksdb/InfoLogLevelTest.java
@@ -95,12 +95,12 @@ public class InfoLogLevelTest {
     int first_non_header = lines.length;
     // Identify the last line of the header
     for (int i = lines.length - 1; i >= 0; --i) {
-      if (lines[i].indexOf("DB pointer") >= 0) {
+      if (lines[i].contains("DB pointer")) {
         first_non_header = i + 1;
         break;
       }
     }
-    StringBuilder builder = new StringBuilder();
+    final StringBuilder builder = new StringBuilder();
     for (int i = first_non_header; i < lines.length; ++i) {
       builder.append(lines[i]).append(separator);
     }
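
The LoggerTest hunks below mainly mark the log(...) callback parameters final. For orientation, the custom-Logger shape those tests exercise looks like this sketch (path hypothetical; the Logger(Options) constructor and setLogger call are the ones used in the tests):

// Sketch: routing RocksDB's internal log lines through a custom Logger.
import java.util.concurrent.atomic.AtomicInteger;
import org.rocksdb.InfoLogLevel;
import org.rocksdb.Logger;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class CustomLoggerExample {
  static {
    RocksDB.loadLibrary();
  }

  public static void main(final String[] args) throws RocksDBException {
    final AtomicInteger logMessageCounter = new AtomicInteger();
    try (final Options options =
             new Options().setInfoLogLevel(InfoLogLevel.INFO_LEVEL).setCreateIfMissing(true);
         final Logger logger = new Logger(options) {
           @Override
           protected void log(final InfoLogLevel infoLogLevel, final String logMsg) {
             logMessageCounter.incrementAndGet(); // forward to any logging framework here
           }
         }) {
      options.setLogger(logger);
      try (final RocksDB db = RocksDB.open(options, "/tmp/logger-example")) { // hypothetical path
        db.put("k".getBytes(), "v".getBytes());
      }
      System.out.println(logMessageCounter.get() + " log lines captured");
    }
  }
}
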
@@ public class LoggerTest { final Logger logger = new Logger(options) { // Create new logger with max log level passed by options @Override - protected void log(InfoLogLevel infoLogLevel, String logMsg) { + protected void log(final InfoLogLevel infoLogLevel, final String logMsg) { assertThat(logMsg).isNotNull(); assertThat(logMsg.length()).isGreaterThan(0); logMessageCounter.incrementAndGet(); @@ -201,20 +199,18 @@ public class LoggerTest { @Test public void changeLogLevelAtRuntime() throws RocksDBException { final AtomicInteger logMessageCounter = new AtomicInteger(); - try (final Options options = new Options(). - setInfoLogLevel(InfoLogLevel.FATAL_LEVEL). - setCreateIfMissing(true); + try (final Options options = + new Options().setInfoLogLevel(InfoLogLevel.FATAL_LEVEL).setCreateIfMissing(true); // Create new logger with max log level passed by options final Logger logger = new Logger(options) { @Override - protected void log(InfoLogLevel infoLogLevel, String logMsg) { + protected void log(final InfoLogLevel infoLogLevel, final String logMsg) { assertThat(logMsg).isNotNull(); assertThat(logMsg.length()).isGreaterThan(0); logMessageCounter.incrementAndGet(); } - } - ) { + }) { // Set custom logger to options options.setLogger(logger); diff --git a/java/src/test/java/org/rocksdb/MemTableTest.java b/java/src/test/java/org/rocksdb/MemTableTest.java index 73ac589a9..6ebf9ef51 100644 --- a/java/src/test/java/org/rocksdb/MemTableTest.java +++ b/java/src/test/java/org/rocksdb/MemTableTest.java @@ -20,8 +20,7 @@ public class MemTableTest { public void hashSkipListMemTable() throws RocksDBException { try(final Options options = new Options()) { // Test HashSkipListMemTableConfig - HashSkipListMemTableConfig memTableConfig = - new HashSkipListMemTableConfig(); + final HashSkipListMemTableConfig memTableConfig = new HashSkipListMemTableConfig(); assertThat(memTableConfig.bucketCount()). isEqualTo(1000000); memTableConfig.setBucketCount(2000000); @@ -44,8 +43,7 @@ public class MemTableTest { @Test public void skipListMemTable() throws RocksDBException { try(final Options options = new Options()) { - SkipListMemTableConfig skipMemTableConfig = - new SkipListMemTableConfig(); + final SkipListMemTableConfig skipMemTableConfig = new SkipListMemTableConfig(); assertThat(skipMemTableConfig.lookahead()). isEqualTo(0); skipMemTableConfig.setLookahead(20); @@ -58,7 +56,7 @@ public class MemTableTest { @Test public void hashLinkedListMemTable() throws RocksDBException { try(final Options options = new Options()) { - HashLinkedListMemTableConfig hashLinkedListMemTableConfig = + final HashLinkedListMemTableConfig hashLinkedListMemTableConfig = new HashLinkedListMemTableConfig(); assertThat(hashLinkedListMemTableConfig.bucketCount()). isEqualTo(50000); @@ -98,8 +96,7 @@ public class MemTableTest { @Test public void vectorMemTable() throws RocksDBException { try(final Options options = new Options()) { - VectorMemTableConfig vectorMemTableConfig = - new VectorMemTableConfig(); + final VectorMemTableConfig vectorMemTableConfig = new VectorMemTableConfig(); assertThat(vectorMemTableConfig.reservedSize()). 
isEqualTo(0); vectorMemTableConfig.setReservedSize(123); diff --git a/java/src/test/java/org/rocksdb/MemoryUtilTest.java b/java/src/test/java/org/rocksdb/MemoryUtilTest.java index 1bea02379..bfdcb9fe1 100644 --- a/java/src/test/java/org/rocksdb/MemoryUtilTest.java +++ b/java/src/test/java/org/rocksdb/MemoryUtilTest.java @@ -45,10 +45,9 @@ public class MemoryUtilTest { new FlushOptions().setWaitForFlush(true); final RocksDB db = RocksDB.open(options, dbFolder1.getRoot().getAbsolutePath())) { - - List<RocksDB> dbs = new ArrayList<>(1); + final List<RocksDB> dbs = new ArrayList<>(1); dbs.add(db); - Set<Cache> caches = new HashSet<>(1); + final Set<Cache> caches = new HashSet<>(1); caches.add(cache); Map<MemoryUsageType, Long> usage = MemoryUtil.getApproximateMemoryUsageByType(dbs, caches); @@ -85,7 +84,7 @@ */ @Test public void getApproximateMemoryUsageByTypeNulls() throws RocksDBException { - Map<MemoryUsageType, Long> usage = MemoryUtil.getApproximateMemoryUsageByType(null, null); + final Map<MemoryUsageType, Long> usage = MemoryUtil.getApproximateMemoryUsageByType(null, null); assertThat(usage.get(MemoryUsageType.kMemTableTotal)).isEqualTo(null); assertThat(usage.get(MemoryUsageType.kMemTableUnFlushed)).isEqualTo(null); @@ -98,38 +97,32 @@ */ @Test public void getApproximateMemoryUsageByTypeMultiple() throws RocksDBException { - try (final Cache cache1 = new LRUCache(1 * 1024 * 1024); - final Options options1 = - new Options() - .setCreateIfMissing(true) - .setTableFormatConfig(new BlockBasedTableConfig().setBlockCache(cache1)); - final RocksDB db1 = - RocksDB.open(options1, dbFolder1.getRoot().getAbsolutePath()); - final Cache cache2 = new LRUCache(1 * 1024 * 1024); - final Options options2 = - new Options() - .setCreateIfMissing(true) - .setTableFormatConfig(new BlockBasedTableConfig().setBlockCache(cache2)); - final RocksDB db2 = - RocksDB.open(options2, dbFolder2.getRoot().getAbsolutePath()); - final FlushOptions flushOptions = - new FlushOptions().setWaitForFlush(true); + try (final Cache cache1 = new LRUCache(1024 * 1024); + final Options options1 = new Options().setCreateIfMissing(true).setTableFormatConfig( + new BlockBasedTableConfig().setBlockCache(cache1)); + final RocksDB db1 = RocksDB.open(options1, dbFolder1.getRoot().getAbsolutePath()); + final Cache cache2 = new LRUCache(1024 * 1024); + final Options options2 = new Options().setCreateIfMissing(true).setTableFormatConfig( + new BlockBasedTableConfig().setBlockCache(cache2)); + final RocksDB db2 = RocksDB.open(options2, dbFolder2.getRoot().getAbsolutePath()); + final FlushOptions flushOptions = new FlushOptions().setWaitForFlush(true) ) { - List<RocksDB> dbs = new ArrayList<>(1); + final List<RocksDB> dbs = new ArrayList<>(1); dbs.add(db1); dbs.add(db2); - Set<Cache> caches = new HashSet<>(1); + final Set<Cache> caches = new HashSet<>(1); caches.add(cache1); caches.add(cache2); - for (RocksDB db: dbs) { + for (final RocksDB db : dbs) { db.put(key, value); db.flush(flushOptions); db.get(key); } - Map<MemoryUsageType, Long> usage = MemoryUtil.getApproximateMemoryUsageByType(dbs, caches); + final Map<MemoryUsageType, Long> usage = + MemoryUtil.getApproximateMemoryUsageByType(dbs, caches); assertThat(usage.get(MemoryUsageType.kMemTableTotal)).isEqualTo( db1.getAggregatedLongProperty(MEMTABLE_SIZE) + db2.getAggregatedLongProperty(MEMTABLE_SIZE)); assertThat(usage.get(MemoryUsageType.kMemTableUnFlushed)).isEqualTo( @@ -137,7 +130,6 @@ assertThat(usage.get(MemoryUsageType.kTableReadersTotal)).isEqualTo( db1.getAggregatedLongProperty(TABLE_READERS) + db2.getAggregatedLongProperty(TABLE_READERS)); 
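The MemoryUtilTest hunks above exercise MemoryUtil.getApproximateMemoryUsageByType, which reports memtable, table-reader, and block-cache usage per MemoryUsageType for a set of open databases and caches. A minimal standalone sketch of the same call outside the JUnit harness follows; the on-disk path is a hypothetical placeholder, while the API calls are exactly those used in the test:

import java.util.Collections;
import java.util.Map;
import org.rocksdb.*;

public class MemoryUsageSketch {
  public static void main(final String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final Cache cache = new LRUCache(1024 * 1024);
        final Options options = new Options().setCreateIfMissing(true).setTableFormatConfig(
            new BlockBasedTableConfig().setBlockCache(cache));
        final RocksDB db = RocksDB.open(options, "/tmp/memory-usage-sketch")) { // hypothetical path
      db.put("key".getBytes(), "value".getBytes());
      // Usage is keyed by MemoryUsageType, e.g. kMemTableTotal or kCacheTotal.
      final Map<MemoryUsageType, Long> usage = MemoryUtil.getApproximateMemoryUsageByType(
          Collections.singletonList(db), Collections.singleton(cache));
      System.out.println("memtable bytes: " + usage.get(MemoryUsageType.kMemTableTotal));
    }
  }
}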
assertThat(usage.get(MemoryUsageType.kCacheTotal)).isGreaterThan(0); - } } diff --git a/java/src/test/java/org/rocksdb/MergeTest.java b/java/src/test/java/org/rocksdb/MergeTest.java index a840eb104..f99ac49d3 100644 --- a/java/src/test/java/org/rocksdb/MergeTest.java +++ b/java/src/test/java/org/rocksdb/MergeTest.java @@ -45,14 +45,16 @@ public class MergeTest { } } - private byte[] longToByteArray(long l) { - ByteBuffer buf = ByteBuffer.allocate(Long.SIZE / Byte.SIZE).order(ByteOrder.LITTLE_ENDIAN); + private byte[] longToByteArray(final long l) { + final ByteBuffer buf = + ByteBuffer.allocate(Long.SIZE / Byte.SIZE).order(ByteOrder.LITTLE_ENDIAN); buf.putLong(l); return buf.array(); } - private long longFromByteArray(byte[] a) { - ByteBuffer buf = ByteBuffer.allocate(Long.SIZE / Byte.SIZE).order(ByteOrder.LITTLE_ENDIAN); + private long longFromByteArray(final byte[] a) { + final ByteBuffer buf = + ByteBuffer.allocate(Long.SIZE / Byte.SIZE).order(ByteOrder.LITTLE_ENDIAN); buf.put(a); buf.flip(); return buf.getLong(); @@ -106,9 +108,8 @@ public class MergeTest { db.merge(columnFamilyHandleList.get(1), "cfkey".getBytes(), "bb".getBytes()); - byte[] value = db.get(columnFamilyHandleList.get(1), - "cfkey".getBytes()); - String strValue = new String(value); + final byte[] value = db.get(columnFamilyHandleList.get(1), "cfkey".getBytes()); + final String strValue = new String(value); assertThat(strValue).isEqualTo("aa,bb"); } finally { for (final ColumnFamilyHandle handle : columnFamilyHandleList) { @@ -147,9 +148,8 @@ public class MergeTest { // merge (long)157 under key db.merge(columnFamilyHandleList.get(1), "cfkey".getBytes(), longToByteArray(157)); - byte[] value = db.get(columnFamilyHandleList.get(1), - "cfkey".getBytes()); - long longValue = longFromByteArray(value); + final byte[] value = db.get(columnFamilyHandleList.get(1), "cfkey".getBytes()); + final long longValue = longFromByteArray(value); assertThat(longValue).isEqualTo(257); } finally { for (final ColumnFamilyHandle handle : columnFamilyHandleList) { @@ -234,7 +234,7 @@ public class MergeTest { "cfkey".getBytes(), "bb".getBytes()); byte[] value = db.get(columnFamilyHandleList.get(1), "cfkey".getBytes()); - String strValue = new String(value); + final String strValue = new String(value); // Test also with createColumnFamily try (final ColumnFamilyOptions cfHandleOpts = @@ -251,7 +251,7 @@ public class MergeTest { db.merge(cfHandle, new WriteOptions(), "cfkey2".getBytes(), "yy".getBytes()); value = db.get(cfHandle, "cfkey2".getBytes()); - String strValueTmpCf = new String(value); + final String strValueTmpCf = new String(value); assertThat(strValue).isEqualTo("aa,bb"); assertThat(strValueTmpCf).isEqualTo("xx,yy"); @@ -296,7 +296,7 @@ public class MergeTest { "cfkey".getBytes(), longToByteArray(1)); byte[] value = db.get(columnFamilyHandleList.get(1), "cfkey".getBytes()); - long longValue = longFromByteArray(value); + final long longValue = longFromByteArray(value); // Test also with createColumnFamily try (final ColumnFamilyOptions cfHandleOpts = @@ -313,7 +313,7 @@ public class MergeTest { db.merge(cfHandle, new WriteOptions(), "cfkey2".getBytes(), longToByteArray(50)); value = db.get(cfHandle, "cfkey2".getBytes()); - long longValueTmpCf = longFromByteArray(value); + final long longValueTmpCf = longFromByteArray(value); assertThat(longValue).isEqualTo(101); assertThat(longValueTmpCf).isEqualTo(250);
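The "aa,bb" and 257 expectations in MergeTest come from merge operators configured on the column family: string append for the text values, and a uint64 add operator for the little-endian byte arrays built by longToByteArray above. A minimal sketch of the string-append case on the default column family; the /tmp path is hypothetical, and the operator and setter are the standard RocksJava ones:

import org.rocksdb.*;

public class MergeSketch {
  public static void main(final String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final StringAppendOperator stringAppendOperator = new StringAppendOperator();
        final Options options =
            new Options().setCreateIfMissing(true).setMergeOperator(stringAppendOperator);
        final RocksDB db = RocksDB.open(options, "/tmp/merge-sketch")) { // hypothetical path
      db.put("cfkey".getBytes(), "aa".getBytes());
      db.merge("cfkey".getBytes(), "bb".getBytes()); // appended with the default ',' delimiter
      System.out.println(new String(db.get("cfkey".getBytes()))); // prints "aa,bb"
    }
  }
}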
diff --git a/java/src/test/java/org/rocksdb/MultiColumnRegressionTest.java b/java/src/test/java/org/rocksdb/MultiColumnRegressionTest.java index cdfd9d3a9..6087b0260 100644 --- a/java/src/test/java/org/rocksdb/MultiColumnRegressionTest.java +++ b/java/src/test/java/org/rocksdb/MultiColumnRegressionTest.java @@ -51,15 +51,14 @@ public class MultiColumnRegressionTest { public void transactionDB() throws RocksDBException { final List<ColumnFamilyDescriptor> columnFamilyDescriptors = new ArrayList<>(); for (int i = 0; i < params.numColumns; i++) { - StringBuilder sb = new StringBuilder(); + final StringBuilder sb = new StringBuilder(); sb.append("cf" + i); for (int j = 0; j < params.keySize; j++) sb.append("_cf"); columnFamilyDescriptors.add(new ColumnFamilyDescriptor(sb.toString().getBytes())); } try (final Options opt = new Options().setCreateIfMissing(true); final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath())) { - final List<ColumnFamilyHandle> columnFamilyHandles = - db.createColumnFamilies(columnFamilyDescriptors); + db.createColumnFamilies(columnFamilyDescriptors); } columnFamilyDescriptors.add(new ColumnFamilyDescriptor("default".getBytes())); @@ -68,7 +67,7 @@ new TransactionDBOptions(), dbFolder.getRoot().getAbsolutePath(), columnFamilyDescriptors, columnFamilyHandles)) { final WriteOptions writeOptions = new WriteOptions(); - try (Transaction transaction = tdb.beginTransaction(writeOptions)) { + try (final Transaction transaction = tdb.beginTransaction(writeOptions)) { for (int i = 0; i < params.numColumns; i++) { transaction.put( columnFamilyHandles.get(i), ("key" + i).getBytes(), ("value" + (i - 7)).getBytes()); @@ -76,7 +75,7 @@ transaction.put("key".getBytes(), "value".getBytes()); transaction.commit(); } - for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles) { + for (final ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles) { columnFamilyHandle.close(); } } @@ -85,7 +84,7 @@ try (final TransactionDB tdb = TransactionDB.open(new DBOptions().setCreateIfMissing(true), new TransactionDBOptions(), dbFolder.getRoot().getAbsolutePath(), columnFamilyDescriptors, columnFamilyHandles2)) { - try (Transaction transaction = tdb.beginTransaction(new WriteOptions())) { + try (final Transaction transaction = tdb.beginTransaction(new WriteOptions())) { final ReadOptions readOptions = new ReadOptions(); for (int i = 0; i < params.numColumns; i++) { final byte[] value = @@ -94,7 +93,7 @@ } transaction.commit(); } - for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles2) { + for (final ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles2) { columnFamilyHandle.close(); } } @@ -112,7 +111,7 @@ try (final OptimisticTransactionDB otdb = OptimisticTransactionDB.open( new DBOptions().setCreateIfMissing(true), dbFolder.getRoot().getAbsolutePath(), columnFamilyDescriptors, columnFamilyHandles)) { - try (Transaction transaction = otdb.beginTransaction(new WriteOptions())) { + try (final Transaction transaction = otdb.beginTransaction(new WriteOptions())) { for (int i = 0; i < params.numColumns; i++) { transaction.put( columnFamilyHandles.get(i), ("key" + i).getBytes(), ("value" + (i - 7)).getBytes()); @@ -120,7 +119,7 @@ transaction.put("key".getBytes(), "value".getBytes()); transaction.commit(); } - for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles) { + for (final ColumnFamilyHandle columnFamilyHandle : 
columnFamilyHandles) { columnFamilyHandle.close(); } } @@ -129,7 +128,7 @@ public class MultiColumnRegressionTest { try (final OptimisticTransactionDB otdb = OptimisticTransactionDB.open( new DBOptions().setCreateIfMissing(true), dbFolder.getRoot().getAbsolutePath(), columnFamilyDescriptors, columnFamilyHandles2)) { - try (Transaction transaction = otdb.beginTransaction(new WriteOptions())) { + try (final Transaction transaction = otdb.beginTransaction(new WriteOptions())) { final ReadOptions readOptions = new ReadOptions(); for (int i = 0; i < params.numColumns; i++) { final byte[] value = @@ -138,7 +137,7 @@ public class MultiColumnRegressionTest { } transaction.commit(); } - for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles2) { + for (final ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles2) { columnFamilyHandle.close(); } } diff --git a/java/src/test/java/org/rocksdb/MultiGetManyKeysTest.java b/java/src/test/java/org/rocksdb/MultiGetManyKeysTest.java index 90a13e1da..e66eef622 100644 --- a/java/src/test/java/org/rocksdb/MultiGetManyKeysTest.java +++ b/java/src/test/java/org/rocksdb/MultiGetManyKeysTest.java @@ -114,7 +114,7 @@ public class MultiGetManyKeysTest { transaction.multiGetAsList(new ReadOptions(), columnFamilyHandlesForMultiGet, keys); assertKeysAndValues(keys, keyValues, values); } - for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles) { + for (final ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles) { columnFamilyHandle.close(); } } @@ -148,7 +148,7 @@ public class MultiGetManyKeysTest { new ReadOptions(), columnFamilyHandlesForMultiGet, keys); assertKeysAndValues(keys, keyValues, values); } - for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles) { + for (final ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles) { columnFamilyHandle.close(); } } @@ -178,22 +178,22 @@ public class MultiGetManyKeysTest { return keyValues; } - private void putKeysAndValues(Map keyValues) throws RocksDBException { + private void putKeysAndValues(final Map keyValues) throws RocksDBException { try (final Options options = new Options().setCreateIfMissing(true); final RocksDB db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath())) { - for (Map.Entry keyValue : keyValues.entrySet()) { + for (final Map.Entry keyValue : keyValues.entrySet()) { db.put(keyValue.getKey().get(), keyValue.getValue()); } } } - private void putKeysAndValues(ColumnFamilyDescriptor columnFamilyDescriptor, - Map keyValues) throws RocksDBException { + private void putKeysAndValues(final ColumnFamilyDescriptor columnFamilyDescriptor, + final Map keyValues) throws RocksDBException { try (final Options options = new Options().setCreateIfMissing(true); final RocksDB db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath()); final ColumnFamilyHandle columnFamilyHandle = db.createColumnFamily(columnFamilyDescriptor)) { - for (Map.Entry keyValue : keyValues.entrySet()) { + for (final Map.Entry keyValue : keyValues.entrySet()) { db.put(columnFamilyHandle, keyValue.getKey().get(), keyValue.getValue()); } } @@ -213,9 +213,9 @@ public class MultiGetManyKeysTest { } } - static private class Key { + private static class Key { private final byte[] bytes; - public Key(byte[] bytes) { + public Key(final byte[] bytes) { this.bytes = bytes; } @@ -224,12 +224,12 @@ public class MultiGetManyKeysTest { } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) 
return false; - Key key = (Key) o; + final Key key = (Key) o; return Arrays.equals(bytes, key.bytes); } diff --git a/java/src/test/java/org/rocksdb/MutableColumnFamilyOptionsTest.java b/java/src/test/java/org/rocksdb/MutableColumnFamilyOptionsTest.java index b2b2599a7..d858a150d 100644 --- a/java/src/test/java/org/rocksdb/MutableColumnFamilyOptionsTest.java +++ b/java/src/test/java/org/rocksdb/MutableColumnFamilyOptionsTest.java @@ -122,7 +122,7 @@ public class MutableColumnFamilyOptionsTest { + "max_write_buffer_size_to_maintain=0; memtable_insert_with_hint_prefix_extractor=nullptr; level_compaction_dynamic_level_bytes=false; " + "inplace_update_support=false; experimental_mempurge_threshold=0.003"; - MutableColumnFamilyOptions.MutableColumnFamilyOptionsBuilder cf = + final MutableColumnFamilyOptions.MutableColumnFamilyOptionsBuilder cf = MutableColumnFamilyOptions.parse(optionsString, true); // Check the values from the parsed string which are column family options diff --git a/java/src/test/java/org/rocksdb/NativeComparatorWrapperTest.java b/java/src/test/java/org/rocksdb/NativeComparatorWrapperTest.java index 970e58c0c..1e0ded816 100644 --- a/java/src/test/java/org/rocksdb/NativeComparatorWrapperTest.java +++ b/java/src/test/java/org/rocksdb/NativeComparatorWrapperTest.java @@ -5,15 +5,15 @@ package org.rocksdb; +import static org.junit.Assert.assertEquals; + +import java.util.Arrays; +import java.util.Comparator; +import java.util.Random; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; -import java.util.*; -import java.util.Comparator; - -import static org.junit.Assert.assertEquals; - public class NativeComparatorWrapperTest { static { RocksDB.loadLibrary(); @@ -39,7 +39,7 @@ public class NativeComparatorWrapperTest { try (final RocksDB db = RocksDB.open(opt, dbPath)) { for (int i = 0; i < ITERATIONS; i++) { final String strKey = randomString(); - final byte key[] = strKey.getBytes(); + final byte[] key = strKey.getBytes(); // does key already exist (avoid duplicates) if (i > 0 && db.get(key) != null) { i--; // generate a different key @@ -51,12 +51,7 @@ public class NativeComparatorWrapperTest { } // sort the stored keys into ascending alpha-numeric order - Arrays.sort(storedKeys, new Comparator() { - @Override - public int compare(final String o1, final String o2) { - return o1.compareTo(o2); - } - }); + Arrays.sort(storedKeys, Comparator.naturalOrder()); // re-open db and read from start to end // string keys should be in ascending diff --git a/java/src/test/java/org/rocksdb/NativeLibraryLoaderTest.java b/java/src/test/java/org/rocksdb/NativeLibraryLoaderTest.java index ab60081a0..6b954f67e 100644 --- a/java/src/test/java/org/rocksdb/NativeLibraryLoaderTest.java +++ b/java/src/test/java/org/rocksdb/NativeLibraryLoaderTest.java @@ -32,7 +32,7 @@ public class NativeLibraryLoaderTest { @Test public void overridesExistingLibrary() throws IOException { - File first = NativeLibraryLoader.getInstance().loadLibraryFromJarToTemp( + final File first = NativeLibraryLoader.getInstance().loadLibraryFromJarToTemp( temporaryFolder.getRoot().getAbsolutePath()); NativeLibraryLoader.getInstance().loadLibraryFromJarToTemp( temporaryFolder.getRoot().getAbsolutePath()); diff --git a/java/src/test/java/org/rocksdb/OptionsTest.java b/java/src/test/java/org/rocksdb/OptionsTest.java index 129f1c39a..e1a7f8c27 100644 --- a/java/src/test/java/org/rocksdb/OptionsTest.java +++ b/java/src/test/java/org/rocksdb/OptionsTest.java @@ -8,8 +8,6 @@ package org.rocksdb; import 
static org.assertj.core.api.Assertions.assertThat; import static org.junit.Assert.*; -import java.io.IOException; -import java.nio.file.Files; import java.nio.file.Paths; import java.util.*; import java.util.concurrent.atomic.AtomicBoolean; @@ -28,11 +26,11 @@ public class OptionsTest { @Test public void copyConstructor() { - Options origOpts = new Options(); + final Options origOpts = new Options(); origOpts.setNumLevels(rand.nextInt(8)); origOpts.setTargetFileSizeMultiplier(rand.nextInt(100)); origOpts.setLevel0StopWritesTrigger(rand.nextInt(50)); - Options copyOpts = new Options(origOpts); + final Options copyOpts = new Options(origOpts); assertThat(origOpts.numLevels()).isEqualTo(copyOpts.numLevels()); assertThat(origOpts.targetFileSizeMultiplier()).isEqualTo(copyOpts.targetFileSizeMultiplier()); assertThat(origOpts.level0StopWritesTrigger()).isEqualTo(copyOpts.level0StopWritesTrigger()); @@ -675,9 +673,8 @@ public class OptionsTest { @Test public void setWriteBufferManager() throws RocksDBException { - try (final Options opt = new Options(); - final Cache cache = new LRUCache(1 * 1024 * 1024); - final WriteBufferManager writeBufferManager = new WriteBufferManager(2000l, cache)) { + try (final Options opt = new Options(); final Cache cache = new LRUCache(1024 * 1024); + final WriteBufferManager writeBufferManager = new WriteBufferManager(2000L, cache)) { opt.setWriteBufferManager(writeBufferManager); assertThat(opt.writeBufferManager()).isEqualTo(writeBufferManager); } @@ -685,9 +682,8 @@ public class OptionsTest { @Test public void setWriteBufferManagerWithZeroBufferSize() throws RocksDBException { - try (final Options opt = new Options(); - final Cache cache = new LRUCache(1 * 1024 * 1024); - final WriteBufferManager writeBufferManager = new WriteBufferManager(0l, cache)) { + try (final Options opt = new Options(); final Cache cache = new LRUCache(1024 * 1024); + final WriteBufferManager writeBufferManager = new WriteBufferManager(0L, cache)) { opt.setWriteBufferManager(writeBufferManager); assertThat(opt.writeBufferManager()).isEqualTo(writeBufferManager); } @@ -695,8 +691,8 @@ public class OptionsTest { @Test public void setWriteBufferManagerWithAllowStall() throws RocksDBException { - try (final Options opt = new Options(); final Cache cache = new LRUCache(1 * 1024 * 1024); - final WriteBufferManager writeBufferManager = new WriteBufferManager(2000l, cache, true)) { + try (final Options opt = new Options(); final Cache cache = new LRUCache(1024 * 1024); + final WriteBufferManager writeBufferManager = new WriteBufferManager(2000L, cache, true)) { opt.setWriteBufferManager(writeBufferManager); assertThat(opt.writeBufferManager()).isEqualTo(writeBufferManager); assertThat(opt.writeBufferManager().allowStall()).isEqualTo(true); @@ -1476,15 +1472,15 @@ public class OptionsTest { } }) { assertThat(options.setListeners(Arrays.asList(el1, el2))).isEqualTo(options); - List<AbstractEventListener> listeners = options.listeners(); + final List<AbstractEventListener> listeners = options.listeners(); assertEquals(el1, listeners.get(0)); assertEquals(el2, listeners.get(1)); - options.setListeners(Collections.<AbstractEventListener>emptyList()); + options.setListeners(Collections.emptyList()); listeners.get(0).onTableFileDeleted(null); assertTrue(wasCalled1.get()); listeners.get(1).onMemTableSealed(null); assertTrue(wasCalled2.get()); - List<AbstractEventListener> listeners2 = options.listeners(); + final List<AbstractEventListener> listeners2 = options.listeners(); assertNotNull(listeners2); assertEquals(0, listeners2.size()); }
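The setWriteBufferManager tests above share one pattern: total memtable memory is charged against an LRUCache budget through a WriteBufferManager, optionally stalling writes when the budget is exhausted. A minimal sketch of the wiring, with arbitrary sizes and an illustrative class name; note the caller still owns the cache and manager and must eventually close them:

import org.rocksdb.Cache;
import org.rocksdb.LRUCache;
import org.rocksdb.Options;
import org.rocksdb.RocksDBException;
import org.rocksdb.WriteBufferManager;

final class WriteBufferBudgetSketch {
  // Builds Options whose memtable allocations are charged to a shared cache budget.
  static Options buildOptions() throws RocksDBException {
    final Cache cache = new LRUCache(64 * 1024 * 1024); // 64 MiB shared budget
    final WriteBufferManager writeBufferManager =
        new WriteBufferManager(32 * 1024 * 1024, cache, true); // allowStall = true
    return new Options().setCreateIfMissing(true).setWriteBufferManager(writeBufferManager);
  }
}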
diff --git a/java/src/test/java/org/rocksdb/OptionsUtilTest.java b/java/src/test/java/org/rocksdb/OptionsUtilTest.java index 02bfc0025..c2975eadc 100644 --- a/java/src/test/java/org/rocksdb/OptionsUtilTest.java +++ b/java/src/test/java/org/rocksdb/OptionsUtilTest.java @@ -40,13 +40,13 @@ public class OptionsUtilTest { assertThat(db).isNotNull(); } - String fName = OptionsUtil.getLatestOptionsFileName(dbPath, Env.getDefault()); + final String fName = OptionsUtil.getLatestOptionsFileName(dbPath, Env.getDefault()); assertThat(fName).isNotNull(); - assert(fName.startsWith("OPTIONS-") == true); + assert (fName.startsWith("OPTIONS-")); // System.out.println("latest options fileName: " + fName); } - private void verifyOptions(TestAPI apiType) throws RocksDBException { + private void verifyOptions(final TestAPI apiType) throws RocksDBException { final String dbPath = dbFolder.getRoot().getAbsolutePath(); final Options options = new Options() .setCreateIfMissing(true) @@ -76,8 +76,8 @@ public class OptionsUtilTest { } // Read the options back and verify - DBOptions dbOptions = new DBOptions(); - ConfigOptions configOptions = + final DBOptions dbOptions = new DBOptions(); + final ConfigOptions configOptions = new ConfigOptions().setIgnoreUnknownOptions(false).setInputStringsEscaped(true).setEnv( Env.getDefault()); final List<ColumnFamilyDescriptor> cfDescs = new ArrayList<>(); @@ -100,7 +100,7 @@ public class OptionsUtilTest { assertThat(cfDescs.get(0).getName()).isEqualTo(RocksDB.DEFAULT_COLUMN_FAMILY); assertThat(cfDescs.get(1).getName()).isEqualTo(secondCFName); - ColumnFamilyOptions defaultCFOpts = cfDescs.get(0).getOptions(); + final ColumnFamilyOptions defaultCFOpts = cfDescs.get(0).getOptions(); assertThat(defaultCFOpts.writeBufferSize()).isEqualTo(baseDefaultCFOpts.writeBufferSize()); assertThat(defaultCFOpts.maxWriteBufferNumber()) .isEqualTo(baseDefaultCFOpts.maxWriteBufferNumber()); @@ -113,7 +113,7 @@ public class OptionsUtilTest { assertThat(defaultCFOpts.bottommostCompressionType()) .isEqualTo(baseDefaultCFOpts.bottommostCompressionType()); - ColumnFamilyOptions secondCFOpts = cfDescs.get(1).getOptions(); + final ColumnFamilyOptions secondCFOpts = cfDescs.get(1).getOptions(); assertThat(secondCFOpts.writeBufferSize()).isEqualTo(baseSecondCFOpts.writeBufferSize()); assertThat(secondCFOpts.maxWriteBufferNumber()) .isEqualTo(baseSecondCFOpts.maxWriteBufferNumber()); diff --git a/java/src/test/java/org/rocksdb/PlainTableConfigTest.java b/java/src/test/java/org/rocksdb/PlainTableConfigTest.java index c813dbbb4..827eb79f9 100644 --- a/java/src/test/java/org/rocksdb/PlainTableConfigTest.java +++ b/java/src/test/java/org/rocksdb/PlainTableConfigTest.java @@ -18,7 +18,7 @@ public class PlainTableConfigTest { @Test public void keySize() { - PlainTableConfig plainTableConfig = new PlainTableConfig(); + final PlainTableConfig plainTableConfig = new PlainTableConfig(); plainTableConfig.setKeySize(5); assertThat(plainTableConfig.keySize()). isEqualTo(5); @@ -26,7 +26,7 @@ public class PlainTableConfigTest { @Test public void bloomBitsPerKey() { - PlainTableConfig plainTableConfig = new PlainTableConfig(); + final PlainTableConfig plainTableConfig = new PlainTableConfig(); plainTableConfig.setBloomBitsPerKey(11); assertThat(plainTableConfig.bloomBitsPerKey()). isEqualTo(11); @@ -34,7 +34,7 @@ public class PlainTableConfigTest { @Test public void hashTableRatio() { - PlainTableConfig plainTableConfig = new PlainTableConfig(); + final PlainTableConfig plainTableConfig = new PlainTableConfig(); plainTableConfig.setHashTableRatio(0.95); assertThat(plainTableConfig.hashTableRatio()). 
isEqualTo(0.95); @@ -42,7 +42,7 @@ public class PlainTableConfigTest { @Test public void indexSparseness() { - PlainTableConfig plainTableConfig = new PlainTableConfig(); + final PlainTableConfig plainTableConfig = new PlainTableConfig(); plainTableConfig.setIndexSparseness(18); assertThat(plainTableConfig.indexSparseness()). isEqualTo(18); @@ -50,7 +50,7 @@ public class PlainTableConfigTest { @Test public void hugePageTlbSize() { - PlainTableConfig plainTableConfig = new PlainTableConfig(); + final PlainTableConfig plainTableConfig = new PlainTableConfig(); plainTableConfig.setHugePageTlbSize(1); assertThat(plainTableConfig.hugePageTlbSize()). isEqualTo(1); @@ -58,7 +58,7 @@ public class PlainTableConfigTest { @Test public void encodingType() { - PlainTableConfig plainTableConfig = new PlainTableConfig(); + final PlainTableConfig plainTableConfig = new PlainTableConfig(); plainTableConfig.setEncodingType(EncodingType.kPrefix); assertThat(plainTableConfig.encodingType()).isEqualTo( EncodingType.kPrefix); @@ -66,13 +66,13 @@ public class PlainTableConfigTest { @Test public void fullScanMode() { - PlainTableConfig plainTableConfig = new PlainTableConfig(); + final PlainTableConfig plainTableConfig = new PlainTableConfig(); plainTableConfig.setFullScanMode(true); assertThat(plainTableConfig.fullScanMode()).isTrue(); } @Test public void storeIndexInFile() { - PlainTableConfig plainTableConfig = new PlainTableConfig(); + final PlainTableConfig plainTableConfig = new PlainTableConfig(); plainTableConfig.setStoreIndexInFile(true); assertThat(plainTableConfig.storeIndexInFile()). isTrue(); diff --git a/java/src/test/java/org/rocksdb/PutMultiplePartsTest.java b/java/src/test/java/org/rocksdb/PutMultiplePartsTest.java index 471ef0728..7835737ae 100644 --- a/java/src/test/java/org/rocksdb/PutMultiplePartsTest.java +++ b/java/src/test/java/org/rocksdb/PutMultiplePartsTest.java @@ -113,12 +113,12 @@ public class PutMultiplePartsTest { final List keys = generateItemsAsList("key", ":", numParts); final byte[][] values = generateItems("value", "", numParts); - StringBuilder singleKey = new StringBuilder(); + final StringBuilder singleKey = new StringBuilder(); for (int i = 0; i < numParts; i++) { singleKey.append(new String(keys.get(i), StandardCharsets.UTF_8)); } final byte[] result = db.get(singleKey.toString().getBytes()); - StringBuilder singleValue = new StringBuilder(); + final StringBuilder singleValue = new StringBuilder(); for (int i = 0; i < numParts; i++) { singleValue.append(new String(values[i], StandardCharsets.UTF_8)); } @@ -136,12 +136,12 @@ public class PutMultiplePartsTest { final List keys = generateItemsAsList("key", ":", numParts); final byte[][] values = generateItems("value", "", numParts); - StringBuilder singleKey = new StringBuilder(); + final StringBuilder singleKey = new StringBuilder(); for (int i = 0; i < numParts; i++) { singleKey.append(new String(keys.get(i), StandardCharsets.UTF_8)); } final byte[] result = db.get(columnFamilyHandles.get(0), singleKey.toString().getBytes()); - StringBuilder singleValue = new StringBuilder(); + final StringBuilder singleValue = new StringBuilder(); for (int i = 0; i < numParts; i++) { singleValue.append(new String(values[i], StandardCharsets.UTF_8)); } diff --git a/java/src/test/java/org/rocksdb/ReadOnlyTest.java b/java/src/test/java/org/rocksdb/ReadOnlyTest.java index 5b40a5df1..99549b61b 100644 --- a/java/src/test/java/org/rocksdb/ReadOnlyTest.java +++ b/java/src/test/java/org/rocksdb/ReadOnlyTest.java @@ -4,17 +4,16 @@ // (found in the 
LICENSE.Apache file in the root directory). package org.rocksdb; +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -import static org.assertj.core.api.Assertions.assertThat; - public class ReadOnlyTest { @ClassRule @@ -71,14 +70,14 @@ public class ReadOnlyTest { @Test(expected = RocksDBException.class) public void failToWriteInReadOnly() throws RocksDBException { try (final Options options = new Options().setCreateIfMissing(true)) { - try (final RocksDB db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath())) { + try (final RocksDB ignored = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath())) { // no-op } } try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) { - final List cfDescriptors = - Arrays.asList(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts)); + final List cfDescriptors = Collections.singletonList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts)); final List readOnlyColumnFamilyHandleList = new ArrayList<>(); try (final RocksDB rDb = RocksDB.openReadOnly(dbFolder.getRoot().getAbsolutePath(), @@ -92,15 +91,13 @@ public class ReadOnlyTest { @Test(expected = RocksDBException.class) public void failToCFWriteInReadOnly() throws RocksDBException { try (final Options options = new Options().setCreateIfMissing(true); - final RocksDB db = RocksDB.open(options, - dbFolder.getRoot().getAbsolutePath())) { + final RocksDB ignored = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath())) { //no-op } try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) { - final List cfDescriptors = Arrays.asList( - new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts) - ); + final List cfDescriptors = Collections.singletonList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts)); final List readOnlyColumnFamilyHandleList = new ArrayList<>(); try (final RocksDB rDb = RocksDB.openReadOnly( @@ -114,15 +111,13 @@ public class ReadOnlyTest { @Test(expected = RocksDBException.class) public void failToRemoveInReadOnly() throws RocksDBException { try (final Options options = new Options().setCreateIfMissing(true); - final RocksDB db = RocksDB.open(options, - dbFolder.getRoot().getAbsolutePath())) { + final RocksDB ignored = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath())) { //no-op } try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) { - final List cfDescriptors = Arrays.asList( - new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts) - ); + final List cfDescriptors = Collections.singletonList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts)); final List readOnlyColumnFamilyHandleList = new ArrayList<>(); @@ -138,23 +133,20 @@ public class ReadOnlyTest { @Test(expected = RocksDBException.class) public void failToCFRemoveInReadOnly() throws RocksDBException { try (final Options options = new Options().setCreateIfMissing(true); - final RocksDB db = RocksDB.open(options, - dbFolder.getRoot().getAbsolutePath())) { + final RocksDB ignored = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath())) { //no-op } try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) { - final List cfDescriptors = Arrays.asList( - new 
ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts) - ); + final List cfDescriptors = Collections.singletonList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts)); final List readOnlyColumnFamilyHandleList = new ArrayList<>(); try (final RocksDB rDb = RocksDB.openReadOnly( dbFolder.getRoot().getAbsolutePath(), cfDescriptors, readOnlyColumnFamilyHandleList)) { - rDb.delete(readOnlyColumnFamilyHandleList.get(0), - "key".getBytes()); + rDb.delete(readOnlyColumnFamilyHandleList.get(0), "key".getBytes()); } } } @@ -162,15 +154,13 @@ public class ReadOnlyTest { @Test(expected = RocksDBException.class) public void failToWriteBatchReadOnly() throws RocksDBException { try (final Options options = new Options().setCreateIfMissing(true); - final RocksDB db = RocksDB.open(options, - dbFolder.getRoot().getAbsolutePath())) { + final RocksDB ignored = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath())) { //no-op } try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) { - final List cfDescriptors = Arrays.asList( - new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts) - ); + final List cfDescriptors = Collections.singletonList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts)); final List readOnlyColumnFamilyHandleList = new ArrayList<>(); @@ -179,8 +169,8 @@ public class ReadOnlyTest { readOnlyColumnFamilyHandleList); final WriteBatch wb = new WriteBatch(); final WriteOptions wOpts = new WriteOptions()) { - wb.put("key".getBytes(), "value".getBytes()); - rDb.write(wOpts, wb); + wb.put("key".getBytes(), "value".getBytes()); + rDb.write(wOpts, wb); } } } @@ -188,15 +178,13 @@ public class ReadOnlyTest { @Test(expected = RocksDBException.class) public void failToCFWriteBatchReadOnly() throws RocksDBException { try (final Options options = new Options().setCreateIfMissing(true); - final RocksDB db = RocksDB.open(options, - dbFolder.getRoot().getAbsolutePath())) { + final RocksDB ignored = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath())) { //no-op } try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) { - final List cfDescriptors = Arrays.asList( - new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts) - ); + final List cfDescriptors = Collections.singletonList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts)); final List readOnlyColumnFamilyHandleList = new ArrayList<>(); @@ -205,9 +193,8 @@ public class ReadOnlyTest { readOnlyColumnFamilyHandleList); final WriteBatch wb = new WriteBatch(); final WriteOptions wOpts = new WriteOptions()) { - wb.put(readOnlyColumnFamilyHandleList.get(0), "key".getBytes(), - "value".getBytes()); - rDb.write(wOpts, wb); + wb.put(readOnlyColumnFamilyHandleList.get(0), "key".getBytes(), "value".getBytes()); + rDb.write(wOpts, wb); } } } @@ -215,18 +202,19 @@ public class ReadOnlyTest { @Test(expected = RocksDBException.class) public void errorIfWalFileExists() throws RocksDBException { try (final Options options = new Options().setCreateIfMissing(true); - final RocksDB db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath())) { + final RocksDB ignored = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath())) { // no-op } try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) { - final List cfDescriptors = - Arrays.asList(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts)); + final List cfDescriptors = Collections.singletonList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts)); final 
List readOnlyColumnFamilyHandleList = new ArrayList<>(); try (final DBOptions options = new DBOptions(); - final RocksDB rDb = RocksDB.openReadOnly(options, dbFolder.getRoot().getAbsolutePath(), - cfDescriptors, readOnlyColumnFamilyHandleList, true);) { + final RocksDB ignored = + RocksDB.openReadOnly(options, dbFolder.getRoot().getAbsolutePath(), cfDescriptors, + readOnlyColumnFamilyHandleList, true)) { // no-op... should have raised an error as errorIfWalFileExists=true } } diff --git a/java/src/test/java/org/rocksdb/ReadOptionsTest.java b/java/src/test/java/org/rocksdb/ReadOptionsTest.java index 156dd3730..1bc24b984 100644 --- a/java/src/test/java/org/rocksdb/ReadOptionsTest.java +++ b/java/src/test/java/org/rocksdb/ReadOptionsTest.java @@ -160,7 +160,7 @@ public class ReadOptionsTest { @Test public void iterateUpperBound() { try (final ReadOptions opt = new ReadOptions()) { - Slice upperBound = buildRandomSlice(); + final Slice upperBound = buildRandomSlice(); opt.setIterateUpperBound(upperBound); assertThat(Arrays.equals(upperBound.data(), opt.iterateUpperBound().data())).isTrue(); opt.setIterateUpperBound(null); @@ -178,7 +178,7 @@ public class ReadOptionsTest { @Test public void iterateLowerBound() { try (final ReadOptions opt = new ReadOptions()) { - Slice lowerBound = buildRandomSlice(); + final Slice lowerBound = buildRandomSlice(); opt.setIterateLowerBound(lowerBound); assertThat(Arrays.equals(lowerBound.data(), opt.iterateLowerBound().data())).isTrue(); opt.setIterateLowerBound(null); @@ -212,7 +212,7 @@ public class ReadOptionsTest { @Test public void timestamp() { try (final ReadOptions opt = new ReadOptions()) { - Slice timestamp = buildRandomSlice(); + final Slice timestamp = buildRandomSlice(); opt.setTimestamp(timestamp); assertThat(Arrays.equals(timestamp.data(), opt.timestamp().data())).isTrue(); opt.setTimestamp(null); @@ -223,7 +223,7 @@ public class ReadOptionsTest { @Test public void iterStartTs() { try (final ReadOptions opt = new ReadOptions()) { - Slice itertStartTsSlice = buildRandomSlice(); + final Slice itertStartTsSlice = buildRandomSlice(); opt.setIterStartTs(itertStartTsSlice); assertThat(Arrays.equals(itertStartTsSlice.data(), opt.iterStartTs().data())).isTrue(); opt.setIterStartTs(null); @@ -234,24 +234,24 @@ public class ReadOptionsTest { @Test public void deadline() { try (final ReadOptions opt = new ReadOptions()) { - opt.setDeadline(1999l); - assertThat(opt.deadline()).isEqualTo(1999l); + opt.setDeadline(1999L); + assertThat(opt.deadline()).isEqualTo(1999L); } } @Test public void ioTimeout() { try (final ReadOptions opt = new ReadOptions()) { - opt.setIoTimeout(34555l); - assertThat(opt.ioTimeout()).isEqualTo(34555l); + opt.setIoTimeout(34555L); + assertThat(opt.ioTimeout()).isEqualTo(34555L); } } @Test public void valueSizeSoftLimit() { try (final ReadOptions opt = new ReadOptions()) { - opt.setValueSizeSoftLimit(12134324l); - assertThat(opt.valueSizeSoftLimit()).isEqualTo(12134324l); + opt.setValueSizeSoftLimit(12134324L); + assertThat(opt.valueSizeSoftLimit()).isEqualTo(12134324L); } } @@ -351,8 +351,7 @@ public class ReadOptionsTest { } } - private ReadOptions setupUninitializedReadOptions( - ExpectedException exception) { + private ReadOptions setupUninitializedReadOptions(final ExpectedException exception) { final ReadOptions readOptions = new ReadOptions(); readOptions.close(); exception.expect(AssertionError.class); @@ -361,7 +360,7 @@ public class ReadOptionsTest { private Slice buildRandomSlice() { final Random rand = new Random(); - byte[] 
sliceBytes = new byte[rand.nextInt(100) + 1]; + final byte[] sliceBytes = new byte[rand.nextInt(100) + 1]; rand.nextBytes(sliceBytes); return new Slice(sliceBytes); } diff --git a/java/src/test/java/org/rocksdb/RocksDBTest.java b/java/src/test/java/org/rocksdb/RocksDBTest.java index 488dbafe8..d0e7b4f38 100644 --- a/java/src/test/java/org/rocksdb/RocksDBTest.java +++ b/java/src/test/java/org/rocksdb/RocksDBTest.java @@ -48,8 +48,8 @@ public class RocksDBTest { public void openWhenOpen() throws RocksDBException { final String dbPath = dbFolder.getRoot().getAbsolutePath(); - try (final RocksDB db1 = RocksDB.open(dbPath)) { - try (final RocksDB db2 = RocksDB.open(dbPath)) { + try (final RocksDB ignored = RocksDB.open(dbPath)) { + try (final RocksDB ignored1 = RocksDB.open(dbPath)) { fail("Should have thrown an exception when opening the same db twice"); } catch (final RocksDBException e) { assertThat(e.getStatus().getCode()).isEqualTo(Status.Code.IOError); @@ -74,11 +74,10 @@ public class RocksDBTest { } final List cfHandles = new ArrayList<>(); - try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath(), - Arrays.asList( - new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), - new ColumnFamilyDescriptor(col1Name)), - cfHandles)) { + try (final RocksDB ignored = RocksDB.open(dbFolder.getRoot().getAbsolutePath(), + Arrays.asList(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor(col1Name)), + cfHandles)) { try { assertThat(cfHandles.size()).isEqualTo(2); assertThat(cfHandles.get(1)).isNotNull(); @@ -117,12 +116,10 @@ public class RocksDBTest { } cfHandles = new ArrayList<>(); - try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath(), - Arrays.asList( - new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), - new ColumnFamilyDescriptor(col1Name), - new ColumnFamilyDescriptor(col2Name)), - cfHandles)) { + try (final RocksDB ignored = RocksDB.open(dbFolder.getRoot().getAbsolutePath(), + Arrays.asList(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor(col1Name), new ColumnFamilyDescriptor(col2Name)), + cfHandles)) { try { assertThat(cfHandles.size()).isEqualTo(3); assertThat(cfHandles.get(1)).isNotNull(); @@ -163,12 +160,10 @@ public class RocksDBTest { } cfHandles = new ArrayList<>(); - try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath(), - Arrays.asList( - new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), - new ColumnFamilyDescriptor(col1Name), - new ColumnFamilyDescriptor(col2Name)), - cfHandles)) { + try (final RocksDB ignored = RocksDB.open(dbFolder.getRoot().getAbsolutePath(), + Arrays.asList(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor(col1Name), new ColumnFamilyDescriptor(col2Name)), + cfHandles)) { try { assertThat(cfHandles.size()).isEqualTo(3); assertThat(cfHandles.get(1)).isNotNull(); @@ -194,8 +189,8 @@ public class RocksDBTest { assertThat(db.get("key2".getBytes())).isEqualTo( "12345678".getBytes()); - ByteBuffer key = ByteBuffer.allocateDirect(12); - ByteBuffer value = ByteBuffer.allocateDirect(12); + final ByteBuffer key = ByteBuffer.allocateDirect(12); + final ByteBuffer value = ByteBuffer.allocateDirect(12); key.position(4); key.put("key3".getBytes()); key.position(4).limit(8); @@ -213,14 +208,14 @@ public class RocksDBTest { key.position(4); - ByteBuffer result = ByteBuffer.allocateDirect(12); + final ByteBuffer result = ByteBuffer.allocateDirect(12); assertThat(db.get(optr, key, 
result)).isEqualTo(4); assertThat(result.position()).isEqualTo(0); assertThat(result.limit()).isEqualTo(4); assertThat(key.position()).isEqualTo(8); assertThat(key.limit()).isEqualTo(8); - byte[] tmp = new byte[4]; + final byte[] tmp = new byte[4]; result.get(tmp); assertThat(tmp).isEqualTo("val3".getBytes()); @@ -232,15 +227,15 @@ public class RocksDBTest { assertThat(result.limit()).isEqualTo(12); assertThat(key.position()).isEqualTo(8); assertThat(key.limit()).isEqualTo(8); - byte[] tmp2 = new byte[3]; + final byte[] tmp2 = new byte[3]; result.get(tmp2); assertThat(tmp2).isEqualTo("val".getBytes()); // put - Segment key3 = sliceSegment("key3"); - Segment key4 = sliceSegment("key4"); - Segment value0 = sliceSegment("value 0"); - Segment value1 = sliceSegment("value 1"); + final Segment key3 = sliceSegment("key3"); + final Segment key4 = sliceSegment("key4"); + final Segment value0 = sliceSegment("value 0"); + final Segment value1 = sliceSegment("value 1"); db.put(key3.data, key3.offset, key3.len, value0.data, value0.offset, value0.len); db.put(opt, key4.data, key4.offset, key4.len, value1.data, value1.offset, value1.len); @@ -250,8 +245,8 @@ public class RocksDBTest { } } - private static Segment sliceSegment(String key) { - ByteBuffer rawKey = ByteBuffer.allocate(key.length() + 4); + private static Segment sliceSegment(final String key) { + final ByteBuffer rawKey = ByteBuffer.allocate(key.length() + 4); rawKey.put((byte)0); rawKey.put((byte)0); rawKey.put(key.getBytes()); @@ -264,7 +259,7 @@ public class RocksDBTest { final int offset; final int len; - public boolean isSamePayload(byte[] value) { + public boolean isSamePayload(final byte[] value) { if (value == null) { return false; } @@ -281,7 +276,7 @@ public class RocksDBTest { return true; } - public Segment(byte[] value, int offset, int len) { + public Segment(final byte[] value, final int offset, final int len) { this.data = value; this.offset = offset; this.len = len; @@ -323,7 +318,7 @@ public class RocksDBTest { RocksDB.open(dbFolder.getRoot().getAbsolutePath())) { db.put("key1".getBytes(), "value".getBytes()); db.put("key2".getBytes(), "12345678".getBytes()); - byte[] outValue = new byte[5]; + final byte[] outValue = new byte[5]; // not found value int getResult = db.get("keyNotFound".getBytes(), outValue); assertThat(getResult).isEqualTo(RocksDB.NOT_FOUND); @@ -344,7 +339,7 @@ public class RocksDBTest { final ReadOptions rOpt = new ReadOptions()) { db.put("key1".getBytes(), "value".getBytes()); db.put("key2".getBytes(), "12345678".getBytes()); - byte[] outValue = new byte[5]; + final byte[] outValue = new byte[5]; // not found value int getResult = db.get(rOpt, "keyNotFound".getBytes(), outValue); @@ -368,9 +363,9 @@ public class RocksDBTest { final int numberOfValueSplits = 10; final int splitSize = Integer.MAX_VALUE / numberOfValueSplits; - Runtime runtime = Runtime.getRuntime(); - long neededMemory = ((long)(splitSize)) * (((long)numberOfValueSplits) + 3); - boolean isEnoughMemory = runtime.maxMemory() - runtime.totalMemory() > neededMemory; + final Runtime runtime = Runtime.getRuntime(); + final long neededMemory = ((long) (splitSize)) * (((long) numberOfValueSplits) + 3); + final boolean isEnoughMemory = runtime.maxMemory() - runtime.totalMemory() > neededMemory; Assume.assumeTrue(isEnoughMemory); final byte[] valueSplit = new byte[splitSize]; @@ -399,7 +394,7 @@ public class RocksDBTest { final ReadOptions rOpt = new ReadOptions()) { db.put("key1".getBytes(), "value".getBytes()); db.put("key2".getBytes(), 
"12345678".getBytes()); - List lookupKeys = new ArrayList<>(); + final List lookupKeys = new ArrayList<>(); lookupKeys.add("key1".getBytes()); lookupKeys.add("key2".getBytes()); List results = db.multiGetAsList(lookupKeys); @@ -454,10 +449,10 @@ public class RocksDBTest { assertThat(db.get("key2".getBytes())).isEqualTo( "xxxx".getBytes()); - Segment key3 = sliceSegment("key3"); - Segment key4 = sliceSegment("key4"); - Segment value0 = sliceSegment("value 0"); - Segment value1 = sliceSegment("value 1"); + final Segment key3 = sliceSegment("key3"); + final Segment key4 = sliceSegment("key4"); + final Segment value0 = sliceSegment("value 0"); + final Segment value1 = sliceSegment("value 1"); db.merge(key3.data, key3.offset, key3.len, value0.data, value0.offset, value0.len); db.merge(wOpt, key4.data, key4.offset, key4.len, value1.data, value1.offset, value1.len); @@ -482,7 +477,7 @@ public class RocksDBTest { assertThat(db.get("key3".getBytes())).isEqualTo("33".getBytes()); db.delete("key1".getBytes()); db.delete(wOpt, "key2".getBytes()); - ByteBuffer key = ByteBuffer.allocateDirect(16); + final ByteBuffer key = ByteBuffer.allocateDirect(16); key.put("key3".getBytes()).flip(); db.delete(wOpt, key); assertThat(key.position()).isEqualTo(4); @@ -491,8 +486,8 @@ public class RocksDBTest { assertThat(db.get("key1".getBytes())).isNull(); assertThat(db.get("key2".getBytes())).isNull(); - Segment key3 = sliceSegment("key3"); - Segment key4 = sliceSegment("key4"); + final Segment key3 = sliceSegment("key3"); + final Segment key4 = sliceSegment("key4"); db.put("key3".getBytes(), "key3 value".getBytes()); db.put("key4".getBytes(), "key4 value".getBytes()); @@ -590,7 +585,7 @@ public class RocksDBTest { final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath())) { // fill database with key/value pairs - byte[] b = new byte[10000]; + final byte[] b = new byte[10000]; for (int i = 0; i < 200; i++) { rand.nextBytes(b); db.put((String.valueOf(i)).getBytes(), b); @@ -631,7 +626,7 @@ public class RocksDBTest { columnFamilyHandles)) { try { // fill database with key/value pairs - byte[] b = new byte[10000]; + final byte[] b = new byte[10000]; for (int i = 0; i < 200; i++) { rand.nextBytes(b); db.put(columnFamilyHandles.get(1), @@ -665,7 +660,7 @@ public class RocksDBTest { final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath())) { // fill database with key/value pairs - byte[] b = new byte[10000]; + final byte[] b = new byte[10000]; for (int i = 0; i < 200; i++) { rand.nextBytes(b); db.put((String.valueOf(i)).getBytes(), b); @@ -693,12 +688,14 @@ public class RocksDBTest { final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath())) { // fill database with key/value pairs - byte[] b = new byte[10000]; + final byte[] b = new byte[10000]; for (int i = 0; i < 200; i++) { rand.nextBytes(b); db.put((String.valueOf(i)).getBytes(), b); } - db.flush(new FlushOptions().setWaitForFlush(true)); + try (final FlushOptions flushOptions = new FlushOptions().setWaitForFlush(true)) { + db.flush(flushOptions); + } try (final CompactRangeOptions compactRangeOpts = new CompactRangeOptions() .setChangeLevel(true) .setTargetLevel(-1) @@ -742,7 +739,7 @@ public class RocksDBTest { columnFamilyHandles)) { try { // fill database with key/value pairs - byte[] b = new byte[10000]; + final byte[] b = new byte[10000]; for (int i = 0; i < 200; i++) { rand.nextBytes(b); db.put(columnFamilyHandles.get(1), @@ -794,7 +791,7 @@ public class RocksDBTest { .setTargetLevel(-1) .setTargetPathId(0)) { 
// fill database with key/value pairs - byte[] b = new byte[10000]; + final byte[] b = new byte[10000]; for (int i = 0; i < 200; i++) { rand.nextBytes(b); db.put(columnFamilyHandles.get(1), @@ -812,8 +809,7 @@ public class RocksDBTest { } @Test - public void compactRangeToLevel() - throws RocksDBException, InterruptedException { + public void compactRangeToLevel() throws RocksDBException { final int NUM_KEYS_PER_L0_FILE = 100; final int KEY_SIZE = 20; final int VALUE_SIZE = 300; @@ -842,10 +838,10 @@ public class RocksDBTest { dbFolder.getRoot().getAbsolutePath()) ) { // fill database with key/value pairs - byte[] value = new byte[VALUE_SIZE]; + final byte[] value = new byte[VALUE_SIZE]; int int_key = 0; for (int round = 0; round < 5; ++round) { - int initial_key = int_key; + final int initial_key = int_key; for (int f = 1; f <= NUM_L0_FILES; ++f) { for (int i = 0; i < NUM_KEYS_PER_L0_FILE; ++i) { int_key += KEY_INTERVAL; @@ -854,7 +850,9 @@ public class RocksDBTest { db.put(String.format("%020d", int_key).getBytes(), value); } - db.flush(new FlushOptions().setWaitForFlush(true)); + try (final FlushOptions flushOptions = new FlushOptions().setWaitForFlush(true)) { + db.flush(flushOptions); + } // Make sure we do create one more L0 files. assertThat( db.getProperty("rocksdb.num-files-at-level0")). @@ -887,7 +885,7 @@ public class RocksDBTest { } @Test - public void deleteFilesInRange() throws RocksDBException, InterruptedException { + public void deleteFilesInRange() throws RocksDBException { final int KEY_SIZE = 20; final int VALUE_SIZE = 1000; final int FILE_SIZE = 64000; @@ -899,7 +897,7 @@ public class RocksDBTest { * we will be deleting using deleteFilesInRange. * It is writing roughly number of keys that will fit in 10 files (target size) * It is writing interleaved so that files from memory on L0 will overlap - * Then compaction cleans everything and we should end up with 10 files + * Then compaction cleans everything, and we should end up with 10 files */ try (final Options opt = new Options() .setCreateIfMissing(true) @@ -908,10 +906,10 @@ public class RocksDBTest { .setWriteBufferSize(FILE_SIZE / 2) .setDisableAutoCompactions(true); final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath())) { - int records = FILE_SIZE / (KEY_SIZE + VALUE_SIZE); + final int records = FILE_SIZE / (KEY_SIZE + VALUE_SIZE); // fill database with key/value pairs - byte[] value = new byte[VALUE_SIZE]; + final byte[] value = new byte[VALUE_SIZE]; int key_init = 0; for (int o = 0; o < NUM_FILES; ++o) { int int_key = key_init++; @@ -922,7 +920,9 @@ public class RocksDBTest { db.put(String.format("%020d", int_key).getBytes(), value); } } - db.flush(new FlushOptions().setWaitForFlush(true)); + try (final FlushOptions flushOptions = new FlushOptions().setWaitForFlush(true)) { + db.flush(flushOptions); + } db.compactRange(); // Make sure we do create one more L0 files. 
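A recurring fix in the RocksDBTest hunks here replaces db.flush(new FlushOptions().setWaitForFlush(true)) with a try-with-resources block: FlushOptions wraps a native C++ object, and an instance created inline is never explicitly closed. The pattern being introduced, as a small self-contained helper (the class and method names are illustrative only):

import org.rocksdb.FlushOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

final class FlushHelper {
  // Flushes the memtable, blocking until data is persisted, and
  // deterministically releases the native FlushOptions handle.
  static void flushAndWait(final RocksDB db) throws RocksDBException {
    try (final FlushOptions flushOptions = new FlushOptions().setWaitForFlush(true)) {
      db.flush(flushOptions);
    }
  }
}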
assertThat(db.getProperty("rocksdb.num-files-at-level0")).isEqualTo("0"); @@ -987,10 +987,10 @@ public class RocksDBTest { columnFamilyHandles)) { try { // fill database with key/value pairs - byte[] value = new byte[VALUE_SIZE]; + final byte[] value = new byte[VALUE_SIZE]; int int_key = 0; for (int round = 0; round < 5; ++round) { - int initial_key = int_key; + final int initial_key = int_key; for (int f = 1; f <= NUM_L0_FILES; ++f) { for (int i = 0; i < NUM_KEYS_PER_L0_FILE; ++i) { int_key += KEY_INTERVAL; @@ -1000,8 +1000,9 @@ public class RocksDBTest { String.format("%020d", int_key).getBytes(), value); } - db.flush(new FlushOptions().setWaitForFlush(true), - columnFamilyHandles.get(1)); + try (final FlushOptions flushOptions = new FlushOptions().setWaitForFlush(true)) { + db.flush(flushOptions, columnFamilyHandles.get(1)); + } // Make sure we do create one more L0 files. assertThat( db.getProperty(columnFamilyHandles.get(1), @@ -1069,10 +1070,13 @@ public class RocksDBTest { db.cancelAllBackgroundWork(true); try { db.put(new byte[KEY_SIZE], new byte[VALUE_SIZE]); - db.flush(new FlushOptions().setWaitForFlush(true)); + try (final FlushOptions flushOptions = new FlushOptions().setWaitForFlush(true)) { + db.flush(flushOptions); + } fail("Expected RocksDBException to be thrown if we attempt to trigger a flush after" + " all background work is cancelled."); - } catch (RocksDBException ignored) { } + } catch (final RocksDBException ignored) { + } } finally { for (final ColumnFamilyHandle handle : columnFamilyHandles) { handle.close(); @@ -1158,14 +1162,16 @@ public class RocksDBTest { @Test public void destroyDB() throws RocksDBException { try (final Options options = new Options().setCreateIfMissing(true)) { - String dbPath = dbFolder.getRoot().getAbsolutePath(); + final String dbPath = dbFolder.getRoot().getAbsolutePath(); try (final RocksDB db = RocksDB.open(options, dbPath)) { db.put("key1".getBytes(), "value".getBytes()); } - assertThat(dbFolder.getRoot().exists() && dbFolder.getRoot().listFiles().length != 0) + assertThat(dbFolder.getRoot().exists() + && Objects.requireNonNull(dbFolder.getRoot().listFiles()).length != 0) .isTrue(); RocksDB.destroyDB(dbPath, options); - assertThat(dbFolder.getRoot().exists() && dbFolder.getRoot().listFiles().length != 0) + assertThat(dbFolder.getRoot().exists() + && Objects.requireNonNull(dbFolder.getRoot().listFiles()).length != 0) .isFalse(); } } @@ -1173,8 +1179,8 @@ public class RocksDBTest { @Test(expected = RocksDBException.class) public void destroyDBFailIfOpen() throws RocksDBException { try (final Options options = new Options().setCreateIfMissing(true)) { - String dbPath = dbFolder.getRoot().getAbsolutePath(); - try (final RocksDB db = RocksDB.open(options, dbPath)) { + final String dbPath = dbFolder.getRoot().getAbsolutePath(); + try (final RocksDB ignored = RocksDB.open(options, dbPath)) { // Fails as the db is open and locked. 
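The destroyDB tests in the surrounding hunks delete the database's files on disk, and fail with a RocksDBException while an open handle still holds the database lock, as the comment just above notes. A minimal sketch of the close-then-destroy ordering, with a hypothetical path:

import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class DestroySketch {
  public static void main(final String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    final String dbPath = "/tmp/destroy-sketch"; // hypothetical path
    try (final Options options = new Options().setCreateIfMissing(true)) {
      try (final RocksDB db = RocksDB.open(options, dbPath)) {
        db.put("key1".getBytes(), "value".getBytes());
      } // the handle must be closed first, or destroyDB fails while the DB is locked
      RocksDB.destroyDB(dbPath, options);
    }
  }
}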
@@ -1183,9 +1189,9 @@ public class RocksDBTest {
 
   @Test
   public void getApproximateSizes() throws RocksDBException {
-    final byte key1[] = "key1".getBytes(UTF_8);
-    final byte key2[] = "key2".getBytes(UTF_8);
-    final byte key3[] = "key3".getBytes(UTF_8);
+    final byte[] key1 = "key1".getBytes(UTF_8);
+    final byte[] key2 = "key2".getBytes(UTF_8);
+    final byte[] key3 = "key3".getBytes(UTF_8);
     try (final Options options = new Options().setCreateIfMissing(true)) {
       final String dbPath = dbFolder.getRoot().getAbsolutePath();
       try (final RocksDB db = RocksDB.open(options, dbPath)) {
@@ -1210,9 +1216,9 @@ public class RocksDBTest {
 
   @Test
   public void getApproximateMemTableStats() throws RocksDBException {
-    final byte key1[] = "key1".getBytes(UTF_8);
-    final byte key2[] = "key2".getBytes(UTF_8);
-    final byte key3[] = "key3".getBytes(UTF_8);
+    final byte[] key1 = "key1".getBytes(UTF_8);
+    final byte[] key2 = "key2".getBytes(UTF_8);
+    final byte[] key3 = "key3".getBytes(UTF_8);
     try (final Options options = new Options().setCreateIfMissing(true)) {
       final String dbPath = dbFolder.getRoot().getAbsolutePath();
       try (final RocksDB db = RocksDB.open(options, dbPath)) {
@@ -1233,9 +1239,8 @@ public class RocksDBTest {
 
   @Test
   public void getApproximateMemTableStatsSingleKey() throws RocksDBException {
-    final byte key1[] = "key1".getBytes(UTF_8);
-    final byte key2[] = "key2".getBytes(UTF_8);
-    final byte key3[] = "key3".getBytes(UTF_8);
+    final byte[] key1 = "key1".getBytes(UTF_8);
+    final byte[] key3 = "key3".getBytes(UTF_8);
     try (final Options options = new Options().setCreateIfMissing(true)) {
       final String dbPath = dbFolder.getRoot().getAbsolutePath();
       try (final RocksDB db = RocksDB.open(options, dbPath)) {
@@ -1285,9 +1290,7 @@ public class RocksDBTest {
     );
     final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
     try (final DBOptions dbOptions = new DBOptions(options);
-        final RocksDB db = RocksDB.open(dbOptions, dbPath, cfDescriptors,
-            cfHandles);
-    ) {
+        final RocksDB db = RocksDB.open(dbOptions, dbPath, cfDescriptors, cfHandles)) {
       try (final FlushOptions flushOptions = new FlushOptions()
               .setWaitForFlush(true)
               .setAllowWriteStall(true);
@@ -1320,9 +1323,8 @@ public class RocksDBTest {
   public void enableAutoCompaction() throws RocksDBException {
     try (final DBOptions options = new DBOptions()
         .setCreateIfMissing(true)) {
-      final List<ColumnFamilyDescriptor> cfDescs = Arrays.asList(
-          new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)
-      );
+      final List<ColumnFamilyDescriptor> cfDescs =
+          Collections.singletonList(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY));
       final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
       final String dbPath = dbFolder.getRoot().getAbsolutePath();
       try (final RocksDB db = RocksDB.open(options, dbPath, cfDescs, cfHandles)) {
@@ -1476,9 +1478,8 @@ public class RocksDBTest {
   public void getColumnFamilyMetaData() throws RocksDBException {
     try (final DBOptions options = new DBOptions()
         .setCreateIfMissing(true)) {
-      final List<ColumnFamilyDescriptor> cfDescs = Arrays.asList(
-          new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)
-      );
+      final List<ColumnFamilyDescriptor> cfDescs =
+          Collections.singletonList(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY));
       final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
       final String dbPath = dbFolder.getRoot().getAbsolutePath();
       try (final RocksDB db = RocksDB.open(options, dbPath, cfDescs, cfHandles)) {
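
Several hunks here and below swap Arrays.asList(x) for Collections.singletonList(x) when building one-element lists. Both are fixed-size, but the singleton variant avoids the varargs array allocation and is fully immutable, so accidental mutation fails loudly. A two-line illustration:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class SingletonListSketch {
  public static void main(final String[] args) {
    // Arrays.asList wraps a freshly allocated varargs array; set(0, ...) is allowed.
    final List<String> viaAsList = Arrays.asList("default");
    // singletonList stores the element directly and rejects set(0, ...) as well.
    final List<String> viaSingleton = Collections.singletonList("default");
    System.out.println(viaAsList.equals(viaSingleton)); // true: same contents
  }
}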
@@ -1512,9 +1513,8 @@ public class RocksDBTest {
   public void getPropertiesOfAllTables() throws RocksDBException {
     try (final DBOptions options = new DBOptions()
         .setCreateIfMissing(true)) {
-      final List<ColumnFamilyDescriptor> cfDescs = Arrays.asList(
-          new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)
-      );
+      final List<ColumnFamilyDescriptor> cfDescs =
+          Collections.singletonList(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY));
       final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
       final String dbPath = dbFolder.getRoot().getAbsolutePath();
       try (final RocksDB db = RocksDB.open(options, dbPath, cfDescs, cfHandles)) {
@@ -1536,9 +1536,8 @@ public class RocksDBTest {
   public void getPropertiesOfTablesInRange() throws RocksDBException {
     try (final DBOptions options = new DBOptions()
         .setCreateIfMissing(true)) {
-      final List<ColumnFamilyDescriptor> cfDescs = Arrays.asList(
-          new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)
-      );
+      final List<ColumnFamilyDescriptor> cfDescs =
+          Collections.singletonList(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY));
       final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
       final String dbPath = dbFolder.getRoot().getAbsolutePath();
       try (final RocksDB db = RocksDB.open(options, dbPath, cfDescs, cfHandles)) {
@@ -1550,8 +1549,7 @@
             new Slice("key1".getBytes(UTF_8)),
             new Slice("key3".getBytes(UTF_8)));
         final Map<String, TableProperties> properties =
-            db.getPropertiesOfTablesInRange(
-                cfHandles.get(0), Arrays.asList(range));
+            db.getPropertiesOfTablesInRange(cfHandles.get(0), Collections.singletonList(range));
         assertThat(properties).isNotNull();
       } finally {
         for (final ColumnFamilyHandle cfHandle : cfHandles) {
@@ -1566,9 +1564,8 @@ public class RocksDBTest {
   public void suggestCompactRange() throws RocksDBException {
     try (final DBOptions options = new DBOptions()
         .setCreateIfMissing(true)) {
-      final List<ColumnFamilyDescriptor> cfDescs = Arrays.asList(
-          new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)
-      );
+      final List<ColumnFamilyDescriptor> cfDescs =
+          Collections.singletonList(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY));
       final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
       final String dbPath = dbFolder.getRoot().getAbsolutePath();
       try (final RocksDB db = RocksDB.open(options, dbPath, cfDescs, cfHandles)) {
@@ -1682,8 +1679,8 @@ public class RocksDBTest {
     @Override
     public long getFileSize() {
       long size = 0;
-      for (int i = 0; i < writes.size(); i++) {
-        size += writes.get(i).length;
+      for (final byte[] write : writes) {
+        size += write.length;
       }
       return size;
     }
diff --git a/java/src/test/java/org/rocksdb/RocksMemEnvTest.java b/java/src/test/java/org/rocksdb/RocksMemEnvTest.java
index cce0c61e0..40b24ffa3 100644
--- a/java/src/test/java/org/rocksdb/RocksMemEnvTest.java
+++ b/java/src/test/java/org/rocksdb/RocksMemEnvTest.java
@@ -32,12 +32,8 @@ public class RocksMemEnvTest {
     };
 
     try (final Env env = new RocksMemEnv(Env.getDefault());
-         final Options options = new Options()
-             .setCreateIfMissing(true)
-             .setEnv(env);
-         final FlushOptions flushOptions = new FlushOptions()
-             .setWaitForFlush(true);
-    ) {
+         final Options options = new Options().setCreateIfMissing(true).setEnv(env);
+         final FlushOptions flushOptions = new FlushOptions().setWaitForFlush(true)) {
       try (final RocksDB db = RocksDB.open(options, "/dir/db")) {
         // write key/value pairs using MemEnv
         for (int i = 0; i < keys.length; i++) {
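
The RocksMemEnvTest hunk above opens a database against RocksMemEnv, an Env whose "files" live entirely in memory, so tests run without touching disk and the path is purely virtual. A hedged, self-contained sketch of that usage (key and value are illustrative):

import org.rocksdb.Env;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.RocksMemEnv;

public class MemEnvSketch {
  public static void main(final String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final Env env = new RocksMemEnv(Env.getDefault());
         final Options options = new Options().setCreateIfMissing(true).setEnv(env);
         // "/dir/db" is never created on disk; the mem env backs it with heap buffers.
         final RocksDB db = RocksDB.open(options, "/dir/db")) {
      db.put("k".getBytes(), "v".getBytes());
      System.out.println(new String(db.get("k".getBytes())));
    }
  }
}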
diff --git a/java/src/test/java/org/rocksdb/SstFileReaderTest.java b/java/src/test/java/org/rocksdb/SstFileReaderTest.java
index e29df99f2..ef74b08a7 100644
--- a/java/src/test/java/org/rocksdb/SstFileReaderTest.java
+++ b/java/src/test/java/org/rocksdb/SstFileReaderTest.java
@@ -58,7 +58,7 @@ public class SstFileReaderTest {
         {"direct", ByteBufferAllocator.DIRECT}, {"indirect", ByteBufferAllocator.HEAP}});
   }
 
-  @Parameterized.Parameter(0) public String name;
+  @Parameterized.Parameter() public String name;
 
   @Parameterized.Parameter(1) public ByteBufferAllocator byteBufferAllocator;
diff --git a/java/src/test/java/org/rocksdb/SstFileWriterTest.java b/java/src/test/java/org/rocksdb/SstFileWriterTest.java
index 87165bfe1..c0f4ed9f1 100644
--- a/java/src/test/java/org/rocksdb/SstFileWriterTest.java
+++ b/java/src/test/java/org/rocksdb/SstFileWriterTest.java
@@ -12,7 +12,7 @@ import java.io.File;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
-import java.util.Arrays;
+import java.util.Collections;
 import java.util.List;
 import org.junit.ClassRule;
 import org.junit.Rule;
@@ -33,7 +33,7 @@ public class SstFileWriterTest {
   enum OpType { PUT, PUT_BYTES, PUT_DIRECT, MERGE, MERGE_BYTES, DELETE, DELETE_BYTES }
 
   static class KeyValueWithOp {
-    KeyValueWithOp(String key, String value, OpType opType) {
+    KeyValueWithOp(final String key, final String value, final OpType opType) {
       this.key = key;
       this.value = value;
       this.opType = opType;
@@ -54,14 +54,14 @@ public class SstFileWriterTest {
     private final String key;
     private final String value;
     private final OpType opType;
-  };
+  }
 
   private File newSstFile(final List<KeyValueWithOp> keyValues,
-      boolean useJavaBytewiseComparator) throws IOException, RocksDBException {
+      final boolean useJavaBytewiseComparator) throws IOException, RocksDBException {
     final EnvOptions envOptions = new EnvOptions();
     final StringAppendOperator stringAppendOperator = new StringAppendOperator();
     final Options options = new Options().setMergeOperator(stringAppendOperator);
-    SstFileWriter sstFileWriter = null;
+    final SstFileWriter sstFileWriter;
     ComparatorOptions comparatorOptions = null;
     BytewiseComparator comparator = null;
     if (useJavaBytewiseComparator) {
@@ -77,15 +77,15 @@ public class SstFileWriterTest {
     try {
       sstFileWriter.open(sstFile.getAbsolutePath());
       assertThat(sstFileWriter.fileSize()).isEqualTo(0);
-      for (KeyValueWithOp keyValue : keyValues) {
-        Slice keySlice = new Slice(keyValue.getKey());
-        Slice valueSlice = new Slice(keyValue.getValue());
-        byte[] keyBytes = keyValue.getKey().getBytes();
-        byte[] valueBytes = keyValue.getValue().getBytes();
-        ByteBuffer keyDirect = ByteBuffer.allocateDirect(keyBytes.length);
+      for (final KeyValueWithOp keyValue : keyValues) {
+        final Slice keySlice = new Slice(keyValue.getKey());
+        final Slice valueSlice = new Slice(keyValue.getValue());
+        final byte[] keyBytes = keyValue.getKey().getBytes();
+        final byte[] valueBytes = keyValue.getValue().getBytes();
+        final ByteBuffer keyDirect = ByteBuffer.allocateDirect(keyBytes.length);
         keyDirect.put(keyBytes);
         keyDirect.flip();
-        ByteBuffer valueDirect = ByteBuffer.allocateDirect(valueBytes.length);
+        final ByteBuffer valueDirect = ByteBuffer.allocateDirect(valueBytes.length);
         valueDirect.put(valueBytes);
         valueDirect.flip();
         switch (keyValue.getOpType()) {
@@ -185,8 +185,8 @@ public class SstFileWriterTest {
          final RocksDB db = RocksDB.open(options, dbFolder.getAbsolutePath());
          final IngestExternalFileOptions ingestExternalFileOptions =
              new IngestExternalFileOptions()) {
-      db.ingestExternalFile(Arrays.asList(sstFile.getAbsolutePath()),
-          ingestExternalFileOptions);
+      db.ingestExternalFile(
+          Collections.singletonList(sstFile.getAbsolutePath()), ingestExternalFileOptions);
 
       assertThat(db.get("key1".getBytes())).isEqualTo("value1".getBytes());
       assertThat(db.get("key2".getBytes())).isEqualTo("value2".getBytes());
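
The PUT_DIRECT path in the loop above fills a direct ByteBuffer and then flips it, so that position and limit frame exactly the bytes the native (JNI) side should read; without flip() the buffer's readable window would be empty. A minimal sketch of the put/flip idiom, with a hypothetical helper name:

import java.nio.ByteBuffer;

public class DirectBufferSketch {
  // Hypothetical helper mirroring the test's pattern: wrap bytes in a direct
  // buffer positioned for reading by a native consumer.
  static ByteBuffer toDirect(final byte[] bytes) {
    final ByteBuffer direct = ByteBuffer.allocateDirect(bytes.length);
    direct.put(bytes); // position == bytes.length after the bulk put
    direct.flip();     // limit = old position, position = 0: ready to read
    return direct;
  }

  public static void main(final String[] args) {
    final ByteBuffer b = toDirect("key1".getBytes());
    System.out.println(b.isDirect() + " " + b.remaining()); // true 4
  }
}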
@@ -222,9 +222,7 @@
              .setMergeOperator(stringAppendOperator);
          final ColumnFamilyHandle cf_handle = db.createColumnFamily(
              new ColumnFamilyDescriptor("new_cf".getBytes(), cf_opts))) {
-
-      db.ingestExternalFile(cf_handle,
-          Arrays.asList(sstFile.getAbsolutePath()),
+      db.ingestExternalFile(cf_handle, Collections.singletonList(sstFile.getAbsolutePath()),
           ingestExternalFileOptions);
 
       assertThat(db.get(cf_handle,
diff --git a/java/src/test/java/org/rocksdb/SstPartitionerTest.java b/java/src/test/java/org/rocksdb/SstPartitionerTest.java
index 74816db93..3ee739053 100644
--- a/java/src/test/java/org/rocksdb/SstPartitionerTest.java
+++ b/java/src/test/java/org/rocksdb/SstPartitionerTest.java
@@ -23,7 +23,7 @@ public class SstPartitionerTest {
 
   @Test
   public void sstFixedPrefix() throws RocksDBException {
-    try (SstPartitionerFixedPrefixFactory factory = new SstPartitionerFixedPrefixFactory(4);
+    try (final SstPartitionerFixedPrefixFactory factory = new SstPartitionerFixedPrefixFactory(4);
         final Options opt =
             new Options().setCreateIfMissing(true).setSstPartitionerFactory(factory);
         final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath())) {
@@ -38,7 +38,7 @@ public class SstPartitionerTest {
 
       db.compactRange();
 
-      List<LiveFileMetaData> metadata = db.getLiveFilesMetaData();
+      final List<LiveFileMetaData> metadata = db.getLiveFilesMetaData();
       assertThat(metadata.size()).isEqualTo(2);
     }
   }
@@ -65,7 +65,7 @@ public class SstPartitionerTest {
 
       db.compactRange(columnFamilyHandle);
 
-      List<LiveFileMetaData> metadata = db.getLiveFilesMetaData();
+      final List<LiveFileMetaData> metadata = db.getLiveFilesMetaData();
       assertThat(metadata.size()).isEqualTo(2);
     }
   }
diff --git a/java/src/test/java/org/rocksdb/StatsCallbackMock.java b/java/src/test/java/org/rocksdb/StatsCallbackMock.java
index af8db0caa..c6a7294c9 100644
--- a/java/src/test/java/org/rocksdb/StatsCallbackMock.java
+++ b/java/src/test/java/org/rocksdb/StatsCallbackMock.java
@@ -9,12 +9,11 @@ public class StatsCallbackMock implements StatisticsCollectorCallback {
   public int tickerCallbackCount = 0;
   public int histCallbackCount = 0;
 
-  public void tickerCallback(TickerType tickerType, long tickerCount) {
+  public void tickerCallback(final TickerType tickerType, final long tickerCount) {
     tickerCallbackCount++;
   }
 
-  public void histogramCallback(HistogramType histType,
-      HistogramData histData) {
+  public void histogramCallback(final HistogramType histType, final HistogramData histData) {
     histCallbackCount++;
   }
 }
diff --git a/java/src/test/java/org/rocksdb/TimedEnvTest.java b/java/src/test/java/org/rocksdb/TimedEnvTest.java
index c958f96b2..31bad2e2e 100644
--- a/java/src/test/java/org/rocksdb/TimedEnvTest.java
+++ b/java/src/test/java/org/rocksdb/TimedEnvTest.java
@@ -31,10 +31,7 @@ public class TimedEnvTest {
   @Test
   public void construct_integration() throws RocksDBException {
     try (final Env env = new TimedEnv(Env.getDefault());
-         final Options options = new Options()
-             .setCreateIfMissing(true)
-             .setEnv(env);
-    ) {
+         final Options options = new Options().setCreateIfMissing(true).setEnv(env)) {
       try (final RocksDB db = RocksDB.open(options, dbFolder.getRoot().getPath())) {
         db.put("key1".getBytes(UTF_8), "value1".getBytes(UTF_8));
       }
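
The SstPartitionerTest hunks above exercise SstPartitionerFixedPrefixFactory(4), which tells compaction to start a new SST file whenever the first four key bytes change; that is why the tests can assert exactly two live files for two distinct prefixes. A hedged sketch of the same wiring (the path and keys are illustrative):

import org.rocksdb.FlushOptions;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.SstPartitionerFixedPrefixFactory;

public class PartitionerSketch {
  public static void main(final String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final SstPartitionerFixedPrefixFactory factory = new SstPartitionerFixedPrefixFactory(4);
         final Options opt =
             new Options().setCreateIfMissing(true).setSstPartitionerFactory(factory);
         final RocksDB db = RocksDB.open(opt, "/tmp/partitioner-db")) {
      db.put("aaaa1".getBytes(), "v".getBytes());
      db.put("bbbb1".getBytes(), "v".getBytes());
      // Persist the memtable first so compaction has SST input to repartition.
      try (final FlushOptions flushOptions = new FlushOptions().setWaitForFlush(true)) {
        db.flush(flushOptions);
      }
      db.compactRange(); // each 4-byte prefix should land in its own SST
      System.out.println(db.getLiveFilesMetaData().size()); // expected: 2
    }
  }
}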
diff --git a/java/src/test/java/org/rocksdb/TransactionDBTest.java b/java/src/test/java/org/rocksdb/TransactionDBTest.java
index b0ea813ff..56acb21c7 100644
--- a/java/src/test/java/org/rocksdb/TransactionDBTest.java
+++ b/java/src/test/java/org/rocksdb/TransactionDBTest.java
@@ -130,9 +130,8 @@ public class TransactionDBTest {
         final ReadOptions readOptions = new ReadOptions()) {
 
       try (final Transaction txn = tdb.beginTransaction(writeOptions)) {
-
-        final byte key[] = "key".getBytes(UTF_8);
-        final byte value[] = "value".getBytes(UTF_8);
+        final byte[] key = "key".getBytes(UTF_8);
+        final byte[] value = "value".getBytes(UTF_8);
 
         txn.put(key, value);
         assertThat(txn.getForUpdate(readOptions, key, true)).isEqualTo(value);
diff --git a/java/src/test/java/org/rocksdb/TransactionTest.java b/java/src/test/java/org/rocksdb/TransactionTest.java
index 8a3067de9..b80445c5c 100644
--- a/java/src/test/java/org/rocksdb/TransactionTest.java
+++ b/java/src/test/java/org/rocksdb/TransactionTest.java
@@ -116,7 +116,7 @@ public class TransactionTest extends AbstractTransactionTest {
       txn.commit();
     }
 
-    Transaction txnPrepare;
+    final Transaction txnPrepare;
     txnPrepare = dbContainer.beginTransaction();
     txnPrepare.setName("txnPrepare1");
     txnPrepare.put(k1, v12);
@@ -147,7 +147,7 @@ public class TransactionTest extends AbstractTransactionTest {
       txn.commit();
     }
 
-    Transaction txnPrepare;
+    final Transaction txnPrepare;
     txnPrepare = dbContainer.beginTransaction();
     txnPrepare.setName("txnPrepare1");
     txnPrepare.put(k1, v12);
diff --git a/java/src/test/java/org/rocksdb/TtlDBTest.java b/java/src/test/java/org/rocksdb/TtlDBTest.java
index ffa15e768..ebf9e9eaa 100644
--- a/java/src/test/java/org/rocksdb/TtlDBTest.java
+++ b/java/src/test/java/org/rocksdb/TtlDBTest.java
@@ -40,7 +40,7 @@ public class TtlDBTest {
   @Test
   public void ttlDBOpenWithTtl() throws RocksDBException, InterruptedException {
     try (final Options options = new Options().setCreateIfMissing(true).setMaxCompactionBytes(0);
-         final TtlDB ttlDB = TtlDB.open(options, dbFolder.getRoot().getAbsolutePath(), 1, false);) {
+         final TtlDB ttlDB = TtlDB.open(options, dbFolder.getRoot().getAbsolutePath(), 1, false)) {
       ttlDB.put("key".getBytes(), "value".getBytes());
       assertThat(ttlDB.get("key".getBytes())).
           isEqualTo("value".getBytes());
diff --git a/java/src/test/java/org/rocksdb/Types.java b/java/src/test/java/org/rocksdb/Types.java
index c3c1de833..a6abdecbc 100644
--- a/java/src/test/java/org/rocksdb/Types.java
+++ b/java/src/test/java/org/rocksdb/Types.java
@@ -18,7 +18,7 @@ public class Types {
    *
    * @return An integer
    */
-  public static int byteToInt(final byte data[]) {
+  public static int byteToInt(final byte[] data) {
     return (data[0] & 0xff) |
            ((data[1] & 0xff) << 8) |
            ((data[2] & 0xff) << 16) |
diff --git a/java/src/test/java/org/rocksdb/WalFilterTest.java b/java/src/test/java/org/rocksdb/WalFilterTest.java
index adeb959d1..08bc6eef5 100644
--- a/java/src/test/java/org/rocksdb/WalFilterTest.java
+++ b/java/src/test/java/org/rocksdb/WalFilterTest.java
@@ -63,10 +63,10 @@ public class WalFilterTest {
         cfDescriptors, cfHandles)) {
       try (final WriteOptions writeOptions = new WriteOptions()) {
         // Write given keys in given batches
-        for (int i = 0; i < batchKeys.length; i++) {
+        for (final byte[][] batchKey : batchKeys) {
           final WriteBatch batch = new WriteBatch();
-          for (int j = 0; j < batchKeys[i].length; j++) {
-            batch.put(cfHandles.get(0), batchKeys[i][j], dummyString(1024));
+          for (final byte[] bytes : batchKey) {
+            batch.put(cfHandles.get(0), bytes, dummyString(1024));
           }
           db.write(writeOptions, batch);
         }
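
The TtlDBTest hunk above opens the database with TtlDB.open(options, path, 1, false): entries become eligible for expiry one second after they are written, but stale values are only physically removed when compaction runs. A hedged sketch of that lifecycle (the path is illustrative; exact purge timing depends on when compaction touches the key):

import org.rocksdb.Options;
import org.rocksdb.RocksDBException;
import org.rocksdb.TtlDB;

public class TtlSketch {
  public static void main(final String[] args) throws RocksDBException, InterruptedException {
    // TTL of one second; the final `false` opens the DB read-write.
    try (final Options options = new Options().setCreateIfMissing(true);
         final TtlDB ttlDB = TtlDB.open(options, "/tmp/ttl-db", 1, false)) {
      ttlDB.put("key".getBytes(), "value".getBytes());
      Thread.sleep(2000);       // let the entry pass its TTL
      ttlDB.compactRange();     // expired entries are dropped during compaction
      System.out.println(ttlDB.get("key".getBytes()) == null); // likely true once purged
    }
  }
}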
diff --git a/java/src/test/java/org/rocksdb/WriteBatchThreadedTest.java b/java/src/test/java/org/rocksdb/WriteBatchThreadedTest.java
index c5090dbce..0321da3fa 100644
--- a/java/src/test/java/org/rocksdb/WriteBatchThreadedTest.java
+++ b/java/src/test/java/org/rocksdb/WriteBatchThreadedTest.java
@@ -23,7 +23,7 @@ public class WriteBatchThreadedTest {
 
   @Parameters(name = "WriteBatchThreadedTest(threadCount={0})")
   public static Iterable<Integer> data() {
-    return Arrays.asList(new Integer[]{1, 10, 50, 100});
+    return Arrays.asList(1, 10, 50, 100);
   }
 
   @Parameter
@@ -56,18 +56,15 @@ public class WriteBatchThreadedTest {
     final List<Callable<Void>> callables = new ArrayList<>();
     for (int i = 0; i < 100; i++) {
       final int offset = i * 100;
-      callables.add(new Callable<Void>() {
-        @Override
-        public Void call() throws RocksDBException {
-          try (final WriteBatch wb = new WriteBatch();
-               final WriteOptions w_opt = new WriteOptions()) {
-            for (int i = offset; i < offset + 100; i++) {
-              wb.put(ByteBuffer.allocate(4).putInt(i).array(), "parallel rocks test".getBytes());
-            }
-            db.write(w_opt, wb);
+      callables.add(() -> {
+        try (final WriteBatch wb = new WriteBatch();
+             final WriteOptions w_opt = new WriteOptions()) {
+          for (int i1 = offset; i1 < offset + 100; i1++) {
+            wb.put(ByteBuffer.allocate(4).putInt(i1).array(), "parallel rocks test".getBytes());
           }
-          return null;
+          db.write(w_opt, wb);
         }
+        return null;
       });
     }
diff --git a/java/src/test/java/org/rocksdb/WriteOptionsTest.java b/java/src/test/java/org/rocksdb/WriteOptionsTest.java
index 735677cb7..1e1c93fb5 100644
--- a/java/src/test/java/org/rocksdb/WriteOptionsTest.java
+++ b/java/src/test/java/org/rocksdb/WriteOptionsTest.java
@@ -59,12 +59,12 @@ public class WriteOptionsTest {
 
   @Test
   public void copyConstructor() {
-    WriteOptions origOpts = new WriteOptions();
+    final WriteOptions origOpts = new WriteOptions();
     origOpts.setDisableWAL(rand.nextBoolean());
     origOpts.setIgnoreMissingColumnFamilies(rand.nextBoolean());
     origOpts.setSync(rand.nextBoolean());
     origOpts.setMemtableInsertHintPerBatch(true);
-    WriteOptions copyOpts = new WriteOptions(origOpts);
+    final WriteOptions copyOpts = new WriteOptions(origOpts);
     assertThat(origOpts.disableWAL()).isEqualTo(copyOpts.disableWAL());
     assertThat(origOpts.ignoreMissingColumnFamilies()).isEqualTo(
         copyOpts.ignoreMissingColumnFamilies());
diff --git a/util/crc32c_arm64.cc b/util/crc32c_arm64.cc
index 4885f4fe1..98d1c307d 100644
--- a/util/crc32c_arm64.cc
+++ b/util/crc32c_arm64.cc
@@ -23,10 +23,10 @@
 #include
 #endif
 #if defined(__OpenBSD__)
-#include
-#include
-#include
 #include
+#include
+#include
+#include
 #endif
 
 #ifdef HAVE_ARM64_CRYPTO
@@ -67,13 +67,12 @@ uint32_t crc32c_runtime_check(void) {
   return r == 1;
 #elif defined(__OpenBSD__)
   int r = 0;
-  const int isar0_mib[] = { CTL_MACHDEP, CPU_ID_AA64ISAR0 };
+  const int isar0_mib[] = {CTL_MACHDEP, CPU_ID_AA64ISAR0};
   uint64_t isar0;
   size_t len = sizeof(isar0);
 
   if (sysctl(isar0_mib, 2, &isar0, &len, NULL, 0) != -1) {
-    if (ID_AA64ISAR0_CRC32(isar0) >= ID_AA64ISAR0_CRC32_BASE)
-      r = 1;
+    if (ID_AA64ISAR0_CRC32(isar0) >= ID_AA64ISAR0_CRC32_BASE) r = 1;
   }
   return r;
 #else
@@ -94,13 +93,12 @@ bool crc32c_pmull_runtime_check(void) {
   return true;
 #elif defined(__OpenBSD__)
   bool r = false;
-  const int isar0_mib[] = { CTL_MACHDEP, CPU_ID_AA64ISAR0 };
+  const int isar0_mib[] = {CTL_MACHDEP, CPU_ID_AA64ISAR0};
   uint64_t isar0;
   size_t len = sizeof(isar0);
 
   if (sysctl(isar0_mib, 2, &isar0, &len, NULL, 0) != -1) {
-    if (ID_AA64ISAR0_AES(isar0) >= ID_AA64ISAR0_AES_PMULL)
-      r = true;
+    if (ID_AA64ISAR0_AES(isar0) >= ID_AA64ISAR0_AES_PMULL) r = true;
   }
   return r;
 #else
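
The WriteOptionsTest hunk above depends on the WriteOptions copy constructor producing an independent native copy, so mutating the copy must leave the original untouched. A small sketch of that guarantee (keys and values of the settings are illustrative):

import org.rocksdb.RocksDB;
import org.rocksdb.WriteOptions;

public class CopyConstructorSketch {
  public static void main(final String[] args) {
    RocksDB.loadLibrary();
    try (final WriteOptions origOpts = new WriteOptions().setSync(true);
         final WriteOptions copyOpts = new WriteOptions(origOpts)) {
      copyOpts.setSync(false); // mutate only the copy
      // The copy constructor clones the underlying native options struct,
      // so the original keeps its value.
      System.out.println(origOpts.sync() + " " + copyOpts.sync()); // true false
    }
  }
}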