diff --git a/java/org/rocksdb/AbstractComparator.java b/java/org/rocksdb/AbstractComparator.java index 8de50e271..5302f43b3 100644 --- a/java/org/rocksdb/AbstractComparator.java +++ b/java/org/rocksdb/AbstractComparator.java @@ -39,9 +39,9 @@ public abstract class AbstractComparator * @param b Slice access to second key * * @return Should return either: - * 1) < 0 if "a" < "b" + * 1) < 0 if "a" < "b" * 2) == 0 if "a" == "b" - * 3) > 0 if "a" > "b" + * 3) > 0 if "a" > "b" */ public abstract int compare(final T a, final T b); @@ -49,7 +49,7 @@ public abstract class AbstractComparator * Used to reduce the space requirements * for internal data structures like index blocks. * - * If start < limit, you may return a new start which is a + * If start < limit, you may return a new start which is a * shorter string in [start, limit). * * Simple comparator implementations may return null if they @@ -67,7 +67,7 @@ public abstract class AbstractComparator * for internal data structures like index blocks. * * You may return a new short key (key1) where - * key1 >= key. + * key1 >= key. * * Simple comparator implementations may return null if they * wish to leave the key unchanged. i.e., an implementation of diff --git a/java/org/rocksdb/AbstractSlice.java b/java/org/rocksdb/AbstractSlice.java index 971bd7c1a..2b0d80c6f 100644 --- a/java/org/rocksdb/AbstractSlice.java +++ b/java/org/rocksdb/AbstractSlice.java @@ -19,10 +19,10 @@ package org.rocksdb; * instance of a C++ BaseComparatorJniCallback subclass and * passes that to RocksDB as the comparator. That subclass of * BaseComparatorJniCallback creates the Java - * {@see org.rocksdb.AbstractSlice} subclass Objects. When you dispose - * the Java {@see org.rocksdb.AbstractComparator} subclass, it disposes the + * @see org.rocksdb.AbstractSlice subclass Objects. 
When you dispose + * the Java @see org.rocksdb.AbstractComparator subclass, it disposes the * C++ BaseComparatorJniCallback subclass, which in turn destroys the - * Java {@see org.rocksdb.AbstractSlice} subclass Objects. + * Java @see org.rocksdb.AbstractSlice subclass Objects. */ abstract class AbstractSlice extends RocksObject { @@ -31,7 +31,7 @@ abstract class AbstractSlice extends RocksObject { * * @return The slice data. Note, the type of access is * determined by the subclass - * @see org.rocksdb.AbstractSlice#data0(long). + * @see org.rocksdb.AbstractSlice#data0(long) */ public T data() { assert (isInitialized()); @@ -95,9 +95,9 @@ abstract class AbstractSlice extends RocksObject { * @param other A slice to compare against * * @return Should return either: - * 1) < 0 if this < other + * 1) < 0 if this < other * 2) == 0 if this == other - * 3) > 0 if this > other + * 3) > 0 if this > other */ public int compare(final AbstractSlice other) { assert (other != null); @@ -145,7 +145,6 @@ abstract class AbstractSlice extends RocksObject { /** * Deletes underlying C++ slice pointer. - *

* Note that this function should be called only after all * RocksDB instances referencing the slice are closed. * Otherwise an undefined behavior will occur. diff --git a/java/org/rocksdb/BloomFilter.java b/java/org/rocksdb/BloomFilter.java index 6772d2f54..dd2a511dd 100644 --- a/java/org/rocksdb/BloomFilter.java +++ b/java/org/rocksdb/BloomFilter.java @@ -60,7 +60,7 @@ public class BloomFilter extends Filter { * bits_per_key: bits per key in bloom filter. A good value for bits_per_key * is 10, which yields a filter with ~ 1% false positive rate. *

default bits_per_key: 10

- *

+ * *

use_block_based_builder: use block based filter rather than full filter. * If you want to builder full filter, it needs to be set to false. *

diff --git a/java/org/rocksdb/ColumnFamilyOptionsInterface.java b/java/org/rocksdb/ColumnFamilyOptionsInterface.java index 827fe8c64..fb04c249a 100644 --- a/java/org/rocksdb/ColumnFamilyOptionsInterface.java +++ b/java/org/rocksdb/ColumnFamilyOptionsInterface.java @@ -266,7 +266,7 @@ public interface ColumnFamilyOptionsInterface { int numLevels(); /** - * Number of files to trigger level-0 compaction. A value < 0 means that + * Number of files to trigger level-0 compaction. A value < 0 means that * level-0 compaction will not be triggered by number of files at all. * Default: 4 * @@ -278,7 +278,7 @@ public interface ColumnFamilyOptionsInterface { /** * The number of files in level 0 to trigger compaction from level-0 to - * level-1. A value < 0 means that level-0 compaction will not be + * level-1. A value < 0 means that level-0 compaction will not be * triggered by number of files at all. * Default: 4 * @@ -288,7 +288,7 @@ public interface ColumnFamilyOptionsInterface { /** * Soft limit on number of level-0 files. We start slowing down writes at this - * point. A value < 0 means that no writing slow down will be triggered by + * point. A value < 0 means that no writing slow down will be triggered by * number of files in level-0. * * @param numFiles soft limit on number of level-0 files. @@ -299,7 +299,7 @@ public interface ColumnFamilyOptionsInterface { /** * Soft limit on the number of level-0 files. We start slowing down writes - * at this point. A value < 0 means that no writing slow down will be + * at this point. A value < 0 means that no writing slow down will be * triggered by number of files in level-0. * * @return the soft limit on the number of level-0 files. @@ -324,7 +324,7 @@ public interface ColumnFamilyOptionsInterface { /** * The highest level to which a new compacted memtable is pushed if it * does not create overlap. 
We try to push to level 2 to avoid the - * relatively expensive level 0=>1 compactions and to avoid some + * relatively expensive level 0=>1 compactions and to avoid some * expensive manifest file operations. We do not push all the way to * the largest level since that can generate a lot of wasted disk * space if the same key space is being repeatedly overwritten. @@ -339,7 +339,7 @@ public interface ColumnFamilyOptionsInterface { /** * The highest level to which a new compacted memtable is pushed if it * does not create overlap. We try to push to level 2 to avoid the - * relatively expensive level 0=>1 compactions and to avoid some + * relatively expensive level 0=>1 compactions and to avoid some * expensive manifest file operations. We do not push all the way to * the largest level since that can generate a lot of wasted disk * space if the same key space is being repeatedly overwritten. @@ -515,7 +515,7 @@ public interface ColumnFamilyOptionsInterface { /** * Control maximum bytes of overlaps in grandparent (i.e., level+2) before we - * stop building a single file in a level->level+1 compaction. + * stop building a single file in a level->level+1 compaction. * * @param maxGrandparentOverlapFactor maximum bytes of overlaps in * "grandparent" level. @@ -526,7 +526,7 @@ public interface ColumnFamilyOptionsInterface { /** * Control maximum bytes of overlaps in grandparent (i.e., level+2) before we - * stop building a single file in a level->level+1 compaction. + * stop building a single file in a level->level+1 compaction. * * @return maximum bytes of overlaps in "grandparent" level. */ @@ -535,7 +535,7 @@ public interface ColumnFamilyOptionsInterface { /** * Puts are delayed 0-1 ms when any level has a compaction score that exceeds * soft_rate_limit. This is ignored when == 0.0. - * CONSTRAINT: soft_rate_limit <= hard_rate_limit. If this constraint does not + * CONSTRAINT: soft_rate_limit <= hard_rate_limit. 
If this constraint does not * hold, RocksDB will set soft_rate_limit = hard_rate_limit * Default: 0 (disabled) * @@ -548,7 +548,7 @@ public interface ColumnFamilyOptionsInterface { /** * Puts are delayed 0-1 ms when any level has a compaction score that exceeds * soft_rate_limit. This is ignored when == 0.0. - * CONSTRAINT: soft_rate_limit <= hard_rate_limit. If this constraint does not + * CONSTRAINT: soft_rate_limit <= hard_rate_limit. If this constraint does not * hold, RocksDB will set soft_rate_limit = hard_rate_limit * Default: 0 (disabled) * @@ -558,7 +558,7 @@ public interface ColumnFamilyOptionsInterface { /** * Puts are delayed 1ms at a time when any level has a compaction score that - * exceeds hard_rate_limit. This is ignored when <= 1.0. + * exceeds hard_rate_limit. This is ignored when <= 1.0. * Default: 0 (disabled) * * @param hardRateLimit the hard-rate-limit of a compaction score for put * delay. @@ -569,7 +569,7 @@ public interface ColumnFamilyOptionsInterface { /** * Puts are delayed 1ms at a time when any level has a compaction score that - * exceeds hard_rate_limit. This is ignored when <= 1.0. + * exceeds hard_rate_limit. This is ignored when <= 1.0. * Default: 0 (disabled) * * @return the hard-rate-limit of a compaction score for put delay. @@ -600,11 +600,11 @@ public interface ColumnFamilyOptionsInterface { /** * The size of one block in arena memory allocation. - * If <= 0, a proper value is automatically calculated (usually 1/10 of + * If <= 0, a proper value is automatically calculated (usually 1/10 of * writer_buffer_size). * * There are two additonal restriction of the The specified size: - * (1) size should be in the range of [4096, 2 << 30] and + * (1) size should be in the range of [4096, 2 << 30] and * (2) be the multiple of the CPU word (which helps with the memory * alignment). * @@ -621,11 +621,11 @@ public interface ColumnFamilyOptionsInterface { /** * The size of one block in arena memory allocation. 
- * If <= 0, a proper value is automatically calculated (usually 1/10 of + * If <= 0, a proper value is automatically calculated (usually 1/10 of * writer_buffer_size). * * There are two additonal restriction of the The specified size: - * (1) size should be in the range of [4096, 2 << 30] and + * (1) size should be in the range of [4096, 2 << 30] and * (2) be the multiple of the CPU word (which helps with the memory * alignment). * @@ -734,7 +734,7 @@ public interface ColumnFamilyOptionsInterface { boolean filterDeletes(); /** - * An iteration->Next() sequentially skips over keys with the same + * An iteration->Next() sequentially skips over keys with the same * user-key unless this option is set. This number specifies the number * of keys (with the same userkey) that will be sequentially * skipped before a reseek is issued. @@ -747,7 +747,7 @@ public interface ColumnFamilyOptionsInterface { Object setMaxSequentialSkipInIterations(long maxSequentialSkipInIterations); /** - * An iteration->Next() sequentially skips over keys with the same + * An iteration->Next() sequentially skips over keys with the same * user-key unless this option is set. This number specifies the number * of keys (with the same userkey) that will be sequentially * skipped before a reseek is issued. @@ -794,7 +794,7 @@ public interface ColumnFamilyOptionsInterface { * If inplace_callback function is not set, * Put(key, new_value) will update inplace the existing_value iff * * key exists in current memtable - * * new sizeof(new_value) <= sizeof(existing_value) + * * new sizeof(new_value) <= sizeof(existing_value) * * existing_value for that key is a put i.e. kTypeValue * If inplace_callback function is set, check doc for inplace_callback. * Default: false. 
@@ -810,7 +810,7 @@ public interface ColumnFamilyOptionsInterface { * If inplace_callback function is not set, * Put(key, new_value) will update inplace the existing_value iff * * key exists in current memtable - * * new sizeof(new_value) <= sizeof(existing_value) + * * new sizeof(new_value) <= sizeof(existing_value) * * existing_value for that key is a put i.e. kTypeValue * If inplace_callback function is set, check doc for inplace_callback. * Default: false. @@ -945,7 +945,7 @@ public interface ColumnFamilyOptionsInterface { * merge will be performed. Partial merge will not be called * if the list of values to merge is less than min_partial_merge_operands. * - * If min_partial_merge_operands < 2, then it will be treated as 2. + * If min_partial_merge_operands < 2, then it will be treated as 2. * * Default: 2 * @@ -959,7 +959,7 @@ public interface ColumnFamilyOptionsInterface { * merge will be performed. Partial merge will not be called * if the list of values to merge is less than min_partial_merge_operands. * - * If min_partial_merge_operands < 2, then it will be treated as 2. + * If min_partial_merge_operands < 2, then it will be treated as 2. * * Default: 2 * diff --git a/java/org/rocksdb/DBOptionsInterface.java b/java/org/rocksdb/DBOptionsInterface.java index 35c65eed2..ca65a6146 100644 --- a/java/org/rocksdb/DBOptionsInterface.java +++ b/java/org/rocksdb/DBOptionsInterface.java @@ -502,6 +502,7 @@ public interface DBOptionsInterface { * are older than WAL_ttl_seconds will be deleted. *
  • If both are not 0, WAL files will be checked every 10 min and both * checks will be performed with ttl being first.
  • + * * * @param walTtlSeconds the ttl seconds * @return the instance of the current Object. @@ -546,6 +547,7 @@ public interface DBOptionsInterface { * are older than WAL_ttl_seconds will be deleted. *
  • If both are not 0, WAL files will be checked every 10 min and both * checks will be performed with ttl being first.
  • + * * * @param sizeLimitMB size limit in mega-bytes. * @return the instance of the current Object. diff --git a/java/org/rocksdb/PlainTableConfig.java b/java/org/rocksdb/PlainTableConfig.java index 71d75f72c..7f0d672ef 100644 --- a/java/org/rocksdb/PlainTableConfig.java +++ b/java/org/rocksdb/PlainTableConfig.java @@ -123,7 +123,7 @@ public class PlainTableConfig extends TableFormatConfig { } /** - *

    huge_page_tlb_size: if <=0, allocate hash indexes and blooms + *

    huge_page_tlb_size: if <=0, allocate hash indexes and blooms * from malloc otherwise from huge page TLB.

    * *

    The user needs to reserve huge pages for it to be allocated, diff --git a/java/org/rocksdb/RocksDB.java b/java/org/rocksdb/RocksDB.java index f536765f8..40680e438 100644 --- a/java/org/rocksdb/RocksDB.java +++ b/java/org/rocksdb/RocksDB.java @@ -328,7 +328,7 @@ public class RocksDB extends RocksObject { * * @param options Options for opening the database * @param path Absolute path to rocksdb database - * @return List List containing the column family names + * @return List<byte[]> List containing the column family names * * @throws RocksDBException */ @@ -462,7 +462,6 @@ public class RocksDB extends RocksObject { * to make this lighter weight is to avoid doing any IOs. * * @param readOptions {@link ReadOptions} instance - * @param columnFamilyHandle {@link ColumnFamilyHandle} instance * @param key byte array of a key to search for * @param value StringBuffer instance which is a out parameter if a value is * found in block-cache. @@ -922,13 +921,13 @@ public class RocksDB extends RocksObject { * *

    Valid property names include: *

      - *
    • "rocksdb.num-files-at-level" - return the number of files at level , - * where is an ASCII representation of a level number (e.g. "0").
    • + *
    • "rocksdb.num-files-at-level<N>" - return the number of files at level <N>, + * where <N> is an ASCII representation of a level number (e.g. "0").
    • *
    • "rocksdb.stats" - returns a multi-line string that describes statistics * about the internal operation of the DB.
    • *
    • "rocksdb.sstables" - returns a multi-line string that describes all * of the sstables that make up the db contents.
    • - *

    + * * * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} * instance @@ -951,13 +950,13 @@ public class RocksDB extends RocksObject { * *

    Valid property names include: *

      - *
    • "rocksdb.num-files-at-level" - return the number of files at level , - * where is an ASCII representation of a level number (e.g. "0").
    • + *
    • "rocksdb.num-files-at-level<N>" - return the number of files at level <N>, + * where <N> is an ASCII representation of a level number (e.g. "0").
    • *
    • "rocksdb.stats" - returns a multi-line string that describes statistics * about the internal operation of the DB.
    • *
    • "rocksdb.sstables" - returns a multi-line string that describes all * of the sstables that make up the db contents.
    • - *

    + * * * @param property to be fetched. See above for examples * @return property value diff --git a/java/org/rocksdb/RocksIterator.java b/java/org/rocksdb/RocksIterator.java index 12377b6df..acfdd3b8c 100644 --- a/java/org/rocksdb/RocksIterator.java +++ b/java/org/rocksdb/RocksIterator.java @@ -63,7 +63,7 @@ public class RocksIterator extends RocksObject { *

    Moves to the next entry in the source. After this call, Valid() is * true iff the iterator was not positioned at the last entry in the source.

    * - *

    REQUIRES: {@link #isValid()}

    + *

    REQUIRES: {@link #isValid()}

    */ public void next() { assert(isInitialized()); @@ -74,7 +74,7 @@ public class RocksIterator extends RocksObject { *

    Moves to the previous entry in the source. After this call, Valid() is * true iff the iterator was not positioned at the first entry in source.

    * - *

    REQUIRES: {@link #isValid()}

    + *

    REQUIRES: {@link #isValid()}

    */ public void prev() { assert(isInitialized()); @@ -86,7 +86,7 @@ public class RocksIterator extends RocksObject { * the returned slice is valid only until the next modification of * the iterator.

    * - *

    REQUIRES: {@link #isValid()}

    + *

    REQUIRES: {@link #isValid()}

    * * @return key for the current entry. */ @@ -100,7 +100,7 @@ public class RocksIterator extends RocksObject { * the returned slice is valid only until the next modification of * the iterator.

    * - *

    REQUIRES: !AtEnd() && !AtStart()

    + *

    REQUIRES: !AtEnd() && !AtStart()

    * @return value for the current entry. */ public byte[] value() { diff --git a/java/org/rocksdb/RocksObject.java b/java/org/rocksdb/RocksObject.java index 828bb4f3c..ff5842139 100644 --- a/java/org/rocksdb/RocksObject.java +++ b/java/org/rocksdb/RocksObject.java @@ -11,14 +11,12 @@ package org.rocksdb; * *

    * RocksObject has {@code dispose()} function, which releases its associated c++ - * resource. - *

    - *

    + * resource.

    + *

    * This function can be either called manually, or being called automatically * during the regular Java GC process. However, since Java may wrongly assume a * RocksObject only contains a long member variable and think it is small in size, - *

    - *

    Java may give {@code RocksObject} low priority in the GC process. For this, it is + * Java may give {@code RocksObject} low priority in the GC process. For this, it is * suggested to call {@code dispose()} manually. However, it is safe to let * {@code RocksObject} go out-of-scope without manually calling {@code dispose()} * as {@code dispose()} will be called in the finalizer during the diff --git a/java/org/rocksdb/Slice.java b/java/org/rocksdb/Slice.java index 4449cb7b8..fe5d8d49d 100644 --- a/java/org/rocksdb/Slice.java +++ b/java/org/rocksdb/Slice.java @@ -66,10 +66,10 @@ public class Slice extends AbstractSlice { * Deletes underlying C++ slice pointer * and any buffered data. * - *

    + *

    * Note that this function should be called only after all * RocksDB instances referencing the slice are closed. - * Otherwise an undefined behavior will occur. + * Otherwise an undefined behavior will occur.

    */ @Override protected void disposeInternal() { diff --git a/java/org/rocksdb/WriteBatch.java b/java/org/rocksdb/WriteBatch.java index 0a16d5104..118695512 100644 --- a/java/org/rocksdb/WriteBatch.java +++ b/java/org/rocksdb/WriteBatch.java @@ -41,14 +41,14 @@ public class WriteBatch extends RocksObject { public native int count(); /** - * Store the mapping "key->value" in the database. + * Store the mapping "key->value" in the database. */ public void put(byte[] key, byte[] value) { put(key, key.length, value, value.length); } /** - * Store the mapping "key->value" within given column + * Store the mapping "key->value" within given column * family. */ public void put(ColumnFamilyHandle columnFamilyHandle, @@ -59,7 +59,7 @@ public class WriteBatch extends RocksObject { /** * Merge "value" with the existing value of "key" in the database. - * "key->merge(existing, value)" + * "key->merge(existing, value)" */ public void merge(byte[] key, byte[] value) { merge(key, key.length, value, value.length); @@ -67,7 +67,7 @@ public class WriteBatch extends RocksObject { /** * Merge "value" with the existing value of "key" in given column family. 
- * "key->merge(existing, value)" + * "key->merge(existing, value)" */ public void merge(ColumnFamilyHandle columnFamilyHandle, byte[] key, byte[] value) { diff --git a/java/org/rocksdb/test/AbstractComparatorTest.java b/java/org/rocksdb/test/AbstractComparatorTest.java index dfdb3cad9..7f4c47fb3 100644 --- a/java/org/rocksdb/test/AbstractComparatorTest.java +++ b/java/org/rocksdb/test/AbstractComparatorTest.java @@ -104,7 +104,7 @@ public abstract class AbstractComparatorTest { * @param a 4-bytes representing an integer key * @param b 4-bytes representing an integer key * - * @return negative if a < b, 0 if a == b, positive otherwise + * @return negative if a < b, 0 if a == b, positive otherwise */ protected final int compareIntKeys(final byte[] a, final byte[] b) { diff --git a/java/org/rocksdb/test/PlatformRandomHelper.java b/java/org/rocksdb/test/PlatformRandomHelper.java index b0ef8d8a6..c729c3dc1 100644 --- a/java/org/rocksdb/test/PlatformRandomHelper.java +++ b/java/org/rocksdb/test/PlatformRandomHelper.java @@ -38,7 +38,7 @@ public class PlatformRandomHelper { /** * Random32Bit is a class which overrides {@code nextLong} to * provide random numbers which fit in size_t. This workaround - * is necessary because there is no unsigned_int < Java 8 + * is necessary because there is no unsigned_int < Java 8 */ private static class Random32Bit extends Random { @Override