Merge pull request #386 from EugenePig/java8

suppress JDK8 errors for #385
main
Igor Canadi 10 years ago
commit b52b144c73
  1. 8
      java/org/rocksdb/AbstractComparator.java
  2. 13
      java/org/rocksdb/AbstractSlice.java
  3. 2
      java/org/rocksdb/BloomFilter.java
  4. 44
      java/org/rocksdb/ColumnFamilyOptionsInterface.java
  5. 2
      java/org/rocksdb/DBOptionsInterface.java
  6. 2
      java/org/rocksdb/PlainTableConfig.java
  7. 15
      java/org/rocksdb/RocksDB.java
  8. 8
      java/org/rocksdb/RocksIterator.java
  9. 8
      java/org/rocksdb/RocksObject.java
  10. 4
      java/org/rocksdb/Slice.java
  11. 8
      java/org/rocksdb/WriteBatch.java
  12. 2
      java/org/rocksdb/test/AbstractComparatorTest.java
  13. 2
      java/org/rocksdb/test/PlatformRandomHelper.java

@ -39,9 +39,9 @@ public abstract class AbstractComparator<T extends AbstractSlice>
* @param b Slice access to second key
*
* @return Should return either:
* 1) < 0 if "a" < "b"
* 1) &lt; 0 if "a" &lt; "b"
* 2) == 0 if "a" == "b"
* 3) > 0 if "a" > "b"
* 3) &gt; 0 if "a" &gt; "b"
*/
public abstract int compare(final T a, final T b);
@ -49,7 +49,7 @@ public abstract class AbstractComparator<T extends AbstractSlice>
* Used to reduce the space requirements
* for internal data structures like index blocks.
*
* If start < limit, you may return a new start which is a
* If start &lt; limit, you may return a new start which is a
* shorter string in [start, limit).
*
* Simple comparator implementations may return null if they
@ -67,7 +67,7 @@ public abstract class AbstractComparator<T extends AbstractSlice>
* for internal data structures like index blocks.
*
* You may return a new short key (key1) where
* key1 >= key.
* key1 &ge; key.
*
* Simple comparator implementations may return null if they
* wish to leave the key unchanged. i.e., an implementation of

@ -19,10 +19,10 @@ package org.rocksdb;
* instance of a C++ BaseComparatorJniCallback subclass and
* passes that to RocksDB as the comparator. That subclass of
* BaseComparatorJniCallback creates the Java
* {@see org.rocksdb.AbstractSlice} subclass Objects. When you dispose
* the Java {@see org.rocksdb.AbstractComparator} subclass, it disposes the
* {@link org.rocksdb.AbstractSlice} subclass Objects. When you dispose
* the Java {@link org.rocksdb.AbstractComparator} subclass, it disposes the
* C++ BaseComparatorJniCallback subclass, which in turn destroys the
* Java {@see org.rocksdb.AbstractSlice} subclass Objects.
* Java {@link org.rocksdb.AbstractSlice} subclass Objects.
*/
abstract class AbstractSlice<T> extends RocksObject {
@ -31,7 +31,7 @@ abstract class AbstractSlice<T> extends RocksObject {
*
* @return The slice data. Note, the type of access is
* determined by the subclass
* @see org.rocksdb.AbstractSlice#data0(long).
* @see org.rocksdb.AbstractSlice#data0(long)
*/
public T data() {
assert (isInitialized());
@ -95,9 +95,9 @@ abstract class AbstractSlice<T> extends RocksObject {
* @param other A slice to compare against
*
* @return Should return either:
* 1) < 0 if this < other
* 1) &lt; 0 if this &lt; other
* 2) == 0 if this == other
* 3) > 0 if this > other
* 3) &gt; 0 if this &gt; other
*/
public int compare(final AbstractSlice other) {
assert (other != null);
@ -145,7 +145,6 @@ abstract class AbstractSlice<T> extends RocksObject {
/**
* Deletes underlying C++ slice pointer.
* <p/>
* Note that this function should be called only after all
* RocksDB instances referencing the slice are closed.
* Otherwise an undefined behavior will occur.

@ -60,7 +60,7 @@ public class BloomFilter extends Filter {
* bits_per_key: bits per key in bloom filter. A good value for bits_per_key
* is 10, which yields a filter with ~ 1% false positive rate.
* <p><strong>default bits_per_key</strong>: 10</p>
* </p>
*
* <p>use_block_based_builder: use block based filter rather than full filter.
* If you want to build a full filter, it needs to be set to false.
* </p>

@ -266,7 +266,7 @@ public interface ColumnFamilyOptionsInterface {
int numLevels();
/**
* Number of files to trigger level-0 compaction. A value < 0 means that
* Number of files to trigger level-0 compaction. A value &lt; 0 means that
* level-0 compaction will not be triggered by number of files at all.
* Default: 4
*
@ -278,7 +278,7 @@ public interface ColumnFamilyOptionsInterface {
/**
* The number of files in level 0 to trigger compaction from level-0 to
* level-1. A value < 0 means that level-0 compaction will not be
* level-1. A value &lt; 0 means that level-0 compaction will not be
* triggered by number of files at all.
* Default: 4
*
@ -288,7 +288,7 @@ public interface ColumnFamilyOptionsInterface {
/**
* Soft limit on number of level-0 files. We start slowing down writes at this
* point. A value < 0 means that no writing slow down will be triggered by
* point. A value &lt; 0 means that no writing slow down will be triggered by
* number of files in level-0.
*
* @param numFiles soft limit on number of level-0 files.
@ -299,7 +299,7 @@ public interface ColumnFamilyOptionsInterface {
/**
* Soft limit on the number of level-0 files. We start slowing down writes
* at this point. A value < 0 means that no writing slow down will be
* at this point. A value &lt; 0 means that no writing slow down will be
* triggered by number of files in level-0.
*
* @return the soft limit on the number of level-0 files.
@ -324,7 +324,7 @@ public interface ColumnFamilyOptionsInterface {
/**
* The highest level to which a new compacted memtable is pushed if it
* does not create overlap. We try to push to level 2 to avoid the
* relatively expensive level 0=>1 compactions and to avoid some
* relatively expensive level 0=&gt;1 compactions and to avoid some
* expensive manifest file operations. We do not push all the way to
* the largest level since that can generate a lot of wasted disk
* space if the same key space is being repeatedly overwritten.
@ -339,7 +339,7 @@ public interface ColumnFamilyOptionsInterface {
/**
* The highest level to which a new compacted memtable is pushed if it
* does not create overlap. We try to push to level 2 to avoid the
* relatively expensive level 0=>1 compactions and to avoid some
* relatively expensive level 0=&gt;1 compactions and to avoid some
* expensive manifest file operations. We do not push all the way to
* the largest level since that can generate a lot of wasted disk
* space if the same key space is being repeatedly overwritten.
@ -515,7 +515,7 @@ public interface ColumnFamilyOptionsInterface {
/**
* Control maximum bytes of overlaps in grandparent (i.e., level+2) before we
* stop building a single file in a level->level+1 compaction.
* stop building a single file in a level-&gt;level+1 compaction.
*
* @param maxGrandparentOverlapFactor maximum bytes of overlaps in
* "grandparent" level.
@ -526,7 +526,7 @@ public interface ColumnFamilyOptionsInterface {
/**
* Control maximum bytes of overlaps in grandparent (i.e., level+2) before we
* stop building a single file in a level->level+1 compaction.
* stop building a single file in a level-&gt;level+1 compaction.
*
* @return maximum bytes of overlaps in "grandparent" level.
*/
@ -535,7 +535,7 @@ public interface ColumnFamilyOptionsInterface {
/**
* Puts are delayed 0-1 ms when any level has a compaction score that exceeds
* soft_rate_limit. This is ignored when == 0.0.
* CONSTRAINT: soft_rate_limit <= hard_rate_limit. If this constraint does not
* CONSTRAINT: soft_rate_limit &le; hard_rate_limit. If this constraint does not
* hold, RocksDB will set soft_rate_limit = hard_rate_limit
* Default: 0 (disabled)
*
@ -548,7 +548,7 @@ public interface ColumnFamilyOptionsInterface {
/**
* Puts are delayed 0-1 ms when any level has a compaction score that exceeds
* soft_rate_limit. This is ignored when == 0.0.
* CONSTRAINT: soft_rate_limit <= hard_rate_limit. If this constraint does not
* CONSTRAINT: soft_rate_limit &le; hard_rate_limit. If this constraint does not
* hold, RocksDB will set soft_rate_limit = hard_rate_limit
* Default: 0 (disabled)
*
@ -558,7 +558,7 @@ public interface ColumnFamilyOptionsInterface {
/**
* Puts are delayed 1ms at a time when any level has a compaction score that
* exceeds hard_rate_limit. This is ignored when <= 1.0.
* exceeds hard_rate_limit. This is ignored when &le; 1.0.
* Default: 0 (disabled)
*
* @param hardRateLimit the hard-rate-limit of a compaction score for put
@ -569,7 +569,7 @@ public interface ColumnFamilyOptionsInterface {
/**
* Puts are delayed 1ms at a time when any level has a compaction score that
* exceeds hard_rate_limit. This is ignored when <= 1.0.
* exceeds hard_rate_limit. This is ignored when &le; 1.0.
* Default: 0 (disabled)
*
* @return the hard-rate-limit of a compaction score for put delay.
@ -600,11 +600,11 @@ public interface ColumnFamilyOptionsInterface {
/**
* The size of one block in arena memory allocation.
* If <= 0, a proper value is automatically calculated (usually 1/10 of
* If &le; 0, a proper value is automatically calculated (usually 1/10 of
* writer_buffer_size).
*
* There are two additional restrictions on the specified size:
* (1) size should be in the range of [4096, 2 << 30] and
* (1) size should be in the range of [4096, 2 &lt;&lt; 30] and
* (2) be the multiple of the CPU word (which helps with the memory
* alignment).
*
@ -621,11 +621,11 @@ public interface ColumnFamilyOptionsInterface {
/**
* The size of one block in arena memory allocation.
* If <= 0, a proper value is automatically calculated (usually 1/10 of
* If &le; 0, a proper value is automatically calculated (usually 1/10 of
* writer_buffer_size).
*
* There are two additional restrictions on the specified size:
* (1) size should be in the range of [4096, 2 << 30] and
* (1) size should be in the range of [4096, 2 &lt;&lt; 30] and
* (2) be the multiple of the CPU word (which helps with the memory
* alignment).
*
@ -734,7 +734,7 @@ public interface ColumnFamilyOptionsInterface {
boolean filterDeletes();
/**
* An iteration->Next() sequentially skips over keys with the same
* An iteration-&gt;Next() sequentially skips over keys with the same
* user-key unless this option is set. This number specifies the number
* of keys (with the same userkey) that will be sequentially
* skipped before a reseek is issued.
@ -747,7 +747,7 @@ public interface ColumnFamilyOptionsInterface {
Object setMaxSequentialSkipInIterations(long maxSequentialSkipInIterations);
/**
* An iteration->Next() sequentially skips over keys with the same
* An iteration-&gt;Next() sequentially skips over keys with the same
* user-key unless this option is set. This number specifies the number
* of keys (with the same userkey) that will be sequentially
* skipped before a reseek is issued.
@ -794,7 +794,7 @@ public interface ColumnFamilyOptionsInterface {
* If inplace_callback function is not set,
* Put(key, new_value) will update inplace the existing_value iff
* * key exists in current memtable
* * new sizeof(new_value) <= sizeof(existing_value)
* * new sizeof(new_value) &le; sizeof(existing_value)
* * existing_value for that key is a put i.e. kTypeValue
* If inplace_callback function is set, check doc for inplace_callback.
* Default: false.
@ -810,7 +810,7 @@ public interface ColumnFamilyOptionsInterface {
* If inplace_callback function is not set,
* Put(key, new_value) will update inplace the existing_value iff
* * key exists in current memtable
* * new sizeof(new_value) <= sizeof(existing_value)
* * new sizeof(new_value) &le; sizeof(existing_value)
* * existing_value for that key is a put i.e. kTypeValue
* If inplace_callback function is set, check doc for inplace_callback.
* Default: false.
@ -945,7 +945,7 @@ public interface ColumnFamilyOptionsInterface {
* merge will be performed. Partial merge will not be called
* if the list of values to merge is less than min_partial_merge_operands.
*
* If min_partial_merge_operands < 2, then it will be treated as 2.
* If min_partial_merge_operands &lt; 2, then it will be treated as 2.
*
* Default: 2
*
@ -959,7 +959,7 @@ public interface ColumnFamilyOptionsInterface {
* merge will be performed. Partial merge will not be called
* if the list of values to merge is less than min_partial_merge_operands.
*
* If min_partial_merge_operands < 2, then it will be treated as 2.
* If min_partial_merge_operands &lt; 2, then it will be treated as 2.
*
* Default: 2
*

@ -502,6 +502,7 @@ public interface DBOptionsInterface {
* are older than WAL_ttl_seconds will be deleted.</li>
* <li>If both are not 0, WAL files will be checked every 10 min and both
* checks will be performed with ttl being first.</li>
* </ol>
*
* @param walTtlSeconds the ttl seconds
* @return the instance of the current Object.
@ -546,6 +547,7 @@ public interface DBOptionsInterface {
* are older than WAL_ttl_seconds will be deleted.</li>
* <li>If both are not 0, WAL files will be checked every 10 min and both
* checks will be performed with ttl being first.</li>
* </ol>
*
* @param sizeLimitMB size limit in mega-bytes.
* @return the instance of the current Object.

@ -123,7 +123,7 @@ public class PlainTableConfig extends TableFormatConfig {
}
/**
* <p>huge_page_tlb_size: if <=0, allocate hash indexes and blooms
* <p>huge_page_tlb_size: if &le;0, allocate hash indexes and blooms
* from malloc otherwise from huge page TLB.</p>
*
* <p>The user needs to reserve huge pages for it to be allocated,

@ -328,7 +328,7 @@ public class RocksDB extends RocksObject {
*
* @param options Options for opening the database
* @param path Absolute path to rocksdb database
* @return List<byte[]> List containing the column family names
* @return List&lt;byte[]&gt; List containing the column family names
*
* @throws RocksDBException
*/
@ -462,7 +462,6 @@ public class RocksDB extends RocksObject {
* to make this lighter weight is to avoid doing any IOs.
*
* @param readOptions {@link ReadOptions} instance
* @param columnFamilyHandle {@link ColumnFamilyHandle} instance
* @param key byte array of a key to search for
* @param value StringBuffer instance which is a out parameter if a value is
* found in block-cache.
@ -922,13 +921,13 @@ public class RocksDB extends RocksObject {
*
* <p>Valid property names include:
* <ul>
* <li>"rocksdb.num-files-at-level<N>" - return the number of files at level <N>,
* where <N> is an ASCII representation of a level number (e.g. "0").</li>
* <li>"rocksdb.num-files-at-level&lt;N&gt;" - return the number of files at level &lt;N&gt;,
* where &lt;N&gt; is an ASCII representation of a level number (e.g. "0").</li>
* <li>"rocksdb.stats" - returns a multi-line string that describes statistics
* about the internal operation of the DB.</li>
* <li>"rocksdb.sstables" - returns a multi-line string that describes all
* of the sstables that make up the db contents.</li>
*</ul></p>
* </ul>
*
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
* instance
@ -951,13 +950,13 @@ public class RocksDB extends RocksObject {
*
* <p>Valid property names include:
* <ul>
* <li>"rocksdb.num-files-at-level<N>" - return the number of files at level <N>,
* where <N> is an ASCII representation of a level number (e.g. "0").</li>
* <li>"rocksdb.num-files-at-level&lt;N&gt;" - return the number of files at level &lt;N&gt;,
* where &lt;N&gt; is an ASCII representation of a level number (e.g. "0").</li>
* <li>"rocksdb.stats" - returns a multi-line string that describes statistics
* about the internal operation of the DB.</li>
* <li>"rocksdb.sstables" - returns a multi-line string that describes all
* of the sstables that make up the db contents.</li>
*</ul></p>
* </ul>
*
* @param property to be fetched. See above for examples
* @return property value

@ -63,7 +63,7 @@ public class RocksIterator extends RocksObject {
* <p>Moves to the next entry in the source. After this call, Valid() is
* true iff the iterator was not positioned at the last entry in the source.</p>
*
* <p>REQUIRES: {@link #isValid()}<p>
* <p>REQUIRES: {@link #isValid()}</p>
*/
public void next() {
assert(isInitialized());
@ -74,7 +74,7 @@ public class RocksIterator extends RocksObject {
* <p>Moves to the previous entry in the source. After this call, Valid() is
* true iff the iterator was not positioned at the first entry in source.</p>
*
* <p>REQUIRES: {@link #isValid()}<p>
* <p>REQUIRES: {@link #isValid()}</p>
*/
public void prev() {
assert(isInitialized());
@ -86,7 +86,7 @@ public class RocksIterator extends RocksObject {
* the returned slice is valid only until the next modification of
* the iterator.</p>
*
* <p>REQUIRES: {@link #isValid()}<p>
* <p>REQUIRES: {@link #isValid()}</p>
*
* @return key for the current entry.
*/
@ -100,7 +100,7 @@ public class RocksIterator extends RocksObject {
* the returned slice is valid only until the next modification of
* the iterator.</p>
*
* <p>REQUIRES: !AtEnd() && !AtStart()</p>
* <p>REQUIRES: !AtEnd() &amp;&amp; !AtStart()</p>
* @return value for the current entry.
*/
public byte[] value() {

@ -11,14 +11,12 @@ package org.rocksdb;
*
* <p>
* RocksObject has {@code dispose()} function, which releases its associated c++
* resource.
* </p>
* </p>
* resource.</p>
* <p>
* This function can be either called manually, or being called automatically
* during the regular Java GC process. However, since Java may wrongly assume a
* RocksObject only contains a long member variable and think it is small in size,
* </p>
* <p>Java may give {@code RocksObject} low priority in the GC process. For this, it is
* Java may give {@code RocksObject} low priority in the GC process. For this, it is
* suggested to call {@code dispose()} manually. However, it is safe to let
* {@code RocksObject} go out-of-scope without manually calling {@code dispose()}
* as {@code dispose()} will be called in the finalizer during the

@ -66,10 +66,10 @@ public class Slice extends AbstractSlice<byte[]> {
* Deletes underlying C++ slice pointer
* and any buffered data.
*
* <p/>
* <p>
* Note that this function should be called only after all
* RocksDB instances referencing the slice are closed.
* Otherwise an undefined behavior will occur.
* Otherwise an undefined behavior will occur.</p>
*/
@Override
protected void disposeInternal() {

@ -41,14 +41,14 @@ public class WriteBatch extends RocksObject {
public native int count();
/**
* Store the mapping "key->value" in the database.
* Store the mapping "key-&gt;value" in the database.
*/
public void put(byte[] key, byte[] value) {
put(key, key.length, value, value.length);
}
/**
* Store the mapping "key->value" within given column
* Store the mapping "key-&gt;value" within given column
* family.
*/
public void put(ColumnFamilyHandle columnFamilyHandle,
@ -59,7 +59,7 @@ public class WriteBatch extends RocksObject {
/**
* Merge "value" with the existing value of "key" in the database.
* "key->merge(existing, value)"
* "key-&gt;merge(existing, value)"
*/
public void merge(byte[] key, byte[] value) {
merge(key, key.length, value, value.length);
@ -67,7 +67,7 @@ public class WriteBatch extends RocksObject {
/**
* Merge "value" with the existing value of "key" in given column family.
* "key->merge(existing, value)"
* "key-&gt;merge(existing, value)"
*/
public void merge(ColumnFamilyHandle columnFamilyHandle,
byte[] key, byte[] value) {

@ -104,7 +104,7 @@ public abstract class AbstractComparatorTest {
* @param a 4-bytes representing an integer key
* @param b 4-bytes representing an integer key
*
* @return negative if a < b, 0 if a == b, positive otherwise
* @return negative if a &lt; b, 0 if a == b, positive otherwise
*/
protected final int compareIntKeys(final byte[] a, final byte[] b) {

@ -38,7 +38,7 @@ public class PlatformRandomHelper {
/**
* Random32Bit is a class which overrides {@code nextLong} to
* provide random numbers which fit in size_t. This workaround
* is necessary because there is no unsigned_int < Java 8
* is necessary because there is no unsigned_int &lt; Java 8
*/
private static class Random32Bit extends Random {
@Override

Loading…
Cancel
Save