Fixed various memory leaks and Java 8 JNI Compatibility

Summary:
I have manually audited the entire RocksJava code base.

Sorry for the large pull request; I have broken it down into many small atomic commits, though.

My initial intention was to fix the warnings that appear when running RocksJava on Java 8 with `-Xcheck:jni`, for example when running `make jtest` you would see many errors similar to:

```
WARNING in native method: JNI call made without checking exceptions when required to from CallObjectMethod
WARNING in native method: JNI call made without checking exceptions when required to from CallVoidMethod
WARNING in native method: JNI call made without checking exceptions when required to from CallStaticVoidMethod
...
```

A few of those warnings still remain; however, they seem to come directly from the JVM and are not directly related to RocksJava. I am in contact with the OpenJDK hotspot-dev mailing list about these - http://mail.openjdk.java.net/pipermail/hotspot-dev/2017-February/025981.html.

As a result of fixing these, I realised we were not r[...text truncated in the original commit message...]
Closes https://github.com/facebook/rocksdb/pull/1890

Differential Revision: D4591758

Pulled By: siying

fbshipit-source-id: 7f7fdf4
main
Adam Retter 8 years ago committed by Facebook Github Bot
parent be3e5568be
commit c6d464a9da
  1. 16
      java/Makefile
  2. 42
      java/benchmark/src/main/java/org/rocksdb/benchmark/DbBenchmark.java
  3. 42
      java/rocksjni/backupablejni.cc
  4. 40
      java/rocksjni/backupenginejni.cc
  5. 17
      java/rocksjni/checkpoint.cc
  6. 5
      java/rocksjni/columnfamilyhandle.cc
  7. 4
      java/rocksjni/compaction_filter.cc
  8. 4
      java/rocksjni/comparator.cc
  9. 284
      java/rocksjni/comparatorjnicallback.cc
  10. 3
      java/rocksjni/comparatorjnicallback.h
  11. 4
      java/rocksjni/env.cc
  12. 11
      java/rocksjni/env_options.cc
  13. 48
      java/rocksjni/external_sst_file_info.cc
  14. 17
      java/rocksjni/filter.cc
  15. 26
      java/rocksjni/iterator.cc
  16. 267
      java/rocksjni/loggerjnicallback.cc
  17. 4
      java/rocksjni/loggerjnicallback.h
  18. 25
      java/rocksjni/merge_operator.cc
  19. 343
      java/rocksjni/options.cc
  20. 2169
      java/rocksjni/portal.h
  21. 65
      java/rocksjni/ratelimiterjni.cc
  22. 4
      java/rocksjni/restorejni.cc
  23. 789
      java/rocksjni/rocksjni.cc
  24. 132
      java/rocksjni/slice.cc
  25. 11
      java/rocksjni/sst_file_writerjni.cc
  26. 26
      java/rocksjni/statistics.cc
  27. 9
      java/rocksjni/transaction_log.cc
  28. 105
      java/rocksjni/ttl.cc
  29. 17
      java/rocksjni/write_batch.cc
  30. 10
      java/rocksjni/write_batch_test.cc
  31. 62
      java/rocksjni/write_batch_with_index.cc
  32. 167
      java/rocksjni/writebatchhandlerjnicallback.cc
  33. 9
      java/samples/src/main/java/RocksDBColumnFamilySample.java
  34. 58
      java/samples/src/main/java/RocksDBSample.java
  35. 14
      java/src/main/java/org/rocksdb/AbstractSlice.java
  36. 2
      java/src/main/java/org/rocksdb/ColumnFamilyOptions.java
  37. 13
      java/src/main/java/org/rocksdb/DBOptions.java
  38. 12
      java/src/main/java/org/rocksdb/DBOptionsInterface.java
  39. 49
      java/src/main/java/org/rocksdb/DirectSlice.java
  40. 12
      java/src/main/java/org/rocksdb/EnvOptions.java
  41. 68
      java/src/main/java/org/rocksdb/GenericRateLimiterConfig.java
  42. 6
      java/src/main/java/org/rocksdb/MergeOperator.java
  43. 4
      java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java
  44. 13
      java/src/main/java/org/rocksdb/Options.java
  45. 26
      java/src/main/java/org/rocksdb/RateLimiterConfig.java
  46. 31
      java/src/main/java/org/rocksdb/RocksDB.java
  47. 18
      java/src/main/java/org/rocksdb/RocksMutableObject.java
  48. 30
      java/src/main/java/org/rocksdb/Slice.java
  49. 10
      java/src/main/java/org/rocksdb/StringAppendOperator.java
  50. 2
      java/src/main/java/org/rocksdb/TransactionLogIterator.java
  51. 24
      java/src/main/java/org/rocksdb/WBWIRocksIterator.java
  52. 14
      java/src/main/java/org/rocksdb/WriteBatchWithIndex.java
  53. 5
      java/src/test/java/org/rocksdb/ColumnFamilyTest.java
  54. 18
      java/src/test/java/org/rocksdb/DBOptionsTest.java
  55. 23
      java/src/test/java/org/rocksdb/DirectSliceTest.java
  56. 20
      java/src/test/java/org/rocksdb/EnvOptionsTest.java
  57. 8
      java/src/test/java/org/rocksdb/KeyMayExistTest.java
  58. 79
      java/src/test/java/org/rocksdb/MergeTest.java
  59. 43
      java/src/test/java/org/rocksdb/OptionsTest.java
  60. 49
      java/src/test/java/org/rocksdb/RateLimiterTest.java
  61. 13
      java/src/test/java/org/rocksdb/RocksDBTest.java
  62. 19
      java/src/test/java/org/rocksdb/SliceTest.java
  63. 10
      java/src/test/java/org/rocksdb/WriteBatchWithIndexTest.java
  64. 18
      java/src/test/java/org/rocksdb/test/RocksJunitRunner.java

@ -18,7 +18,6 @@ NATIVE_JAVA_CLASSES = org.rocksdb.AbstractCompactionFilter\
org.rocksdb.ExternalSstFileInfo\ org.rocksdb.ExternalSstFileInfo\
org.rocksdb.FlushOptions\ org.rocksdb.FlushOptions\
org.rocksdb.Filter\ org.rocksdb.Filter\
org.rocksdb.GenericRateLimiterConfig\
org.rocksdb.HashLinkedListMemTableConfig\ org.rocksdb.HashLinkedListMemTableConfig\
org.rocksdb.HashSkipListMemTableConfig\ org.rocksdb.HashSkipListMemTableConfig\
org.rocksdb.Logger\ org.rocksdb.Logger\
@ -91,6 +90,7 @@ JAVA_TESTS = org.rocksdb.BackupableDBOptionsTest\
org.rocksdb.NativeLibraryLoaderTest\ org.rocksdb.NativeLibraryLoaderTest\
org.rocksdb.OptionsTest\ org.rocksdb.OptionsTest\
org.rocksdb.PlainTableConfigTest\ org.rocksdb.PlainTableConfigTest\
org.rocksdb.RateLimiterTest\
org.rocksdb.ReadOnlyTest\ org.rocksdb.ReadOnlyTest\
org.rocksdb.ReadOptionsTest\ org.rocksdb.ReadOptionsTest\
org.rocksdb.RocksDBTest\ org.rocksdb.RocksDBTest\
@ -136,6 +136,14 @@ JAVA_TESTCLASSPATH = $(JAVA_JUNIT_JAR):$(JAVA_HAMCR_JAR):$(JAVA_MOCKITO_JAR):$(J
MVN_LOCAL = ~/.m2/repository MVN_LOCAL = ~/.m2/repository
# Set the default JAVA_ARGS to "" for DEBUG_LEVEL=0
JAVA_ARGS? =
# When debugging add -Xcheck:jni to the java args
ifneq ($(DEBUG_LEVEL),0)
JAVA_ARGS = -ea -Xcheck:jni
endif
clean: clean:
$(AM_V_at)rm -rf include/* $(AM_V_at)rm -rf include/*
$(AM_V_at)rm -rf test-libs/ $(AM_V_at)rm -rf test-libs/
@ -164,7 +172,7 @@ sample: java
$(AM_V_at)javac -cp $(MAIN_CLASSES) -d $(SAMPLES_MAIN_CLASSES) $(SAMPLES_MAIN_SRC)/RocksDBSample.java $(AM_V_at)javac -cp $(MAIN_CLASSES) -d $(SAMPLES_MAIN_CLASSES) $(SAMPLES_MAIN_SRC)/RocksDBSample.java
$(AM_V_at)@rm -rf /tmp/rocksdbjni $(AM_V_at)@rm -rf /tmp/rocksdbjni
$(AM_V_at)@rm -rf /tmp/rocksdbjni_not_found $(AM_V_at)@rm -rf /tmp/rocksdbjni_not_found
java -ea -Xcheck:jni -Djava.library.path=target -cp $(MAIN_CLASSES):$(SAMPLES_MAIN_CLASSES) RocksDBSample /tmp/rocksdbjni java $(JAVA_ARGS) -Djava.library.path=target -cp $(MAIN_CLASSES):$(SAMPLES_MAIN_CLASSES) RocksDBSample /tmp/rocksdbjni
$(AM_V_at)@rm -rf /tmp/rocksdbjni $(AM_V_at)@rm -rf /tmp/rocksdbjni
$(AM_V_at)@rm -rf /tmp/rocksdbjni_not_found $(AM_V_at)@rm -rf /tmp/rocksdbjni_not_found
@ -172,7 +180,7 @@ column_family_sample: java
$(AM_V_GEN)mkdir -p $(SAMPLES_MAIN_CLASSES) $(AM_V_GEN)mkdir -p $(SAMPLES_MAIN_CLASSES)
$(AM_V_at)javac -cp $(MAIN_CLASSES) -d $(SAMPLES_MAIN_CLASSES) $(SAMPLES_MAIN_SRC)/RocksDBColumnFamilySample.java $(AM_V_at)javac -cp $(MAIN_CLASSES) -d $(SAMPLES_MAIN_CLASSES) $(SAMPLES_MAIN_SRC)/RocksDBColumnFamilySample.java
$(AM_V_at)@rm -rf /tmp/rocksdbjni $(AM_V_at)@rm -rf /tmp/rocksdbjni
java -ea -Xcheck:jni -Djava.library.path=target -cp $(MAIN_CLASSES):$(SAMPLES_MAIN_CLASSES) RocksDBColumnFamilySample /tmp/rocksdbjni java $(JAVA_ARGS) -Djava.library.path=target -cp $(MAIN_CLASSES):$(SAMPLES_MAIN_CLASSES) RocksDBColumnFamilySample /tmp/rocksdbjni
$(AM_V_at)@rm -rf /tmp/rocksdbjni $(AM_V_at)@rm -rf /tmp/rocksdbjni
resolve_test_deps: resolve_test_deps:
@ -194,7 +202,7 @@ java_test: java resolve_test_deps
test: java java_test run_test test: java java_test run_test
run_test: run_test:
java -ea -Djava.library.path=target -cp "$(MAIN_CLASSES):$(TEST_CLASSES):$(JAVA_TESTCLASSPATH):target/*" org.rocksdb.test.RocksJunitRunner $(JAVA_TESTS) java $(JAVA_ARGS) -Djava.library.path=target -cp "$(MAIN_CLASSES):$(TEST_CLASSES):$(JAVA_TESTCLASSPATH):target/*" org.rocksdb.test.RocksJunitRunner $(JAVA_TESTS)
db_bench: java db_bench: java
$(AM_V_GEN)mkdir -p $(BENCHMARK_MAIN_CLASSES) $(AM_V_GEN)mkdir -p $(BENCHMARK_MAIN_CLASSES)

@ -572,7 +572,7 @@ public class DbBenchmark {
(Integer)flags_.get(Flag.num_levels)); (Integer)flags_.get(Flag.num_levels));
options.setTargetFileSizeBase( options.setTargetFileSizeBase(
(Integer)flags_.get(Flag.target_file_size_base)); (Integer)flags_.get(Flag.target_file_size_base));
options.setTargetFileSizeMultiplier((Double) flags_.get(Flag.target_file_size_multiplier)); options.setTargetFileSizeMultiplier((Integer)flags_.get(Flag.target_file_size_multiplier));
options.setMaxBytesForLevelBase( options.setMaxBytesForLevelBase(
(Integer)flags_.get(Flag.max_bytes_for_level_base)); (Integer)flags_.get(Flag.max_bytes_for_level_base));
options.setMaxBytesForLevelMultiplier((Double) flags_.get(Flag.max_bytes_for_level_multiplier)); options.setMaxBytesForLevelMultiplier((Double) flags_.get(Flag.max_bytes_for_level_multiplier));
@ -588,12 +588,10 @@ public class DbBenchmark {
(Double)flags_.get(Flag.hard_rate_limit)); (Double)flags_.get(Flag.hard_rate_limit));
options.setRateLimitDelayMaxMilliseconds( options.setRateLimitDelayMaxMilliseconds(
(Integer)flags_.get(Flag.rate_limit_delay_max_milliseconds)); (Integer)flags_.get(Flag.rate_limit_delay_max_milliseconds));
options.setMaxGrandparentOverlapFactor( options.setMaxCompactionBytes(
(Integer)flags_.get(Flag.max_grandparent_overlap_factor)); (Long) flags_.get(Flag.max_compaction_bytes));
options.setDisableAutoCompactions( options.setDisableAutoCompactions(
(Boolean)flags_.get(Flag.disable_auto_compactions)); (Boolean)flags_.get(Flag.disable_auto_compactions));
options.setSourceCompactionFactor(
(Integer)flags_.get(Flag.source_compaction_factor));
options.setMaxSuccessiveMerges( options.setMaxSuccessiveMerges(
(Integer)flags_.get(Flag.max_successive_merges)); (Integer)flags_.get(Flag.max_successive_merges));
options.setWalTtlSeconds((Long)flags_.get(Flag.wal_ttl_seconds)); options.setWalTtlSeconds((Long)flags_.get(Flag.wal_ttl_seconds));
@ -978,7 +976,7 @@ public class DbBenchmark {
return Integer.parseInt(value); return Integer.parseInt(value);
} }
}, },
write_buffer_size(4 * SizeUnit.MB, write_buffer_size(4L * SizeUnit.MB,
"Number of bytes to buffer in memtable before compacting\n" + "Number of bytes to buffer in memtable before compacting\n" +
"\t(initialized to default value by 'main'.)") { "\t(initialized to default value by 'main'.)") {
@Override public Object parseValue(String value) { @Override public Object parseValue(String value) {
@ -1056,7 +1054,7 @@ public class DbBenchmark {
return Integer.parseInt(value); return Integer.parseInt(value);
} }
}, },
numdistinct(1000, numdistinct(1000L,
"Number of distinct keys to use. Used in RandomWithVerify to\n" + "Number of distinct keys to use. Used in RandomWithVerify to\n" +
"\tread/write on fewer keys so that gets are more likely to find the\n" + "\tread/write on fewer keys so that gets are more likely to find the\n" +
"\tkey and puts are more likely to update the same key.") { "\tkey and puts are more likely to update the same key.") {
@ -1064,7 +1062,7 @@ public class DbBenchmark {
return Long.parseLong(value); return Long.parseLong(value);
} }
}, },
merge_keys(-1, merge_keys(-1L,
"Number of distinct keys to use for MergeRandom and\n" + "Number of distinct keys to use for MergeRandom and\n" +
"\tReadRandomMergeRandom.\n" + "\tReadRandomMergeRandom.\n" +
"\tIf negative, there will be FLAGS_num keys.") { "\tIf negative, there will be FLAGS_num keys.") {
@ -1169,7 +1167,7 @@ public class DbBenchmark {
return Long.parseLong(value); return Long.parseLong(value);
} }
}, },
compressed_cache_size(-1, compressed_cache_size(-1L,
"Number of bytes to use as a cache of compressed data.") { "Number of bytes to use as a cache of compressed data.") {
@Override public Object parseValue(String value) { @Override public Object parseValue(String value) {
return Long.parseLong(value); return Long.parseLong(value);
@ -1188,7 +1186,7 @@ public class DbBenchmark {
return Integer.parseInt(value); return Integer.parseInt(value);
} }
}, },
memtable_bloom_size_ratio(0, "Ratio of memtable used by the bloom filter.\n" memtable_bloom_size_ratio(0.0d, "Ratio of memtable used by the bloom filter.\n"
+ "\t0 means no bloom filter.") { + "\t0 means no bloom filter.") {
@Override public Object parseValue(String value) { @Override public Object parseValue(String value) {
return Double.parseDouble(value); return Double.parseDouble(value);
@ -1212,7 +1210,7 @@ public class DbBenchmark {
return parseBoolean(value); return parseBoolean(value);
} }
}, },
writes(-1,"Number of write operations to do. If negative, do\n" + writes(-1L, "Number of write operations to do. If negative, do\n" +
"\t--num reads.") { "\t--num reads.") {
@Override public Object parseValue(String value) { @Override public Object parseValue(String value) {
return Long.parseLong(value); return Long.parseLong(value);
@ -1255,7 +1253,7 @@ public class DbBenchmark {
return Integer.parseInt(value); return Integer.parseInt(value);
} }
}, },
max_bytes_for_level_multiplier(10, max_bytes_for_level_multiplier(10.0d,
"A multiplier to compute max bytes for level-N (N >= 2)") { "A multiplier to compute max bytes for level-N (N >= 2)") {
@Override public Object parseValue(String value) { @Override public Object parseValue(String value) {
return Double.parseDouble(value); return Double.parseDouble(value);
@ -1337,7 +1335,7 @@ public class DbBenchmark {
return Integer.parseInt(value); return Integer.parseInt(value);
} }
}, },
stats_interval(0,"Stats are reported every N operations when\n" + stats_interval(0L, "Stats are reported every N operations when\n" +
"\tthis is greater than zero. When 0 the interval grows over time.") { "\tthis is greater than zero. When 0 the interval grows over time.") {
@Override public Object parseValue(String value) { @Override public Object parseValue(String value) {
return Long.parseLong(value); return Long.parseLong(value);
@ -1354,12 +1352,12 @@ public class DbBenchmark {
return Integer.parseInt(value); return Integer.parseInt(value);
} }
}, },
soft_rate_limit(0.0,"") { soft_rate_limit(0.0d,"") {
@Override public Object parseValue(String value) { @Override public Object parseValue(String value) {
return Double.parseDouble(value); return Double.parseDouble(value);
} }
}, },
hard_rate_limit(0.0,"When not equal to 0 this make threads\n" + hard_rate_limit(0.0d,"When not equal to 0 this make threads\n" +
"\tsleep at each stats reporting interval until the compaction\n" + "\tsleep at each stats reporting interval until the compaction\n" +
"\tscore for all levels is less than or equal to this value.") { "\tscore for all levels is less than or equal to this value.") {
@Override public Object parseValue(String value) { @Override public Object parseValue(String value) {
@ -1373,11 +1371,10 @@ public class DbBenchmark {
return Integer.parseInt(value); return Integer.parseInt(value);
} }
}, },
max_grandparent_overlap_factor(10,"Control maximum bytes of\n" + max_compaction_bytes(0L, "Limit number of bytes in one compaction to be lower than this\n" +
"\toverlaps in grandparent (i.e., level+2) before we stop building a\n" + "\threshold. But it's not guaranteed.") {
"\tsingle file in a level->level+1 compaction.") {
@Override public Object parseValue(String value) { @Override public Object parseValue(String value) {
return Integer.parseInt(value); return Long.parseLong(value);
} }
}, },
readonly(false,"Run read only benchmarks.") { readonly(false,"Run read only benchmarks.") {
@ -1390,13 +1387,6 @@ public class DbBenchmark {
return parseBoolean(value); return parseBoolean(value);
} }
}, },
source_compaction_factor(1,"Cap the size of data in level-K for\n" +
"\ta compaction run that compacts Level-K with Level-(K+1) (for\n" +
"\tK >= 1)") {
@Override public Object parseValue(String value) {
return Integer.parseInt(value);
}
},
wal_ttl_seconds(0L,"Set the TTL for the WAL Files in seconds.") { wal_ttl_seconds(0L,"Set the TTL for the WAL Files in seconds.") {
@Override public Object parseValue(String value) { @Override public Object parseValue(String value) {
return Long.parseLong(value); return Long.parseLong(value);

@ -27,8 +27,12 @@
*/ */
jlong Java_org_rocksdb_BackupableDBOptions_newBackupableDBOptions( jlong Java_org_rocksdb_BackupableDBOptions_newBackupableDBOptions(
JNIEnv* env, jclass jcls, jstring jpath) { JNIEnv* env, jclass jcls, jstring jpath) {
const char* cpath = env->GetStringUTFChars(jpath, NULL); const char* cpath = env->GetStringUTFChars(jpath, nullptr);
auto bopt = new rocksdb::BackupableDBOptions(cpath); if(cpath == nullptr) {
// exception thrown: OutOfMemoryError
return 0;
}
auto* bopt = new rocksdb::BackupableDBOptions(cpath);
env->ReleaseStringUTFChars(jpath, cpath); env->ReleaseStringUTFChars(jpath, cpath);
return reinterpret_cast<jlong>(bopt); return reinterpret_cast<jlong>(bopt);
} }
@ -40,7 +44,7 @@ jlong Java_org_rocksdb_BackupableDBOptions_newBackupableDBOptions(
*/ */
jstring Java_org_rocksdb_BackupableDBOptions_backupDir( jstring Java_org_rocksdb_BackupableDBOptions_backupDir(
JNIEnv* env, jobject jopt, jlong jhandle) { JNIEnv* env, jobject jopt, jlong jhandle) {
auto bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle); auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
return env->NewStringUTF(bopt->backup_dir.c_str()); return env->NewStringUTF(bopt->backup_dir.c_str());
} }
@ -51,7 +55,7 @@ jstring Java_org_rocksdb_BackupableDBOptions_backupDir(
*/ */
void Java_org_rocksdb_BackupableDBOptions_setShareTableFiles( void Java_org_rocksdb_BackupableDBOptions_setShareTableFiles(
JNIEnv* env, jobject jobj, jlong jhandle, jboolean flag) { JNIEnv* env, jobject jobj, jlong jhandle, jboolean flag) {
auto bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle); auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
bopt->share_table_files = flag; bopt->share_table_files = flag;
} }
@ -62,7 +66,7 @@ void Java_org_rocksdb_BackupableDBOptions_setShareTableFiles(
*/ */
jboolean Java_org_rocksdb_BackupableDBOptions_shareTableFiles( jboolean Java_org_rocksdb_BackupableDBOptions_shareTableFiles(
JNIEnv* env, jobject jobj, jlong jhandle) { JNIEnv* env, jobject jobj, jlong jhandle) {
auto bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle); auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
return bopt->share_table_files; return bopt->share_table_files;
} }
@ -73,7 +77,7 @@ jboolean Java_org_rocksdb_BackupableDBOptions_shareTableFiles(
*/ */
void Java_org_rocksdb_BackupableDBOptions_setSync( void Java_org_rocksdb_BackupableDBOptions_setSync(
JNIEnv* env, jobject jobj, jlong jhandle, jboolean flag) { JNIEnv* env, jobject jobj, jlong jhandle, jboolean flag) {
auto bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle); auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
bopt->sync = flag; bopt->sync = flag;
} }
@ -84,7 +88,7 @@ void Java_org_rocksdb_BackupableDBOptions_setSync(
*/ */
jboolean Java_org_rocksdb_BackupableDBOptions_sync( jboolean Java_org_rocksdb_BackupableDBOptions_sync(
JNIEnv* env, jobject jobj, jlong jhandle) { JNIEnv* env, jobject jobj, jlong jhandle) {
auto bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle); auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
return bopt->sync; return bopt->sync;
} }
@ -95,7 +99,7 @@ jboolean Java_org_rocksdb_BackupableDBOptions_sync(
*/ */
void Java_org_rocksdb_BackupableDBOptions_setDestroyOldData( void Java_org_rocksdb_BackupableDBOptions_setDestroyOldData(
JNIEnv* env, jobject jobj, jlong jhandle, jboolean flag) { JNIEnv* env, jobject jobj, jlong jhandle, jboolean flag) {
auto bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle); auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
bopt->destroy_old_data = flag; bopt->destroy_old_data = flag;
} }
@ -106,7 +110,7 @@ void Java_org_rocksdb_BackupableDBOptions_setDestroyOldData(
*/ */
jboolean Java_org_rocksdb_BackupableDBOptions_destroyOldData( jboolean Java_org_rocksdb_BackupableDBOptions_destroyOldData(
JNIEnv* env, jobject jobj, jlong jhandle) { JNIEnv* env, jobject jobj, jlong jhandle) {
auto bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle); auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
return bopt->destroy_old_data; return bopt->destroy_old_data;
} }
@ -117,7 +121,7 @@ jboolean Java_org_rocksdb_BackupableDBOptions_destroyOldData(
*/ */
void Java_org_rocksdb_BackupableDBOptions_setBackupLogFiles( void Java_org_rocksdb_BackupableDBOptions_setBackupLogFiles(
JNIEnv* env, jobject jobj, jlong jhandle, jboolean flag) { JNIEnv* env, jobject jobj, jlong jhandle, jboolean flag) {
auto bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle); auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
bopt->backup_log_files = flag; bopt->backup_log_files = flag;
} }
@ -128,7 +132,7 @@ void Java_org_rocksdb_BackupableDBOptions_setBackupLogFiles(
*/ */
jboolean Java_org_rocksdb_BackupableDBOptions_backupLogFiles( jboolean Java_org_rocksdb_BackupableDBOptions_backupLogFiles(
JNIEnv* env, jobject jobj, jlong jhandle) { JNIEnv* env, jobject jobj, jlong jhandle) {
auto bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle); auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
return bopt->backup_log_files; return bopt->backup_log_files;
} }
@ -139,7 +143,7 @@ jboolean Java_org_rocksdb_BackupableDBOptions_backupLogFiles(
*/ */
void Java_org_rocksdb_BackupableDBOptions_setBackupRateLimit( void Java_org_rocksdb_BackupableDBOptions_setBackupRateLimit(
JNIEnv* env, jobject jobj, jlong jhandle, jlong jbackup_rate_limit) { JNIEnv* env, jobject jobj, jlong jhandle, jlong jbackup_rate_limit) {
auto bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle); auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
bopt->backup_rate_limit = jbackup_rate_limit; bopt->backup_rate_limit = jbackup_rate_limit;
} }
@ -150,7 +154,7 @@ void Java_org_rocksdb_BackupableDBOptions_setBackupRateLimit(
*/ */
jlong Java_org_rocksdb_BackupableDBOptions_backupRateLimit( jlong Java_org_rocksdb_BackupableDBOptions_backupRateLimit(
JNIEnv* env, jobject jobj, jlong jhandle) { JNIEnv* env, jobject jobj, jlong jhandle) {
auto bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle); auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
return bopt->backup_rate_limit; return bopt->backup_rate_limit;
} }
@ -161,7 +165,7 @@ jlong Java_org_rocksdb_BackupableDBOptions_backupRateLimit(
*/ */
void Java_org_rocksdb_BackupableDBOptions_setRestoreRateLimit( void Java_org_rocksdb_BackupableDBOptions_setRestoreRateLimit(
JNIEnv* env, jobject jobj, jlong jhandle, jlong jrestore_rate_limit) { JNIEnv* env, jobject jobj, jlong jhandle, jlong jrestore_rate_limit) {
auto bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle); auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
bopt->restore_rate_limit = jrestore_rate_limit; bopt->restore_rate_limit = jrestore_rate_limit;
} }
@ -172,7 +176,7 @@ void Java_org_rocksdb_BackupableDBOptions_setRestoreRateLimit(
*/ */
jlong Java_org_rocksdb_BackupableDBOptions_restoreRateLimit( jlong Java_org_rocksdb_BackupableDBOptions_restoreRateLimit(
JNIEnv* env, jobject jobj, jlong jhandle) { JNIEnv* env, jobject jobj, jlong jhandle) {
auto bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle); auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
return bopt->restore_rate_limit; return bopt->restore_rate_limit;
} }
@ -183,7 +187,7 @@ jlong Java_org_rocksdb_BackupableDBOptions_restoreRateLimit(
*/ */
void Java_org_rocksdb_BackupableDBOptions_setShareFilesWithChecksum( void Java_org_rocksdb_BackupableDBOptions_setShareFilesWithChecksum(
JNIEnv* env, jobject jobj, jlong jhandle, jboolean flag) { JNIEnv* env, jobject jobj, jlong jhandle, jboolean flag) {
auto bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle); auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
bopt->share_files_with_checksum = flag; bopt->share_files_with_checksum = flag;
} }
@ -194,7 +198,7 @@ void Java_org_rocksdb_BackupableDBOptions_setShareFilesWithChecksum(
*/ */
jboolean Java_org_rocksdb_BackupableDBOptions_shareFilesWithChecksum( jboolean Java_org_rocksdb_BackupableDBOptions_shareFilesWithChecksum(
JNIEnv* env, jobject jobj, jlong jhandle) { JNIEnv* env, jobject jobj, jlong jhandle) {
auto bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle); auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
return bopt->share_files_with_checksum; return bopt->share_files_with_checksum;
} }
@ -205,7 +209,7 @@ jboolean Java_org_rocksdb_BackupableDBOptions_shareFilesWithChecksum(
*/ */
void Java_org_rocksdb_BackupableDBOptions_disposeInternal( void Java_org_rocksdb_BackupableDBOptions_disposeInternal(
JNIEnv* env, jobject jopt, jlong jhandle) { JNIEnv* env, jobject jopt, jlong jhandle) {
auto bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle); auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
assert(bopt); assert(bopt != nullptr);
delete bopt; delete bopt;
} }

@ -82,11 +82,15 @@ jintArray Java_org_rocksdb_BackupEngine_getCorruptedBackups(
backup_engine->GetCorruptedBackups(&backup_ids); backup_engine->GetCorruptedBackups(&backup_ids);
// store backupids in int array // store backupids in int array
std::vector<jint> int_backup_ids(backup_ids.begin(), backup_ids.end()); std::vector<jint> int_backup_ids(backup_ids.begin(), backup_ids.end());
// Store ints in java array // Store ints in java array
jintArray ret_backup_ids;
// Its ok to loose precision here (64->32) // Its ok to loose precision here (64->32)
jsize ret_backup_ids_size = static_cast<jsize>(backup_ids.size()); jsize ret_backup_ids_size = static_cast<jsize>(backup_ids.size());
ret_backup_ids = env->NewIntArray(ret_backup_ids_size); jintArray ret_backup_ids = env->NewIntArray(ret_backup_ids_size);
if(ret_backup_ids == nullptr) {
// exception thrown: OutOfMemoryError
return nullptr;
}
env->SetIntArrayRegion(ret_backup_ids, 0, ret_backup_ids_size, env->SetIntArrayRegion(ret_backup_ids, 0, ret_backup_ids_size,
int_backup_ids.data()); int_backup_ids.data());
return ret_backup_ids; return ret_backup_ids;
@ -155,14 +159,24 @@ void Java_org_rocksdb_BackupEngine_restoreDbFromBackup(
JNIEnv* env, jobject jbe, jlong jbe_handle, jint jbackup_id, JNIEnv* env, jobject jbe, jlong jbe_handle, jint jbackup_id,
jstring jdb_dir, jstring jwal_dir, jlong jrestore_options_handle) { jstring jdb_dir, jstring jwal_dir, jlong jrestore_options_handle) {
auto* backup_engine = reinterpret_cast<rocksdb::BackupEngine*>(jbe_handle); auto* backup_engine = reinterpret_cast<rocksdb::BackupEngine*>(jbe_handle);
const char* db_dir = env->GetStringUTFChars(jdb_dir, 0); const char* db_dir = env->GetStringUTFChars(jdb_dir, nullptr);
const char* wal_dir = env->GetStringUTFChars(jwal_dir, 0); if(db_dir == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
const char* wal_dir = env->GetStringUTFChars(jwal_dir, nullptr);
if(wal_dir == nullptr) {
// exception thrown: OutOfMemoryError
env->ReleaseStringUTFChars(jdb_dir, db_dir);
return;
}
auto* restore_options = auto* restore_options =
reinterpret_cast<rocksdb::RestoreOptions*>(jrestore_options_handle); reinterpret_cast<rocksdb::RestoreOptions*>(jrestore_options_handle);
auto status = auto status =
backup_engine->RestoreDBFromBackup( backup_engine->RestoreDBFromBackup(
static_cast<rocksdb::BackupID>(jbackup_id), db_dir, wal_dir, static_cast<rocksdb::BackupID>(jbackup_id), db_dir, wal_dir,
*restore_options); *restore_options);
env->ReleaseStringUTFChars(jwal_dir, wal_dir); env->ReleaseStringUTFChars(jwal_dir, wal_dir);
env->ReleaseStringUTFChars(jdb_dir, db_dir); env->ReleaseStringUTFChars(jdb_dir, db_dir);
@ -182,13 +196,23 @@ void Java_org_rocksdb_BackupEngine_restoreDbFromLatestBackup(
JNIEnv* env, jobject jbe, jlong jbe_handle, jstring jdb_dir, JNIEnv* env, jobject jbe, jlong jbe_handle, jstring jdb_dir,
jstring jwal_dir, jlong jrestore_options_handle) { jstring jwal_dir, jlong jrestore_options_handle) {
auto* backup_engine = reinterpret_cast<rocksdb::BackupEngine*>(jbe_handle); auto* backup_engine = reinterpret_cast<rocksdb::BackupEngine*>(jbe_handle);
const char* db_dir = env->GetStringUTFChars(jdb_dir, 0); const char* db_dir = env->GetStringUTFChars(jdb_dir, nullptr);
const char* wal_dir = env->GetStringUTFChars(jwal_dir, 0); if(db_dir == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
const char* wal_dir = env->GetStringUTFChars(jwal_dir, nullptr);
if(wal_dir == nullptr) {
// exception thrown: OutOfMemoryError
env->ReleaseStringUTFChars(jdb_dir, db_dir);
return;
}
auto* restore_options = auto* restore_options =
reinterpret_cast<rocksdb::RestoreOptions*>(jrestore_options_handle); reinterpret_cast<rocksdb::RestoreOptions*>(jrestore_options_handle);
auto status = auto status =
backup_engine->RestoreDBFromLatestBackup(db_dir, wal_dir, backup_engine->RestoreDBFromLatestBackup(db_dir, wal_dir,
*restore_options); *restore_options);
env->ReleaseStringUTFChars(jwal_dir, wal_dir); env->ReleaseStringUTFChars(jwal_dir, wal_dir);
env->ReleaseStringUTFChars(jdb_dir, db_dir); env->ReleaseStringUTFChars(jdb_dir, db_dir);
@ -206,5 +230,7 @@ void Java_org_rocksdb_BackupEngine_restoreDbFromLatestBackup(
*/ */
void Java_org_rocksdb_BackupEngine_disposeInternal( void Java_org_rocksdb_BackupEngine_disposeInternal(
JNIEnv* env, jobject jbe, jlong jbe_handle) { JNIEnv* env, jobject jbe, jlong jbe_handle) {
delete reinterpret_cast<rocksdb::BackupEngine*>(jbe_handle); auto* be = reinterpret_cast<rocksdb::BackupEngine*>(jbe_handle);
assert(be != nullptr);
delete be;
} }

@ -22,7 +22,7 @@
*/ */
jlong Java_org_rocksdb_Checkpoint_newCheckpoint(JNIEnv* env, jlong Java_org_rocksdb_Checkpoint_newCheckpoint(JNIEnv* env,
jclass jclazz, jlong jdb_handle) { jclass jclazz, jlong jdb_handle) {
auto db = reinterpret_cast<rocksdb::DB*>(jdb_handle); auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
rocksdb::Checkpoint* checkpoint; rocksdb::Checkpoint* checkpoint;
rocksdb::Checkpoint::Create(db, &checkpoint); rocksdb::Checkpoint::Create(db, &checkpoint);
return reinterpret_cast<jlong>(checkpoint); return reinterpret_cast<jlong>(checkpoint);
@ -35,8 +35,8 @@ jlong Java_org_rocksdb_Checkpoint_newCheckpoint(JNIEnv* env,
*/ */
void Java_org_rocksdb_Checkpoint_disposeInternal(JNIEnv* env, jobject jobj, void Java_org_rocksdb_Checkpoint_disposeInternal(JNIEnv* env, jobject jobj,
jlong jhandle) { jlong jhandle) {
auto checkpoint = reinterpret_cast<rocksdb::Checkpoint*>(jhandle); auto* checkpoint = reinterpret_cast<rocksdb::Checkpoint*>(jhandle);
assert(checkpoint); assert(checkpoint != nullptr);
delete checkpoint; delete checkpoint;
} }
@ -48,13 +48,20 @@ void Java_org_rocksdb_Checkpoint_disposeInternal(JNIEnv* env, jobject jobj,
void Java_org_rocksdb_Checkpoint_createCheckpoint( void Java_org_rocksdb_Checkpoint_createCheckpoint(
JNIEnv* env, jobject jobj, jlong jcheckpoint_handle, JNIEnv* env, jobject jobj, jlong jcheckpoint_handle,
jstring jcheckpoint_path) { jstring jcheckpoint_path) {
auto checkpoint = reinterpret_cast<rocksdb::Checkpoint*>(
jcheckpoint_handle);
const char* checkpoint_path = env->GetStringUTFChars( const char* checkpoint_path = env->GetStringUTFChars(
jcheckpoint_path, 0); jcheckpoint_path, 0);
if(checkpoint_path == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
auto* checkpoint = reinterpret_cast<rocksdb::Checkpoint*>(
jcheckpoint_handle);
rocksdb::Status s = checkpoint->CreateCheckpoint( rocksdb::Status s = checkpoint->CreateCheckpoint(
checkpoint_path); checkpoint_path);
env->ReleaseStringUTFChars(jcheckpoint_path, checkpoint_path); env->ReleaseStringUTFChars(jcheckpoint_path, checkpoint_path);
if (!s.ok()) { if (!s.ok()) {
rocksdb::RocksDBExceptionJni::ThrowNew(env, s); rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
} }

@ -20,6 +20,7 @@
*/ */
void Java_org_rocksdb_ColumnFamilyHandle_disposeInternal( void Java_org_rocksdb_ColumnFamilyHandle_disposeInternal(
JNIEnv* env, jobject jobj, jlong handle) { JNIEnv* env, jobject jobj, jlong handle) {
auto it = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(handle); auto* cfh = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(handle);
delete it; assert(cfh != nullptr);
delete cfh;
} }

@ -20,6 +20,8 @@
*/ */
void Java_org_rocksdb_AbstractCompactionFilter_disposeInternal( void Java_org_rocksdb_AbstractCompactionFilter_disposeInternal(
JNIEnv* env, jobject jobj, jlong handle) { JNIEnv* env, jobject jobj, jlong handle) {
delete reinterpret_cast<rocksdb::CompactionFilter*>(handle); auto* cf = reinterpret_cast<rocksdb::CompactionFilter*>(handle);
assert(cf != nullptr);
delete cf;
} }
// </editor-fold> // </editor-fold>

@ -27,7 +27,9 @@
*/ */
void Java_org_rocksdb_AbstractComparator_disposeInternal( void Java_org_rocksdb_AbstractComparator_disposeInternal(
JNIEnv* env, jobject jobj, jlong handle) { JNIEnv* env, jobject jobj, jlong handle) {
delete reinterpret_cast<rocksdb::BaseComparatorJniCallback*>(handle); auto* bcjc = reinterpret_cast<rocksdb::BaseComparatorJniCallback*>(handle);
assert(bcjc != nullptr);
delete bcjc;
} }
// </editor-fold> // </editor-fold>

@ -17,35 +17,60 @@ BaseComparatorJniCallback::BaseComparatorJniCallback(
mtx_findShortestSeparator(new port::Mutex(copt->use_adaptive_mutex)) { mtx_findShortestSeparator(new port::Mutex(copt->use_adaptive_mutex)) {
// Note: Comparator methods may be accessed by multiple threads, // Note: Comparator methods may be accessed by multiple threads,
// so we ref the jvm not the env // so we ref the jvm not the env
const jint rs __attribute__((unused)) = env->GetJavaVM(&m_jvm); const jint rs = env->GetJavaVM(&m_jvm);
assert(rs == JNI_OK); if(rs != JNI_OK) {
// exception thrown
return;
}
// Note: we want to access the Java Comparator instance // Note: we want to access the Java Comparator instance
// across multiple method calls, so we create a global ref // across multiple method calls, so we create a global ref
assert(jComparator != nullptr);
m_jComparator = env->NewGlobalRef(jComparator); m_jComparator = env->NewGlobalRef(jComparator);
if(m_jComparator == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
// Note: The name of a Comparator will not change during it's lifetime, // Note: The name of a Comparator will not change during it's lifetime,
// so we cache it in a global var // so we cache it in a global var
jmethodID jNameMethodId = AbstractComparatorJni::getNameMethodId(env); jmethodID jNameMethodId = AbstractComparatorJni::getNameMethodId(env);
if(jNameMethodId == nullptr) {
// exception thrown: NoSuchMethodException or OutOfMemoryError
return;
}
jstring jsName = (jstring)env->CallObjectMethod(m_jComparator, jNameMethodId); jstring jsName = (jstring)env->CallObjectMethod(m_jComparator, jNameMethodId);
m_name = JniUtil::copyString(env, jsName); // also releases jsName if(env->ExceptionCheck()) {
// exception thrown
return;
}
jboolean has_exception = JNI_FALSE;
m_name = JniUtil::copyString(env, jsName,
&has_exception); // also releases jsName
if (has_exception == JNI_TRUE) {
// exception thrown
return;
}
m_jCompareMethodId = AbstractComparatorJni::getCompareMethodId(env); m_jCompareMethodId = AbstractComparatorJni::getCompareMethodId(env);
if(m_jCompareMethodId == nullptr) {
// exception thrown: NoSuchMethodException or OutOfMemoryError
return;
}
m_jFindShortestSeparatorMethodId = m_jFindShortestSeparatorMethodId =
AbstractComparatorJni::getFindShortestSeparatorMethodId(env); AbstractComparatorJni::getFindShortestSeparatorMethodId(env);
if(m_jFindShortestSeparatorMethodId == nullptr) {
// exception thrown: NoSuchMethodException or OutOfMemoryError
return;
}
m_jFindShortSuccessorMethodId = m_jFindShortSuccessorMethodId =
AbstractComparatorJni::getFindShortSuccessorMethodId(env); AbstractComparatorJni::getFindShortSuccessorMethodId(env);
} if(m_jFindShortSuccessorMethodId == nullptr) {
// exception thrown: NoSuchMethodException or OutOfMemoryError
/** return;
* Attach/Get a JNIEnv for the current native thread }
*/
JNIEnv* BaseComparatorJniCallback::getJniEnv() const {
JNIEnv *env;
jint rs __attribute__((unused)) =
m_jvm->AttachCurrentThread(reinterpret_cast<void**>(&env), NULL);
assert(rs == JNI_OK);
return env;
} }
const char* BaseComparatorJniCallback::Name() const { const char* BaseComparatorJniCallback::Name() const {
@ -53,22 +78,50 @@ const char* BaseComparatorJniCallback::Name() const {
} }
int BaseComparatorJniCallback::Compare(const Slice& a, const Slice& b) const { int BaseComparatorJniCallback::Compare(const Slice& a, const Slice& b) const {
JNIEnv* m_env = getJniEnv(); jboolean attached_thread = JNI_FALSE;
JNIEnv* env = JniUtil::getJniEnv(m_jvm, &attached_thread);
assert(env != nullptr);
// TODO(adamretter): slice objects can potentially be cached using thread // TODO(adamretter): slice objects can potentially be cached using thread
// local variables to avoid locking. Could make this configurable depending on // local variables to avoid locking. Could make this configurable depending on
// performance. // performance.
mtx_compare->Lock(); mtx_compare->Lock();
AbstractSliceJni::setHandle(m_env, m_jSliceA, &a, JNI_FALSE); bool pending_exception =
AbstractSliceJni::setHandle(m_env, m_jSliceB, &b, JNI_FALSE); AbstractSliceJni::setHandle(env, m_jSliceA, &a, JNI_FALSE);
if(pending_exception) {
if(env->ExceptionCheck()) {
// exception thrown from setHandle or descendant
env->ExceptionDescribe(); // print out exception to stderr
}
JniUtil::releaseJniEnv(m_jvm, attached_thread);
return 0;
}
pending_exception =
AbstractSliceJni::setHandle(env, m_jSliceB, &b, JNI_FALSE);
if(pending_exception) {
if(env->ExceptionCheck()) {
// exception thrown from setHandle or descendant
env->ExceptionDescribe(); // print out exception to stderr
}
JniUtil::releaseJniEnv(m_jvm, attached_thread);
return 0;
}
jint result = jint result =
m_env->CallIntMethod(m_jComparator, m_jCompareMethodId, m_jSliceA, env->CallIntMethod(m_jComparator, m_jCompareMethodId, m_jSliceA,
m_jSliceB); m_jSliceB);
mtx_compare->Unlock(); mtx_compare->Unlock();
m_jvm->DetachCurrentThread(); if(env->ExceptionCheck()) {
// exception thrown from CallIntMethod
env->ExceptionDescribe(); // print out exception to stderr
result = 0; // we could not get a result from java callback so use 0
}
JniUtil::releaseJniEnv(m_jvm, attached_thread);
return result; return result;
} }
@ -79,32 +132,80 @@ void BaseComparatorJniCallback::FindShortestSeparator(
return; return;
} }
JNIEnv* m_env = getJniEnv(); jboolean attached_thread = JNI_FALSE;
JNIEnv* env = JniUtil::getJniEnv(m_jvm, &attached_thread);
assert(env != nullptr);
const char* startUtf = start->c_str(); const char* startUtf = start->c_str();
jstring jsStart = m_env->NewStringUTF(startUtf); jstring jsStart = env->NewStringUTF(startUtf);
if(jsStart == nullptr) {
// unable to construct string
if(env->ExceptionCheck()) {
env->ExceptionDescribe(); // print out exception to stderr
}
JniUtil::releaseJniEnv(m_jvm, attached_thread);
return;
}
if(env->ExceptionCheck()) {
// exception thrown: OutOfMemoryError
env->ExceptionDescribe(); // print out exception to stderr
env->DeleteLocalRef(jsStart);
JniUtil::releaseJniEnv(m_jvm, attached_thread);
return;
}
// TODO(adamretter): slice object can potentially be cached using thread local // TODO(adamretter): slice object can potentially be cached using thread local
// variable to avoid locking. Could make this configurable depending on // variable to avoid locking. Could make this configurable depending on
// performance. // performance.
mtx_findShortestSeparator->Lock(); mtx_findShortestSeparator->Lock();
AbstractSliceJni::setHandle(m_env, m_jSliceLimit, &limit, JNI_FALSE); bool pending_exception =
AbstractSliceJni::setHandle(env, m_jSliceLimit, &limit, JNI_FALSE);
if(pending_exception) {
if(env->ExceptionCheck()) {
// exception thrown from setHandle or descendant
env->ExceptionDescribe(); // print out exception to stderr
}
if(jsStart != nullptr) {
env->DeleteLocalRef(jsStart);
}
JniUtil::releaseJniEnv(m_jvm, attached_thread);
return;
}
jstring jsResultStart = jstring jsResultStart =
(jstring)m_env->CallObjectMethod(m_jComparator, (jstring)env->CallObjectMethod(m_jComparator,
m_jFindShortestSeparatorMethodId, jsStart, m_jSliceLimit); m_jFindShortestSeparatorMethodId, jsStart, m_jSliceLimit);
mtx_findShortestSeparator->Unlock(); mtx_findShortestSeparator->Unlock();
m_env->DeleteLocalRef(jsStart); if(env->ExceptionCheck()) {
// exception thrown from CallObjectMethod
env->ExceptionDescribe(); // print out exception to stderr
env->DeleteLocalRef(jsStart);
JniUtil::releaseJniEnv(m_jvm, attached_thread);
return;
}
env->DeleteLocalRef(jsStart);
if (jsResultStart != nullptr) { if (jsResultStart != nullptr) {
// update start with result // update start with result
*start = jboolean has_exception = JNI_FALSE;
JniUtil::copyString(m_env, jsResultStart); // also releases jsResultStart std::string result = JniUtil::copyString(env, jsResultStart,
&has_exception); // also releases jsResultStart
if (has_exception == JNI_TRUE) {
if (env->ExceptionCheck()) {
env->ExceptionDescribe(); // print out exception to stderr
}
JniUtil::releaseJniEnv(m_jvm, attached_thread);
return;
}
*start = result;
} }
m_jvm->DetachCurrentThread(); JniUtil::releaseJniEnv(m_jvm, attached_thread);
} }
void BaseComparatorJniCallback::FindShortSuccessor(std::string* key) const { void BaseComparatorJniCallback::FindShortSuccessor(std::string* key) const {
@ -112,34 +213,69 @@ void BaseComparatorJniCallback::FindShortSuccessor(std::string* key) const {
return; return;
} }
JNIEnv* m_env = getJniEnv(); jboolean attached_thread = JNI_FALSE;
JNIEnv* env = JniUtil::getJniEnv(m_jvm, &attached_thread);
assert(env != nullptr);
const char* keyUtf = key->c_str(); const char* keyUtf = key->c_str();
jstring jsKey = m_env->NewStringUTF(keyUtf); jstring jsKey = env->NewStringUTF(keyUtf);
if(jsKey == nullptr) {
// unable to construct string
if(env->ExceptionCheck()) {
env->ExceptionDescribe(); // print out exception to stderr
}
JniUtil::releaseJniEnv(m_jvm, attached_thread);
return;
} else if(env->ExceptionCheck()) {
// exception thrown: OutOfMemoryError
env->ExceptionDescribe(); // print out exception to stderr
env->DeleteLocalRef(jsKey);
JniUtil::releaseJniEnv(m_jvm, attached_thread);
return;
}
jstring jsResultKey = jstring jsResultKey =
(jstring)m_env->CallObjectMethod(m_jComparator, (jstring)env->CallObjectMethod(m_jComparator,
m_jFindShortSuccessorMethodId, jsKey); m_jFindShortSuccessorMethodId, jsKey);
m_env->DeleteLocalRef(jsKey); if(env->ExceptionCheck()) {
// exception thrown from CallObjectMethod
env->ExceptionDescribe(); // print out exception to stderr
env->DeleteLocalRef(jsKey);
JniUtil::releaseJniEnv(m_jvm, attached_thread);
return;
}
env->DeleteLocalRef(jsKey);
if (jsResultKey != nullptr) { if (jsResultKey != nullptr) {
// updates key with result, also releases jsResultKey. // updates key with result, also releases jsResultKey.
*key = JniUtil::copyString(m_env, jsResultKey); jboolean has_exception = JNI_FALSE;
std::string result = JniUtil::copyString(env, jsResultKey, &has_exception);
if (has_exception == JNI_TRUE) {
if (env->ExceptionCheck()) {
env->ExceptionDescribe(); // print out exception to stderr
}
JniUtil::releaseJniEnv(m_jvm, attached_thread);
return;
}
*key = result;
} }
m_jvm->DetachCurrentThread(); JniUtil::releaseJniEnv(m_jvm, attached_thread);
} }
BaseComparatorJniCallback::~BaseComparatorJniCallback() { BaseComparatorJniCallback::~BaseComparatorJniCallback() {
JNIEnv* m_env = getJniEnv(); jboolean attached_thread = JNI_FALSE;
JNIEnv* env = JniUtil::getJniEnv(m_jvm, &attached_thread);
assert(env != nullptr);
m_env->DeleteGlobalRef(m_jComparator); if(m_jComparator != nullptr) {
env->DeleteGlobalRef(m_jComparator);
}
// Note: do not need to explicitly detach, as this function is effectively JniUtil::releaseJniEnv(m_jvm, attached_thread);
// called from the Java class's disposeInternal method, and so already
// has an attached thread, getJniEnv above is just a no-op Attach to get
// the env jvm->DetachCurrentThread();
} }
ComparatorJniCallback::ComparatorJniCallback( ComparatorJniCallback::ComparatorJniCallback(
@ -147,15 +283,42 @@ ComparatorJniCallback::ComparatorJniCallback(
const ComparatorJniCallbackOptions* copt) : const ComparatorJniCallbackOptions* copt) :
BaseComparatorJniCallback(env, jComparator, copt) { BaseComparatorJniCallback(env, jComparator, copt) {
m_jSliceA = env->NewGlobalRef(SliceJni::construct0(env)); m_jSliceA = env->NewGlobalRef(SliceJni::construct0(env));
if(m_jSliceA == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
m_jSliceB = env->NewGlobalRef(SliceJni::construct0(env)); m_jSliceB = env->NewGlobalRef(SliceJni::construct0(env));
if(m_jSliceB == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
m_jSliceLimit = env->NewGlobalRef(SliceJni::construct0(env)); m_jSliceLimit = env->NewGlobalRef(SliceJni::construct0(env));
if(m_jSliceLimit == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
} }
ComparatorJniCallback::~ComparatorJniCallback() { ComparatorJniCallback::~ComparatorJniCallback() {
JNIEnv* m_env = getJniEnv(); jboolean attached_thread = JNI_FALSE;
m_env->DeleteGlobalRef(m_jSliceA); JNIEnv* env = JniUtil::getJniEnv(m_jvm, &attached_thread);
m_env->DeleteGlobalRef(m_jSliceB); assert(env != nullptr);
m_env->DeleteGlobalRef(m_jSliceLimit);
if(m_jSliceA != nullptr) {
env->DeleteGlobalRef(m_jSliceA);
}
if(m_jSliceB != nullptr) {
env->DeleteGlobalRef(m_jSliceB);
}
if(m_jSliceLimit != nullptr) {
env->DeleteGlobalRef(m_jSliceLimit);
}
JniUtil::releaseJniEnv(m_jvm, attached_thread);
} }
DirectComparatorJniCallback::DirectComparatorJniCallback( DirectComparatorJniCallback::DirectComparatorJniCallback(
@ -163,14 +326,41 @@ DirectComparatorJniCallback::DirectComparatorJniCallback(
const ComparatorJniCallbackOptions* copt) : const ComparatorJniCallbackOptions* copt) :
BaseComparatorJniCallback(env, jComparator, copt) { BaseComparatorJniCallback(env, jComparator, copt) {
m_jSliceA = env->NewGlobalRef(DirectSliceJni::construct0(env)); m_jSliceA = env->NewGlobalRef(DirectSliceJni::construct0(env));
if(m_jSliceA == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
m_jSliceB = env->NewGlobalRef(DirectSliceJni::construct0(env)); m_jSliceB = env->NewGlobalRef(DirectSliceJni::construct0(env));
if(m_jSliceB == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
m_jSliceLimit = env->NewGlobalRef(DirectSliceJni::construct0(env)); m_jSliceLimit = env->NewGlobalRef(DirectSliceJni::construct0(env));
if(m_jSliceLimit == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
} }
DirectComparatorJniCallback::~DirectComparatorJniCallback() { DirectComparatorJniCallback::~DirectComparatorJniCallback() {
JNIEnv* m_env = getJniEnv(); jboolean attached_thread = JNI_FALSE;
m_env->DeleteGlobalRef(m_jSliceA); JNIEnv* env = JniUtil::getJniEnv(m_jvm, &attached_thread);
m_env->DeleteGlobalRef(m_jSliceB); assert(env != nullptr);
m_env->DeleteGlobalRef(m_jSliceLimit);
if(m_jSliceA != nullptr) {
env->DeleteGlobalRef(m_jSliceA);
}
if(m_jSliceB != nullptr) {
env->DeleteGlobalRef(m_jSliceB);
}
if(m_jSliceLimit != nullptr) {
env->DeleteGlobalRef(m_jSliceLimit);
}
JniUtil::releaseJniEnv(m_jvm, attached_thread);
} }
} // namespace rocksdb } // namespace rocksdb

@ -61,7 +61,6 @@ class BaseComparatorJniCallback : public Comparator {
port::Mutex* mtx_compare; port::Mutex* mtx_compare;
// used for synchronisation in findShortestSeparator method // used for synchronisation in findShortestSeparator method
port::Mutex* mtx_findShortestSeparator; port::Mutex* mtx_findShortestSeparator;
JavaVM* m_jvm;
jobject m_jComparator; jobject m_jComparator;
std::string m_name; std::string m_name;
jmethodID m_jCompareMethodId; jmethodID m_jCompareMethodId;
@ -69,7 +68,7 @@ class BaseComparatorJniCallback : public Comparator {
jmethodID m_jFindShortSuccessorMethodId; jmethodID m_jFindShortSuccessorMethodId;
protected: protected:
JNIEnv* getJniEnv() const; JavaVM* m_jvm;
jobject m_jSliceA; jobject m_jSliceA;
jobject m_jSliceB; jobject m_jSliceB;
jobject m_jSliceLimit; jobject m_jSliceLimit;

@ -75,5 +75,7 @@ jlong Java_org_rocksdb_RocksMemEnv_createMemEnv(
*/ */
void Java_org_rocksdb_RocksMemEnv_disposeInternal( void Java_org_rocksdb_RocksMemEnv_disposeInternal(
JNIEnv* env, jobject jobj, jlong jhandle) { JNIEnv* env, jobject jobj, jlong jhandle) {
delete reinterpret_cast<rocksdb::Env*>(jhandle); auto* e = reinterpret_cast<rocksdb::Env*>(jhandle);
assert(e != nullptr);
delete e;
} }

@ -44,7 +44,9 @@ jlong Java_org_rocksdb_EnvOptions_newEnvOptions(JNIEnv *env, jclass jcls) {
*/ */
void Java_org_rocksdb_EnvOptions_disposeInternal(JNIEnv *env, jobject jobj, void Java_org_rocksdb_EnvOptions_disposeInternal(JNIEnv *env, jobject jobj,
jlong jhandle) { jlong jhandle) {
delete reinterpret_cast<rocksdb::EnvOptions *>(jhandle); auto* eo = reinterpret_cast<rocksdb::EnvOptions *>(jhandle);
assert(eo != nullptr);
delete eo;
} }
/* /*
@ -288,7 +290,8 @@ jlong Java_org_rocksdb_EnvOptions_writableFileMaxBufferSize(JNIEnv *env,
void Java_org_rocksdb_EnvOptions_setRateLimiter(JNIEnv *env, jobject jobj, void Java_org_rocksdb_EnvOptions_setRateLimiter(JNIEnv *env, jobject jobj,
jlong jhandle, jlong jhandle,
jlong rl_handle) { jlong rl_handle) {
auto *rate_limiter = reinterpret_cast<rocksdb::RateLimiter *>(rl_handle); auto* sptr_rate_limiter =
auto *env_opt = reinterpret_cast<rocksdb::EnvOptions *>(jhandle); reinterpret_cast<std::shared_ptr<rocksdb::RateLimiter> *>(rl_handle);
env_opt->rate_limiter = rate_limiter; auto* env_opt = reinterpret_cast<rocksdb::EnvOptions *>(jhandle);
env_opt->rate_limiter = sptr_rate_limiter->get();
} }

@ -31,17 +31,35 @@ jlong Java_org_rocksdb_ExternalSstFileInfo_newExternalSstFileInfo__Ljava_lang_St
JNIEnv *env, jclass jcls, jstring jfile_path, jstring jsmallest_key, JNIEnv *env, jclass jcls, jstring jfile_path, jstring jsmallest_key,
jstring jlargest_key, jlong jsequence_number, jlong jfile_size, jstring jlargest_key, jlong jsequence_number, jlong jfile_size,
jint jnum_entries, jint jversion) { jint jnum_entries, jint jversion) {
const char *file_path = env->GetStringUTFChars(jfile_path, NULL); const char *file_path = env->GetStringUTFChars(jfile_path, nullptr);
const char *smallest_key = env->GetStringUTFChars(jsmallest_key, NULL); if(file_path == nullptr) {
const char *largest_key = env->GetStringUTFChars(jlargest_key, NULL); // exception thrown: OutOfMemoryError
return 0;
}
const char *smallest_key = env->GetStringUTFChars(jsmallest_key, nullptr);
if(smallest_key == nullptr) {
// exception thrown: OutOfMemoryError
env->ReleaseStringUTFChars(jfile_path, file_path);
return 0;
}
const char *largest_key = env->GetStringUTFChars(jlargest_key, nullptr);
if(largest_key == nullptr) {
// exception thrown: OutOfMemoryError
env->ReleaseStringUTFChars(jsmallest_key, smallest_key);
env->ReleaseStringUTFChars(jfile_path, file_path);
return 0;
}
auto *external_sst_file_info = new rocksdb::ExternalSstFileInfo( auto *external_sst_file_info = new rocksdb::ExternalSstFileInfo(
file_path, smallest_key, largest_key, file_path, smallest_key, largest_key,
static_cast<rocksdb::SequenceNumber>(jsequence_number), static_cast<rocksdb::SequenceNumber>(jsequence_number),
static_cast<uint64_t>(jfile_size), static_cast<int32_t>(jnum_entries), static_cast<uint64_t>(jfile_size), static_cast<int32_t>(jnum_entries),
static_cast<int32_t>(jversion)); static_cast<int32_t>(jversion));
env->ReleaseStringUTFChars(jfile_path, file_path);
env->ReleaseStringUTFChars(jsmallest_key, smallest_key);
env->ReleaseStringUTFChars(jlargest_key, largest_key); env->ReleaseStringUTFChars(jlargest_key, largest_key);
env->ReleaseStringUTFChars(jsmallest_key, smallest_key);
env->ReleaseStringUTFChars(jfile_path, file_path);
return reinterpret_cast<jlong>(external_sst_file_info); return reinterpret_cast<jlong>(external_sst_file_info);
} }
@ -55,7 +73,11 @@ void Java_org_rocksdb_ExternalSstFileInfo_setFilePath(JNIEnv *env, jobject jobj,
jstring jfile_path) { jstring jfile_path) {
auto *external_sst_file_info = auto *external_sst_file_info =
reinterpret_cast<rocksdb::ExternalSstFileInfo *>(jhandle); reinterpret_cast<rocksdb::ExternalSstFileInfo *>(jhandle);
const char *file_path = env->GetStringUTFChars(jfile_path, NULL); const char *file_path = env->GetStringUTFChars(jfile_path, nullptr);
if(file_path == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
external_sst_file_info->file_path = file_path; external_sst_file_info->file_path = file_path;
env->ReleaseStringUTFChars(jfile_path, file_path); env->ReleaseStringUTFChars(jfile_path, file_path);
} }
@ -81,7 +103,11 @@ void Java_org_rocksdb_ExternalSstFileInfo_setSmallestKey(
JNIEnv *env, jobject jobj, jlong jhandle, jstring jsmallest_key) { JNIEnv *env, jobject jobj, jlong jhandle, jstring jsmallest_key) {
auto *external_sst_file_info = auto *external_sst_file_info =
reinterpret_cast<rocksdb::ExternalSstFileInfo *>(jhandle); reinterpret_cast<rocksdb::ExternalSstFileInfo *>(jhandle);
const char *smallest_key = env->GetStringUTFChars(jsmallest_key, NULL); const char *smallest_key = env->GetStringUTFChars(jsmallest_key, nullptr);
if(smallest_key == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
external_sst_file_info->smallest_key = smallest_key; external_sst_file_info->smallest_key = smallest_key;
env->ReleaseStringUTFChars(jsmallest_key, smallest_key); env->ReleaseStringUTFChars(jsmallest_key, smallest_key);
} }
@ -111,6 +137,10 @@ void Java_org_rocksdb_ExternalSstFileInfo_setLargestKey(JNIEnv *env,
auto *external_sst_file_info = auto *external_sst_file_info =
reinterpret_cast<rocksdb::ExternalSstFileInfo *>(jhandle); reinterpret_cast<rocksdb::ExternalSstFileInfo *>(jhandle);
const char *largest_key = env->GetStringUTFChars(jlargest_key, NULL); const char *largest_key = env->GetStringUTFChars(jlargest_key, NULL);
if(largest_key == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
external_sst_file_info->largest_key = largest_key; external_sst_file_info->largest_key = largest_key;
env->ReleaseStringUTFChars(jlargest_key, largest_key); env->ReleaseStringUTFChars(jlargest_key, largest_key);
} }
@ -238,5 +268,7 @@ jint Java_org_rocksdb_ExternalSstFileInfo_version(JNIEnv *env, jobject jobj,
void Java_org_rocksdb_ExternalSstFileInfo_disposeInternal(JNIEnv *env, void Java_org_rocksdb_ExternalSstFileInfo_disposeInternal(JNIEnv *env,
jobject jobj, jobject jobj,
jlong jhandle) { jlong jhandle) {
delete reinterpret_cast<rocksdb::ExternalSstFileInfo *>(jhandle); auto* esfi = reinterpret_cast<rocksdb::ExternalSstFileInfo *>(jhandle);
assert(esfi != nullptr);
delete esfi;
} }

@ -24,12 +24,10 @@
jlong Java_org_rocksdb_BloomFilter_createNewBloomFilter( jlong Java_org_rocksdb_BloomFilter_createNewBloomFilter(
JNIEnv* env, jclass jcls, jint bits_per_key, JNIEnv* env, jclass jcls, jint bits_per_key,
jboolean use_block_base_builder) { jboolean use_block_base_builder) {
auto* fp = const_cast<rocksdb::FilterPolicy *>( auto* sptr_filter =
rocksdb::NewBloomFilterPolicy(bits_per_key, use_block_base_builder)); new std::shared_ptr<const rocksdb::FilterPolicy>(
auto* pFilterPolicy = rocksdb::NewBloomFilterPolicy(bits_per_key, use_block_base_builder));
new std::shared_ptr<rocksdb::FilterPolicy>; return reinterpret_cast<jlong>(sptr_filter);
*pFilterPolicy = std::shared_ptr<rocksdb::FilterPolicy>(fp);
return reinterpret_cast<jlong>(pFilterPolicy);
} }
/* /*
@ -39,8 +37,7 @@ jlong Java_org_rocksdb_BloomFilter_createNewBloomFilter(
*/ */
void Java_org_rocksdb_Filter_disposeInternal( void Java_org_rocksdb_Filter_disposeInternal(
JNIEnv* env, jobject jobj, jlong jhandle) { JNIEnv* env, jobject jobj, jlong jhandle) {
auto* handle =
std::shared_ptr<rocksdb::FilterPolicy> *handle = reinterpret_cast<std::shared_ptr<const rocksdb::FilterPolicy> *>(jhandle);
reinterpret_cast<std::shared_ptr<rocksdb::FilterPolicy> *>(jhandle); delete handle; // delete std::shared_ptr
handle->reset();
} }

@ -21,7 +21,8 @@
*/ */
void Java_org_rocksdb_RocksIterator_disposeInternal( void Java_org_rocksdb_RocksIterator_disposeInternal(
JNIEnv* env, jobject jobj, jlong handle) { JNIEnv* env, jobject jobj, jlong handle) {
auto it = reinterpret_cast<rocksdb::Iterator*>(handle); auto* it = reinterpret_cast<rocksdb::Iterator*>(handle);
assert(it != nullptr);
delete it; delete it;
} }
@ -83,11 +84,16 @@ void Java_org_rocksdb_RocksIterator_prev0(
void Java_org_rocksdb_RocksIterator_seek0( void Java_org_rocksdb_RocksIterator_seek0(
JNIEnv* env, jobject jobj, jlong handle, JNIEnv* env, jobject jobj, jlong handle,
jbyteArray jtarget, jint jtarget_len) { jbyteArray jtarget, jint jtarget_len) {
auto it = reinterpret_cast<rocksdb::Iterator*>(handle); jbyte* target = env->GetByteArrayElements(jtarget, nullptr);
jbyte* target = env->GetByteArrayElements(jtarget, 0); if(target == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
rocksdb::Slice target_slice( rocksdb::Slice target_slice(
reinterpret_cast<char*>(target), jtarget_len); reinterpret_cast<char*>(target), jtarget_len);
auto* it = reinterpret_cast<rocksdb::Iterator*>(handle);
it->Seek(target_slice); it->Seek(target_slice);
env->ReleaseByteArrayElements(jtarget, target, JNI_ABORT); env->ReleaseByteArrayElements(jtarget, target, JNI_ABORT);
@ -100,7 +106,7 @@ void Java_org_rocksdb_RocksIterator_seek0(
*/ */
void Java_org_rocksdb_RocksIterator_status0( void Java_org_rocksdb_RocksIterator_status0(
JNIEnv* env, jobject jobj, jlong handle) { JNIEnv* env, jobject jobj, jlong handle) {
auto it = reinterpret_cast<rocksdb::Iterator*>(handle); auto* it = reinterpret_cast<rocksdb::Iterator*>(handle);
rocksdb::Status s = it->status(); rocksdb::Status s = it->status();
if (s.ok()) { if (s.ok()) {
@ -117,10 +123,14 @@ void Java_org_rocksdb_RocksIterator_status0(
*/ */
jbyteArray Java_org_rocksdb_RocksIterator_key0( jbyteArray Java_org_rocksdb_RocksIterator_key0(
JNIEnv* env, jobject jobj, jlong handle) { JNIEnv* env, jobject jobj, jlong handle) {
auto it = reinterpret_cast<rocksdb::Iterator*>(handle); auto* it = reinterpret_cast<rocksdb::Iterator*>(handle);
rocksdb::Slice key_slice = it->key(); rocksdb::Slice key_slice = it->key();
jbyteArray jkey = env->NewByteArray(static_cast<jsize>(key_slice.size())); jbyteArray jkey = env->NewByteArray(static_cast<jsize>(key_slice.size()));
if(jkey == nullptr) {
// exception thrown: OutOfMemoryError
return nullptr;
}
env->SetByteArrayRegion(jkey, 0, static_cast<jsize>(key_slice.size()), env->SetByteArrayRegion(jkey, 0, static_cast<jsize>(key_slice.size()),
reinterpret_cast<const jbyte*>(key_slice.data())); reinterpret_cast<const jbyte*>(key_slice.data()));
return jkey; return jkey;
@ -133,11 +143,15 @@ jbyteArray Java_org_rocksdb_RocksIterator_key0(
*/ */
jbyteArray Java_org_rocksdb_RocksIterator_value0( jbyteArray Java_org_rocksdb_RocksIterator_value0(
JNIEnv* env, jobject jobj, jlong handle) { JNIEnv* env, jobject jobj, jlong handle) {
auto it = reinterpret_cast<rocksdb::Iterator*>(handle); auto* it = reinterpret_cast<rocksdb::Iterator*>(handle);
rocksdb::Slice value_slice = it->value(); rocksdb::Slice value_slice = it->value();
jbyteArray jkeyValue = jbyteArray jkeyValue =
env->NewByteArray(static_cast<jsize>(value_slice.size())); env->NewByteArray(static_cast<jsize>(value_slice.size()));
if(jkeyValue == nullptr) {
// exception thrown: OutOfMemoryError
return nullptr;
}
env->SetByteArrayRegion(jkeyValue, 0, static_cast<jsize>(value_slice.size()), env->SetByteArrayRegion(jkeyValue, 0, static_cast<jsize>(value_slice.size()),
reinterpret_cast<const jbyte*>(value_slice.data())); reinterpret_cast<const jbyte*>(value_slice.data()));
return jkeyValue; return jkeyValue;

@ -10,53 +10,106 @@
#include "rocksjni/loggerjnicallback.h" #include "rocksjni/loggerjnicallback.h"
#include "rocksjni/portal.h" #include "rocksjni/portal.h"
#include <cstdarg>
#include <cstdio>
namespace rocksdb { namespace rocksdb {
LoggerJniCallback::LoggerJniCallback( LoggerJniCallback::LoggerJniCallback(
JNIEnv* env, jobject jlogger) { JNIEnv* env, jobject jlogger) {
const jint rs __attribute__((unused)) = env->GetJavaVM(&m_jvm); // Note: Logger methods may be accessed by multiple threads,
assert(rs == JNI_OK); // so we ref the jvm not the env
const jint rs = env->GetJavaVM(&m_jvm);
if(rs != JNI_OK) {
// exception thrown
return;
}
// Note: we want to access the Java Logger instance // Note: we want to access the Java Logger instance
// across multiple method calls, so we create a global ref // across multiple method calls, so we create a global ref
assert(jlogger != nullptr);
m_jLogger = env->NewGlobalRef(jlogger); m_jLogger = env->NewGlobalRef(jlogger);
if(m_jLogger == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
m_jLogMethodId = LoggerJni::getLogMethodId(env); m_jLogMethodId = LoggerJni::getLogMethodId(env);
if(m_jLogMethodId == nullptr) {
// exception thrown: NoSuchMethodException or OutOfMemoryError
return;
}
jobject jdebug_level = InfoLogLevelJni::DEBUG_LEVEL(env); jobject jdebug_level = InfoLogLevelJni::DEBUG_LEVEL(env);
assert(jdebug_level != nullptr); if(jdebug_level == nullptr) {
// exception thrown: NoSuchFieldError, ExceptionInInitializerError
// or OutOfMemoryError
return;
}
m_jdebug_level = env->NewGlobalRef(jdebug_level); m_jdebug_level = env->NewGlobalRef(jdebug_level);
if(m_jdebug_level == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
jobject jinfo_level = InfoLogLevelJni::INFO_LEVEL(env); jobject jinfo_level = InfoLogLevelJni::INFO_LEVEL(env);
assert(jinfo_level != nullptr); if(jinfo_level == nullptr) {
// exception thrown: NoSuchFieldError, ExceptionInInitializerError
// or OutOfMemoryError
return;
}
m_jinfo_level = env->NewGlobalRef(jinfo_level); m_jinfo_level = env->NewGlobalRef(jinfo_level);
if(m_jinfo_level == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
jobject jwarn_level = InfoLogLevelJni::WARN_LEVEL(env); jobject jwarn_level = InfoLogLevelJni::WARN_LEVEL(env);
assert(jwarn_level != nullptr); if(jwarn_level == nullptr) {
// exception thrown: NoSuchFieldError, ExceptionInInitializerError
// or OutOfMemoryError
return;
}
m_jwarn_level = env->NewGlobalRef(jwarn_level); m_jwarn_level = env->NewGlobalRef(jwarn_level);
if(m_jwarn_level == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
jobject jerror_level = InfoLogLevelJni::ERROR_LEVEL(env); jobject jerror_level = InfoLogLevelJni::ERROR_LEVEL(env);
assert(jerror_level != nullptr); if(jerror_level == nullptr) {
// exception thrown: NoSuchFieldError, ExceptionInInitializerError
// or OutOfMemoryError
return;
}
m_jerror_level = env->NewGlobalRef(jerror_level); m_jerror_level = env->NewGlobalRef(jerror_level);
if(m_jerror_level == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
jobject jfatal_level = InfoLogLevelJni::FATAL_LEVEL(env); jobject jfatal_level = InfoLogLevelJni::FATAL_LEVEL(env);
assert(jfatal_level != nullptr); if(jfatal_level == nullptr) {
// exception thrown: NoSuchFieldError, ExceptionInInitializerError
// or OutOfMemoryError
return;
}
m_jfatal_level = env->NewGlobalRef(jfatal_level); m_jfatal_level = env->NewGlobalRef(jfatal_level);
if(m_jfatal_level == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
jobject jheader_level = InfoLogLevelJni::HEADER_LEVEL(env); jobject jheader_level = InfoLogLevelJni::HEADER_LEVEL(env);
assert(jheader_level != nullptr); if(jheader_level == nullptr) {
// exception thrown: NoSuchFieldError, ExceptionInInitializerError
// or OutOfMemoryError
return;
}
m_jheader_level = env->NewGlobalRef(jheader_level); m_jheader_level = env->NewGlobalRef(jheader_level);
} if(m_jheader_level == nullptr) {
// exception thrown: OutOfMemoryError
/** return;
* Get JNIEnv for current native thread }
*/
JNIEnv* LoggerJniCallback::getJniEnv() const {
JNIEnv *env;
jint rs __attribute__((unused)) =
m_jvm->AttachCurrentThread(reinterpret_cast<void**>(&env), NULL);
assert(rs == JNI_OK);
return env;
} }
void LoggerJniCallback::Logv(const char* format, va_list ap) { void LoggerJniCallback::Logv(const char* format, va_list ap) {
@ -94,69 +147,96 @@ void LoggerJniCallback::Logv(const InfoLogLevel log_level,
break; break;
} }
// We try twice: the first time with a fixed-size stack allocated buffer, assert(format != nullptr);
// and the second time with a much larger dynamically allocated buffer. assert(ap != nullptr);
char buffer[500]; const std::unique_ptr<char[]> msg = format_str(format, ap);
for (int iter = 0; iter < 2; iter++) {
char* base;
int bufsize;
if (iter == 0) {
bufsize = sizeof(buffer);
base = buffer;
} else {
bufsize = 30000;
base = new char[bufsize];
}
char* p = base;
char* limit = base + bufsize;
// Print the message
if (p < limit) {
va_list backup_ap;
va_copy(backup_ap, ap);
p += vsnprintf(p, limit - p, format, backup_ap);
va_end(backup_ap);
}
// Truncate to available space if necessary
if (p >= limit) {
if (iter == 0) {
continue; // Try again with larger buffer
} else {
p = limit - 1;
}
}
assert(p < limit);
*p++ = '\0';
JNIEnv* env = getJniEnv();
// pass java string to callback handler // pass msg to java callback handler
env->CallVoidMethod( jboolean attached_thread = JNI_FALSE;
m_jLogger, JNIEnv* env = JniUtil::getJniEnv(m_jvm, &attached_thread);
m_jLogMethodId, assert(env != nullptr);
jlog_level,
env->NewStringUTF(base));
if (base != buffer) { jstring jmsg = env->NewStringUTF(msg.get());
delete[] base; if(jmsg == nullptr) {
// unable to construct string
if(env->ExceptionCheck()) {
env->ExceptionDescribe(); // print out exception to stderr
} }
break; JniUtil::releaseJniEnv(m_jvm, attached_thread);
return;
}
if(env->ExceptionCheck()) {
// exception thrown: OutOfMemoryError
env->ExceptionDescribe(); // print out exception to stderr
env->DeleteLocalRef(jmsg);
JniUtil::releaseJniEnv(m_jvm, attached_thread);
return;
}
env->CallVoidMethod(m_jLogger, m_jLogMethodId, jlog_level, jmsg);
if(env->ExceptionCheck()) {
// exception thrown
env->ExceptionDescribe(); // print out exception to stderr
env->DeleteLocalRef(jmsg);
JniUtil::releaseJniEnv(m_jvm, attached_thread);
return;
} }
m_jvm->DetachCurrentThread();
env->DeleteLocalRef(jmsg);
JniUtil::releaseJniEnv(m_jvm, attached_thread);
} }
} }
std::unique_ptr<char[]> LoggerJniCallback::format_str(const char* format, va_list ap) const {
va_list ap_copy;
va_copy(ap_copy, ap);
const size_t required = vsnprintf(nullptr, 0, format, ap_copy) + 1; // Extra space for '\0'
va_end(ap_copy);
std::unique_ptr<char[]> buf(new char[required]);
va_copy(ap_copy, ap);
vsnprintf(buf.get(), required, format, ap_copy);
va_end(ap_copy);
return buf;
}
LoggerJniCallback::~LoggerJniCallback() { LoggerJniCallback::~LoggerJniCallback() {
JNIEnv* env = getJniEnv(); jboolean attached_thread = JNI_FALSE;
env->DeleteGlobalRef(m_jLogger); JNIEnv* env = JniUtil::getJniEnv(m_jvm, &attached_thread);
assert(env != nullptr);
if(m_jLogger != nullptr) {
env->DeleteGlobalRef(m_jLogger);
}
if(m_jdebug_level != nullptr) {
env->DeleteGlobalRef(m_jdebug_level);
}
if(m_jinfo_level != nullptr) {
env->DeleteGlobalRef(m_jinfo_level);
}
if(m_jwarn_level != nullptr) {
env->DeleteGlobalRef(m_jwarn_level);
}
if(m_jerror_level != nullptr) {
env->DeleteGlobalRef(m_jerror_level);
}
if(m_jfatal_level != nullptr) {
env->DeleteGlobalRef(m_jfatal_level);
}
env->DeleteGlobalRef(m_jdebug_level); if(m_jheader_level != nullptr) {
env->DeleteGlobalRef(m_jinfo_level); env->DeleteGlobalRef(m_jheader_level);
env->DeleteGlobalRef(m_jwarn_level); }
env->DeleteGlobalRef(m_jerror_level);
env->DeleteGlobalRef(m_jfatal_level);
env->DeleteGlobalRef(m_jheader_level);
m_jvm->DetachCurrentThread(); JniUtil::releaseJniEnv(m_jvm, attached_thread);
} }
} // namespace rocksdb } // namespace rocksdb
@ -168,15 +248,14 @@ LoggerJniCallback::~LoggerJniCallback() {
*/ */
jlong Java_org_rocksdb_Logger_createNewLoggerOptions( jlong Java_org_rocksdb_Logger_createNewLoggerOptions(
JNIEnv* env, jobject jobj, jlong joptions) { JNIEnv* env, jobject jobj, jlong joptions) {
rocksdb::LoggerJniCallback* c = auto* sptr_logger = new std::shared_ptr<rocksdb::LoggerJniCallback>(
new rocksdb::LoggerJniCallback(env, jobj); new rocksdb::LoggerJniCallback(env, jobj));
// set log level // set log level
c->SetInfoLogLevel(reinterpret_cast<rocksdb::Options*> auto* options = reinterpret_cast<rocksdb::Options*>(joptions);
(joptions)->info_log_level); sptr_logger->get()->SetInfoLogLevel(options->info_log_level);
std::shared_ptr<rocksdb::LoggerJniCallback> *pLoggerJniCallback =
new std::shared_ptr<rocksdb::LoggerJniCallback>; return reinterpret_cast<jlong>(sptr_logger);
*pLoggerJniCallback = std::shared_ptr<rocksdb::LoggerJniCallback>(c);
return reinterpret_cast<jlong>(pLoggerJniCallback);
} }
/* /*
@ -186,15 +265,14 @@ jlong Java_org_rocksdb_Logger_createNewLoggerOptions(
*/ */
jlong Java_org_rocksdb_Logger_createNewLoggerDbOptions( jlong Java_org_rocksdb_Logger_createNewLoggerDbOptions(
JNIEnv* env, jobject jobj, jlong jdb_options) { JNIEnv* env, jobject jobj, jlong jdb_options) {
rocksdb::LoggerJniCallback* c = auto* sptr_logger = new std::shared_ptr<rocksdb::LoggerJniCallback>(
new rocksdb::LoggerJniCallback(env, jobj); new rocksdb::LoggerJniCallback(env, jobj));
// set log level // set log level
c->SetInfoLogLevel(reinterpret_cast<rocksdb::DBOptions*> auto* db_options = reinterpret_cast<rocksdb::DBOptions*>(jdb_options);
(jdb_options)->info_log_level); sptr_logger->get()->SetInfoLogLevel(db_options->info_log_level);
std::shared_ptr<rocksdb::LoggerJniCallback> *pLoggerJniCallback =
new std::shared_ptr<rocksdb::LoggerJniCallback>; return reinterpret_cast<jlong>(sptr_logger);
*pLoggerJniCallback = std::shared_ptr<rocksdb::LoggerJniCallback>(c);
return reinterpret_cast<jlong>(pLoggerJniCallback);
} }
/* /*
@ -204,9 +282,10 @@ jlong Java_org_rocksdb_Logger_createNewLoggerDbOptions(
*/ */
void Java_org_rocksdb_Logger_setInfoLogLevel( void Java_org_rocksdb_Logger_setInfoLogLevel(
JNIEnv* env, jobject jobj, jlong jhandle, jbyte jlog_level) { JNIEnv* env, jobject jobj, jlong jhandle, jbyte jlog_level) {
std::shared_ptr<rocksdb::LoggerJniCallback> *handle = auto* handle =
reinterpret_cast<std::shared_ptr<rocksdb::LoggerJniCallback> *>(jhandle); reinterpret_cast<std::shared_ptr<rocksdb::LoggerJniCallback> *>(jhandle);
(*handle)->SetInfoLogLevel(static_cast<rocksdb::InfoLogLevel>(jlog_level)); handle->get()->
SetInfoLogLevel(static_cast<rocksdb::InfoLogLevel>(jlog_level));
} }
/* /*
@ -216,9 +295,9 @@ void Java_org_rocksdb_Logger_setInfoLogLevel(
*/ */
jbyte Java_org_rocksdb_Logger_infoLogLevel( jbyte Java_org_rocksdb_Logger_infoLogLevel(
JNIEnv* env, jobject jobj, jlong jhandle) { JNIEnv* env, jobject jobj, jlong jhandle) {
std::shared_ptr<rocksdb::LoggerJniCallback> *handle = auto* handle =
reinterpret_cast<std::shared_ptr<rocksdb::LoggerJniCallback> *>(jhandle); reinterpret_cast<std::shared_ptr<rocksdb::LoggerJniCallback> *>(jhandle);
return static_cast<jbyte>((*handle)->GetInfoLogLevel()); return static_cast<jbyte>(handle->get()->GetInfoLogLevel());
} }
/* /*
@ -228,7 +307,7 @@ jbyte Java_org_rocksdb_Logger_infoLogLevel(
*/ */
void Java_org_rocksdb_Logger_disposeInternal( void Java_org_rocksdb_Logger_disposeInternal(
JNIEnv* env, jobject jobj, jlong jhandle) { JNIEnv* env, jobject jobj, jlong jhandle) {
std::shared_ptr<rocksdb::LoggerJniCallback> *handle = auto* handle =
reinterpret_cast<std::shared_ptr<rocksdb::LoggerJniCallback> *>(jhandle); reinterpret_cast<std::shared_ptr<rocksdb::LoggerJniCallback> *>(jhandle);
handle->reset(); delete handle; // delete std::shared_ptr
} }

@ -10,6 +10,7 @@
#define JAVA_ROCKSJNI_LOGGERJNICALLBACK_H_ #define JAVA_ROCKSJNI_LOGGERJNICALLBACK_H_
#include <jni.h> #include <jni.h>
#include <memory>
#include <string> #include <string>
#include "port/port.h" #include "port/port.h"
#include "rocksdb/env.h" #include "rocksdb/env.h"
@ -32,8 +33,6 @@ namespace rocksdb {
virtual void Logv(const InfoLogLevel log_level, virtual void Logv(const InfoLogLevel log_level,
const char* format, va_list ap); const char* format, va_list ap);
protected:
JNIEnv* getJniEnv() const;
private: private:
JavaVM* m_jvm; JavaVM* m_jvm;
jobject m_jLogger; jobject m_jLogger;
@ -44,6 +43,7 @@ namespace rocksdb {
jobject m_jerror_level; jobject m_jerror_level;
jobject m_jfatal_level; jobject m_jfatal_level;
jobject m_jheader_level; jobject m_jheader_level;
std::unique_ptr<char[]> format_str(const char* format, va_list ap) const;
}; };
} // namespace rocksdb } // namespace rocksdb

@ -25,13 +25,24 @@
/* /*
* Class: org_rocksdb_StringAppendOperator * Class: org_rocksdb_StringAppendOperator
* Method: newMergeOperatorHandle * Method: newSharedStringAppendOperator
* Signature: ()J * Signature: ()J
*/ */
jlong Java_org_rocksdb_StringAppendOperator_newMergeOperatorHandleImpl jlong Java_org_rocksdb_StringAppendOperator_newSharedStringAppendOperator
(JNIEnv* env, jobject jobj) { (JNIEnv* env, jclass jclazz) {
std::shared_ptr<rocksdb::MergeOperator> *op = auto* sptr_string_append_op = new std::shared_ptr<rocksdb::MergeOperator>(
new std::shared_ptr<rocksdb::MergeOperator>(); rocksdb::MergeOperators::CreateFromStringId("stringappend"));
*op = rocksdb::MergeOperators::CreateFromStringId("stringappend"); return reinterpret_cast<jlong>(sptr_string_append_op);
return reinterpret_cast<jlong>(op); }
/*
* Class: org_rocksdb_StringAppendOperator
* Method: disposeInternal
* Signature: (J)V
*/
void Java_org_rocksdb_StringAppendOperator_disposeInternal(
JNIEnv* env, jobject jobj, jlong jhandle) {
auto* sptr_string_append_op =
reinterpret_cast<std::shared_ptr<rocksdb::MergeOperator>* >(jhandle);
delete sptr_string_append_op; // delete std::shared_ptr
} }

@ -39,7 +39,7 @@
* Signature: ()J * Signature: ()J
*/ */
jlong Java_org_rocksdb_Options_newOptions__(JNIEnv* env, jclass jcls) { jlong Java_org_rocksdb_Options_newOptions__(JNIEnv* env, jclass jcls) {
rocksdb::Options* op = new rocksdb::Options(); auto* op = new rocksdb::Options();
return reinterpret_cast<jlong>(op); return reinterpret_cast<jlong>(op);
} }
@ -53,7 +53,7 @@ jlong Java_org_rocksdb_Options_newOptions__JJ(JNIEnv* env, jclass jcls,
auto* dbOpt = reinterpret_cast<const rocksdb::DBOptions*>(jdboptions); auto* dbOpt = reinterpret_cast<const rocksdb::DBOptions*>(jdboptions);
auto* cfOpt = reinterpret_cast<const rocksdb::ColumnFamilyOptions*>( auto* cfOpt = reinterpret_cast<const rocksdb::ColumnFamilyOptions*>(
jcfoptions); jcfoptions);
rocksdb::Options* op = new rocksdb::Options(*dbOpt, *cfOpt); auto* op = new rocksdb::Options(*dbOpt, *cfOpt);
return reinterpret_cast<jlong>(op); return reinterpret_cast<jlong>(op);
} }
@ -64,7 +64,9 @@ jlong Java_org_rocksdb_Options_newOptions__JJ(JNIEnv* env, jclass jcls,
*/ */
void Java_org_rocksdb_Options_disposeInternal( void Java_org_rocksdb_Options_disposeInternal(
JNIEnv* env, jobject jobj, jlong handle) { JNIEnv* env, jobject jobj, jlong handle) {
delete reinterpret_cast<rocksdb::Options*>(handle); auto* op = reinterpret_cast<rocksdb::Options*>(handle);
assert(op != nullptr);
delete op;
} }
/* /*
@ -157,10 +159,16 @@ void Java_org_rocksdb_Options_setComparatorHandle__JJ(
*/ */
void Java_org_rocksdb_Options_setMergeOperatorName( void Java_org_rocksdb_Options_setMergeOperatorName(
JNIEnv* env, jobject jobj, jlong jhandle, jstring jop_name) { JNIEnv* env, jobject jobj, jlong jhandle, jstring jop_name) {
auto options = reinterpret_cast<rocksdb::Options*>(jhandle); const char* op_name = env->GetStringUTFChars(jop_name, nullptr);
const char* op_name = env->GetStringUTFChars(jop_name, 0); if(op_name == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
auto* options = reinterpret_cast<rocksdb::Options*>(jhandle);
options->merge_operator = rocksdb::MergeOperators::CreateFromStringId( options->merge_operator = rocksdb::MergeOperators::CreateFromStringId(
op_name); op_name);
env->ReleaseStringUTFChars(jop_name, op_name); env->ReleaseStringUTFChars(jop_name, op_name);
} }
@ -231,7 +239,7 @@ void Java_org_rocksdb_Options_createStatistics(
*/ */
jlong Java_org_rocksdb_Options_statisticsPtr( jlong Java_org_rocksdb_Options_statisticsPtr(
JNIEnv* env, jobject jobj, jlong jOptHandle) { JNIEnv* env, jobject jobj, jlong jOptHandle) {
auto st = reinterpret_cast<rocksdb::Options*>(jOptHandle)->statistics.get(); auto* st = reinterpret_cast<rocksdb::Options*>(jOptHandle)->statistics.get();
return reinterpret_cast<jlong>(st); return reinterpret_cast<jlong>(st);
} }
@ -381,7 +389,11 @@ jstring Java_org_rocksdb_Options_dbLogDir(
*/ */
void Java_org_rocksdb_Options_setDbLogDir( void Java_org_rocksdb_Options_setDbLogDir(
JNIEnv* env, jobject jobj, jlong jhandle, jstring jdb_log_dir) { JNIEnv* env, jobject jobj, jlong jhandle, jstring jdb_log_dir) {
const char* log_dir = env->GetStringUTFChars(jdb_log_dir, 0); const char* log_dir = env->GetStringUTFChars(jdb_log_dir, nullptr);
if(log_dir == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
reinterpret_cast<rocksdb::Options*>(jhandle)->db_log_dir.assign(log_dir); reinterpret_cast<rocksdb::Options*>(jhandle)->db_log_dir.assign(log_dir);
env->ReleaseStringUTFChars(jdb_log_dir, log_dir); env->ReleaseStringUTFChars(jdb_log_dir, log_dir);
} }
@ -404,7 +416,11 @@ jstring Java_org_rocksdb_Options_walDir(
*/ */
void Java_org_rocksdb_Options_setWalDir( void Java_org_rocksdb_Options_setWalDir(
JNIEnv* env, jobject jobj, jlong jhandle, jstring jwal_dir) { JNIEnv* env, jobject jobj, jlong jhandle, jstring jwal_dir) {
const char* wal_dir = env->GetStringUTFChars(jwal_dir, 0); const char* wal_dir = env->GetStringUTFChars(jwal_dir, nullptr);
if(wal_dir == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
reinterpret_cast<rocksdb::Options*>(jhandle)->wal_dir.assign(wal_dir); reinterpret_cast<rocksdb::Options*>(jhandle)->wal_dir.assign(wal_dir);
env->ReleaseStringUTFChars(jwal_dir, wal_dir); env->ReleaseStringUTFChars(jwal_dir, wal_dir);
} }
@ -494,8 +510,7 @@ void Java_org_rocksdb_Options_setMaxSubcompactions(
*/ */
jint Java_org_rocksdb_Options_maxSubcompactions( jint Java_org_rocksdb_Options_maxSubcompactions(
JNIEnv* env, jobject jobj, jlong jhandle) { JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle) return reinterpret_cast<rocksdb::Options*>(jhandle)->max_subcompactions;
->max_subcompactions;
} }
/* /*
@ -641,7 +656,7 @@ jlong Java_org_rocksdb_Options_maxManifestFileSize(
*/ */
jstring Java_org_rocksdb_Options_memTableFactoryName( jstring Java_org_rocksdb_Options_memTableFactoryName(
JNIEnv* env, jobject jobj, jlong jhandle) { JNIEnv* env, jobject jobj, jlong jhandle) {
auto opt = reinterpret_cast<rocksdb::Options*>(jhandle); auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
rocksdb::MemTableRepFactory* tf = opt->memtable_factory.get(); rocksdb::MemTableRepFactory* tf = opt->memtable_factory.get();
// Should never be nullptr. // Should never be nullptr.
@ -677,17 +692,6 @@ void Java_org_rocksdb_Options_setMemTableFactory(
reinterpret_cast<rocksdb::MemTableRepFactory*>(jfactory_handle)); reinterpret_cast<rocksdb::MemTableRepFactory*>(jfactory_handle));
} }
/*
* Class: org_rocksdb_Options
* Method: setOldRateLimiter
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_setOldRateLimiter(
JNIEnv* env, jobject jobj, jlong jhandle, jlong jrate_limiter_handle) {
reinterpret_cast<rocksdb::Options*>(jhandle)->rate_limiter.reset(
reinterpret_cast<rocksdb::RateLimiter*>(jrate_limiter_handle));
}
/* /*
* Class: org_rocksdb_Options * Class: org_rocksdb_Options
* Method: setRateLimiter * Method: setRateLimiter
@ -1144,7 +1148,7 @@ jlong Java_org_rocksdb_Options_writeThreadSlowYieldUsec(
*/ */
jstring Java_org_rocksdb_Options_tableFactoryName( jstring Java_org_rocksdb_Options_tableFactoryName(
JNIEnv* env, jobject jobj, jlong jhandle) { JNIEnv* env, jobject jobj, jlong jhandle) {
auto opt = reinterpret_cast<rocksdb::Options*>(jhandle); auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
rocksdb::TableFactory* tf = opt->table_factory.get(); rocksdb::TableFactory* tf = opt->table_factory.get();
// Should never be nullptr. // Should never be nullptr.
@ -1224,46 +1228,78 @@ jbyte Java_org_rocksdb_Options_compressionType(
return reinterpret_cast<rocksdb::Options*>(jhandle)->compression; return reinterpret_cast<rocksdb::Options*>(jhandle)->compression;
} }
/* /**
* Helper method to convert a Java list to a CompressionType * Helper method to convert a Java byte array of compression levels
* vector. * to a C++ vector of rocksdb::CompressionType
*/ *
std::vector<rocksdb::CompressionType> rocksdb_compression_vector_helper( * @param env A pointer to the Java environment
JNIEnv* env, jbyteArray jcompressionLevels) { * @param jcompression_levels A reference to a java byte array
std::vector<rocksdb::CompressionType> compressionLevels; * where each byte indicates a compression level
*
* @return A unique_ptr to the vector, or unique_ptr(nullptr) if a JNI exception occurs
*/
std::unique_ptr<std::vector<rocksdb::CompressionType>> rocksdb_compression_vector_helper(
JNIEnv* env, jbyteArray jcompression_levels) {
jsize len = env->GetArrayLength(jcompression_levels);
jbyte* jcompression_level =
env->GetByteArrayElements(jcompression_levels, nullptr);
if(jcompression_level == nullptr) {
// exception thrown: OutOfMemoryError
return std::unique_ptr<std::vector<rocksdb::CompressionType>>();
}
auto* compression_levels = new std::vector<rocksdb::CompressionType>();
std::unique_ptr<std::vector<rocksdb::CompressionType>> uptr_compression_levels(compression_levels);
jsize len = env->GetArrayLength(jcompressionLevels); for(jsize i = 0; i < len; i++) {
jbyte* jcompressionLevel = env->GetByteArrayElements(jcompressionLevels, jbyte jcl = jcompression_level[i];
NULL); compression_levels->push_back(static_cast<rocksdb::CompressionType>(jcl));
for(int i = 0; i < len; i++) {
jbyte jcl;
jcl = jcompressionLevel[i];
compressionLevels.push_back(static_cast<rocksdb::CompressionType>(jcl));
} }
env->ReleaseByteArrayElements(jcompressionLevels, jcompressionLevel,
env->ReleaseByteArrayElements(jcompression_levels, jcompression_level,
JNI_ABORT); JNI_ABORT);
return compressionLevels; return uptr_compression_levels;
} }
/* /**
* Helper method to convert a CompressionType vector to a Java * Helper method to convert a C++ vector of rocksdb::CompressionType
* List. * to a Java byte array of compression levels
*
* @param env A pointer to the Java environment
* @param jcompression_levels A reference to a java byte array
* where each byte indicates a compression level
*
* @return A jbytearray or nullptr if an exception occurs
*/ */
jbyteArray rocksdb_compression_list_helper(JNIEnv* env, jbyteArray rocksdb_compression_list_helper(JNIEnv* env,
std::vector<rocksdb::CompressionType> compressionLevels) { std::vector<rocksdb::CompressionType> compression_levels) {
std::unique_ptr<jbyte[]> jbuf = const size_t len = compression_levels.size();
std::unique_ptr<jbyte[]>(new jbyte[compressionLevels.size()]); jbyte* jbuf = new jbyte[len];
for (std::vector<rocksdb::CompressionType>::size_type i = 0;
i != compressionLevels.size(); i++) { for (size_t i = 0; i < len; i++) {
jbuf[i] = compressionLevels[i]; jbuf[i] = compression_levels[i];
} }
// insert in java array // insert in java array
jbyteArray jcompressionLevels = env->NewByteArray( jbyteArray jcompression_levels = env->NewByteArray(static_cast<jsize>(len));
static_cast<jsize>(compressionLevels.size())); if(jcompression_levels == nullptr) {
env->SetByteArrayRegion(jcompressionLevels, 0, // exception thrown: OutOfMemoryError
static_cast<jsize>(compressionLevels.size()), jbuf.get()); delete [] jbuf;
return jcompressionLevels; return nullptr;
}
env->SetByteArrayRegion(jcompression_levels, 0, static_cast<jsize>(len),
jbuf);
if(env->ExceptionCheck()) {
// exception thrown: ArrayIndexOutOfBoundsException
env->DeleteLocalRef(jcompression_levels);
delete [] jbuf;
return nullptr;
}
delete [] jbuf;
return jcompression_levels;
} }
/* /*
@ -1274,10 +1310,14 @@ jbyteArray rocksdb_compression_list_helper(JNIEnv* env,
void Java_org_rocksdb_Options_setCompressionPerLevel( void Java_org_rocksdb_Options_setCompressionPerLevel(
JNIEnv* env, jobject jobj, jlong jhandle, JNIEnv* env, jobject jobj, jlong jhandle,
jbyteArray jcompressionLevels) { jbyteArray jcompressionLevels) {
auto* options = reinterpret_cast<rocksdb::Options*>(jhandle); auto uptr_compression_levels =
std::vector<rocksdb::CompressionType> compressionLevels =
rocksdb_compression_vector_helper(env, jcompressionLevels); rocksdb_compression_vector_helper(env, jcompressionLevels);
options->compression_per_level = compressionLevels; if(!uptr_compression_levels) {
// exception occurred
return;
}
auto* options = reinterpret_cast<rocksdb::Options*>(jhandle);
options->compression_per_level = *(uptr_compression_levels.get());
} }
/* /*
@ -1946,7 +1986,6 @@ jlong Java_org_rocksdb_Options_memtableHugePageSize(
void Java_org_rocksdb_Options_setMemtableHugePageSize( void Java_org_rocksdb_Options_setMemtableHugePageSize(
JNIEnv* env, jobject jobj, jlong jhandle, JNIEnv* env, jobject jobj, jlong jhandle,
jlong jmemtable_huge_page_size) { jlong jmemtable_huge_page_size) {
rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t( rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(
jmemtable_huge_page_size); jmemtable_huge_page_size);
if (s.ok()) { if (s.ok()) {
@ -2083,8 +2122,9 @@ void Java_org_rocksdb_Options_setLevel0StopWritesTrigger(
*/ */
jintArray Java_org_rocksdb_Options_maxBytesForLevelMultiplierAdditional( jintArray Java_org_rocksdb_Options_maxBytesForLevelMultiplierAdditional(
JNIEnv* env, jobject jobj, jlong jhandle) { JNIEnv* env, jobject jobj, jlong jhandle) {
auto mbflma = reinterpret_cast<rocksdb::Options*>( auto mbflma =
jhandle)->max_bytes_for_level_multiplier_additional; reinterpret_cast<rocksdb::Options*>(jhandle)->
max_bytes_for_level_multiplier_additional;
const size_t size = mbflma.size(); const size_t size = mbflma.size();
@ -2095,7 +2135,19 @@ jintArray Java_org_rocksdb_Options_maxBytesForLevelMultiplierAdditional(
jsize jlen = static_cast<jsize>(size); jsize jlen = static_cast<jsize>(size);
jintArray result = env->NewIntArray(jlen); jintArray result = env->NewIntArray(jlen);
if(result == nullptr) {
// exception thrown: OutOfMemoryError
delete [] additionals;
return nullptr;
}
env->SetIntArrayRegion(result, 0, jlen, additionals); env->SetIntArrayRegion(result, 0, jlen, additionals);
if(env->ExceptionCheck()) {
// exception thrown: ArrayIndexOutOfBoundsException
env->DeleteLocalRef(result);
delete [] additionals;
return nullptr;
}
delete [] additionals; delete [] additionals;
@ -2112,12 +2164,20 @@ void Java_org_rocksdb_Options_setMaxBytesForLevelMultiplierAdditional(
jintArray jmax_bytes_for_level_multiplier_additional) { jintArray jmax_bytes_for_level_multiplier_additional) {
jsize len = env->GetArrayLength(jmax_bytes_for_level_multiplier_additional); jsize len = env->GetArrayLength(jmax_bytes_for_level_multiplier_additional);
jint *additionals = jint *additionals =
env->GetIntArrayElements(jmax_bytes_for_level_multiplier_additional, 0); env->GetIntArrayElements(jmax_bytes_for_level_multiplier_additional, nullptr);
if(additionals == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle); auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
opt->max_bytes_for_level_multiplier_additional.clear(); opt->max_bytes_for_level_multiplier_additional.clear();
for (jsize i = 0; i < len; i++) { for (jsize i = 0; i < len; i++) {
opt->max_bytes_for_level_multiplier_additional.push_back(static_cast<int32_t>(additionals[i])); opt->max_bytes_for_level_multiplier_additional.push_back(static_cast<int32_t>(additionals[i]));
} }
env->ReleaseIntArrayElements(jmax_bytes_for_level_multiplier_additional,
additionals, JNI_ABORT);
} }
/* /*
@ -2153,7 +2213,7 @@ void Java_org_rocksdb_Options_setParanoidFileChecks(
*/ */
jlong Java_org_rocksdb_ColumnFamilyOptions_newColumnFamilyOptions( jlong Java_org_rocksdb_ColumnFamilyOptions_newColumnFamilyOptions(
JNIEnv* env, jclass jcls) { JNIEnv* env, jclass jcls) {
rocksdb::ColumnFamilyOptions* op = new rocksdb::ColumnFamilyOptions(); auto* op = new rocksdb::ColumnFamilyOptions();
return reinterpret_cast<jlong>(op); return reinterpret_cast<jlong>(op);
} }
@ -2164,14 +2224,20 @@ jlong Java_org_rocksdb_ColumnFamilyOptions_newColumnFamilyOptions(
*/ */
jlong Java_org_rocksdb_ColumnFamilyOptions_getColumnFamilyOptionsFromProps( jlong Java_org_rocksdb_ColumnFamilyOptions_getColumnFamilyOptionsFromProps(
JNIEnv* env, jclass jclazz, jstring jopt_string) { JNIEnv* env, jclass jclazz, jstring jopt_string) {
jlong ret_value = 0; const char* opt_string = env->GetStringUTFChars(jopt_string, nullptr);
rocksdb::ColumnFamilyOptions* cf_options = if(opt_string == nullptr) {
new rocksdb::ColumnFamilyOptions(); // exception thrown: OutOfMemoryError
const char* opt_string = env->GetStringUTFChars(jopt_string, 0); return 0;
}
auto* cf_options = new rocksdb::ColumnFamilyOptions();
rocksdb::Status status = rocksdb::GetColumnFamilyOptionsFromString( rocksdb::Status status = rocksdb::GetColumnFamilyOptionsFromString(
rocksdb::ColumnFamilyOptions(), opt_string, cf_options); rocksdb::ColumnFamilyOptions(), opt_string, cf_options);
env->ReleaseStringUTFChars(jopt_string, opt_string); env->ReleaseStringUTFChars(jopt_string, opt_string);
// Check if ColumnFamilyOptions creation was possible. // Check if ColumnFamilyOptions creation was possible.
jlong ret_value = 0;
if (status.ok()) { if (status.ok()) {
ret_value = reinterpret_cast<jlong>(cf_options); ret_value = reinterpret_cast<jlong>(cf_options);
} else { } else {
@ -2189,7 +2255,9 @@ jlong Java_org_rocksdb_ColumnFamilyOptions_getColumnFamilyOptionsFromProps(
*/ */
void Java_org_rocksdb_ColumnFamilyOptions_disposeInternal( void Java_org_rocksdb_ColumnFamilyOptions_disposeInternal(
JNIEnv* env, jobject jobj, jlong handle) { JNIEnv* env, jobject jobj, jlong handle) {
delete reinterpret_cast<rocksdb::ColumnFamilyOptions*>(handle); auto* cfo = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(handle);
assert(cfo != nullptr);
delete cfo;
} }
/* /*
@ -2265,10 +2333,15 @@ void Java_org_rocksdb_ColumnFamilyOptions_setComparatorHandle__JJ(
*/ */
void Java_org_rocksdb_ColumnFamilyOptions_setMergeOperatorName( void Java_org_rocksdb_ColumnFamilyOptions_setMergeOperatorName(
JNIEnv* env, jobject jobj, jlong jhandle, jstring jop_name) { JNIEnv* env, jobject jobj, jlong jhandle, jstring jop_name) {
auto options = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle); auto* options = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
const char* op_name = env->GetStringUTFChars(jop_name, 0); const char* op_name = env->GetStringUTFChars(jop_name, nullptr);
options->merge_operator = rocksdb::MergeOperators::CreateFromStringId( if(op_name == nullptr) {
op_name); // exception thrown: OutOfMemoryError
return;
}
options->merge_operator =
rocksdb::MergeOperators::CreateFromStringId(op_name);
env->ReleaseStringUTFChars(jop_name, op_name); env->ReleaseStringUTFChars(jop_name, op_name);
} }
@ -2364,7 +2437,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setMemTableFactory(
*/ */
jstring Java_org_rocksdb_ColumnFamilyOptions_memTableFactoryName( jstring Java_org_rocksdb_ColumnFamilyOptions_memTableFactoryName(
JNIEnv* env, jobject jobj, jlong jhandle) { JNIEnv* env, jobject jobj, jlong jhandle) {
auto opt = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle); auto* opt = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
rocksdb::MemTableRepFactory* tf = opt->memtable_factory.get(); rocksdb::MemTableRepFactory* tf = opt->memtable_factory.get();
// Should never be nullptr. // Should never be nullptr.
@ -2418,7 +2491,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setTableFactory(
*/ */
jstring Java_org_rocksdb_ColumnFamilyOptions_tableFactoryName( jstring Java_org_rocksdb_ColumnFamilyOptions_tableFactoryName(
JNIEnv* env, jobject jobj, jlong jhandle) { JNIEnv* env, jobject jobj, jlong jhandle) {
auto opt = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle); auto* opt = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
rocksdb::TableFactory* tf = opt->table_factory.get(); rocksdb::TableFactory* tf = opt->table_factory.get();
// Should never be nullptr. // Should never be nullptr.
@ -2508,9 +2581,13 @@ void Java_org_rocksdb_ColumnFamilyOptions_setCompressionPerLevel(
JNIEnv* env, jobject jobj, jlong jhandle, JNIEnv* env, jobject jobj, jlong jhandle,
jbyteArray jcompressionLevels) { jbyteArray jcompressionLevels) {
auto* options = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle); auto* options = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
std::vector<rocksdb::CompressionType> compressionLevels = auto uptr_compression_levels =
rocksdb_compression_vector_helper(env, jcompressionLevels); rocksdb_compression_vector_helper(env, jcompressionLevels);
options->compression_per_level = compressionLevels; if(!uptr_compression_levels) {
// exception occurred
return;
}
options->compression_per_level = *(uptr_compression_levels.get());
} }
/* /*
@ -2520,9 +2597,9 @@ void Java_org_rocksdb_ColumnFamilyOptions_setCompressionPerLevel(
*/ */
jbyteArray Java_org_rocksdb_ColumnFamilyOptions_compressionPerLevel( jbyteArray Java_org_rocksdb_ColumnFamilyOptions_compressionPerLevel(
JNIEnv* env, jobject jobj, jlong jhandle) { JNIEnv* env, jobject jobj, jlong jhandle) {
auto* options = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle); auto* cf_options = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
return rocksdb_compression_list_helper(env, return rocksdb_compression_list_helper(env,
options->compression_per_level); cf_options->compression_per_level);
} }
/* /*
@ -2668,7 +2745,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setLevelZeroStopWritesTrigger(
*/ */
jint Java_org_rocksdb_ColumnFamilyOptions_maxMemCompactionLevel( jint Java_org_rocksdb_ColumnFamilyOptions_maxMemCompactionLevel(
JNIEnv* env, jobject jobj, jlong jhandle) { JNIEnv* env, jobject jobj, jlong jhandle) {
return 0; return 0; // deprecated and intentionally not implemented, see the Java code
} }
/* /*
@ -2677,7 +2754,9 @@ jint Java_org_rocksdb_ColumnFamilyOptions_maxMemCompactionLevel(
* Signature: (JI)V * Signature: (JI)V
*/ */
void Java_org_rocksdb_ColumnFamilyOptions_setMaxMemCompactionLevel( void Java_org_rocksdb_ColumnFamilyOptions_setMaxMemCompactionLevel(
JNIEnv* env, jobject jobj, jlong jhandle, jint jmax_mem_compaction_level) {} JNIEnv* env, jobject jobj, jlong jhandle, jint jmax_mem_compaction_level) {
// deprecated and intentionally not implemented, see the Java code
}
/* /*
* Class: org_rocksdb_ColumnFamilyOptions * Class: org_rocksdb_ColumnFamilyOptions
@ -3308,9 +3387,19 @@ jintArray Java_org_rocksdb_ColumnFamilyOptions_maxBytesForLevelMultiplierAdditio
} }
jsize jlen = static_cast<jsize>(size); jsize jlen = static_cast<jsize>(size);
jintArray result; jintArray result = env->NewIntArray(jlen);
result = env->NewIntArray(jlen); if(result == nullptr) {
// exception thrown: OutOfMemoryError
delete [] additionals;
return nullptr;
}
env->SetIntArrayRegion(result, 0, jlen, additionals); env->SetIntArrayRegion(result, 0, jlen, additionals);
if(env->ExceptionCheck()) {
// exception thrown: ArrayIndexOutOfBoundsException
env->DeleteLocalRef(result);
delete [] additionals;
return nullptr;
}
delete [] additionals; delete [] additionals;
@ -3328,11 +3417,19 @@ void Java_org_rocksdb_ColumnFamilyOptions_setMaxBytesForLevelMultiplierAdditiona
jsize len = env->GetArrayLength(jmax_bytes_for_level_multiplier_additional); jsize len = env->GetArrayLength(jmax_bytes_for_level_multiplier_additional);
jint *additionals = jint *additionals =
env->GetIntArrayElements(jmax_bytes_for_level_multiplier_additional, 0); env->GetIntArrayElements(jmax_bytes_for_level_multiplier_additional, 0);
if(additionals == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
auto* cf_opt = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle); auto* cf_opt = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
cf_opt->max_bytes_for_level_multiplier_additional.clear(); cf_opt->max_bytes_for_level_multiplier_additional.clear();
for (jsize i = 0; i < len; i++) { for (jsize i = 0; i < len; i++) {
cf_opt->max_bytes_for_level_multiplier_additional.push_back(static_cast<int32_t>(additionals[i])); cf_opt->max_bytes_for_level_multiplier_additional.push_back(static_cast<int32_t>(additionals[i]));
} }
env->ReleaseIntArrayElements(jmax_bytes_for_level_multiplier_additional,
additionals, JNI_ABORT);
} }
/* /*
@ -3369,7 +3466,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setParanoidFileChecks(
*/ */
jlong Java_org_rocksdb_DBOptions_newDBOptions(JNIEnv* env, jlong Java_org_rocksdb_DBOptions_newDBOptions(JNIEnv* env,
jclass jcls) { jclass jcls) {
rocksdb::DBOptions* dbop = new rocksdb::DBOptions(); auto* dbop = new rocksdb::DBOptions();
return reinterpret_cast<jlong>(dbop); return reinterpret_cast<jlong>(dbop);
} }
@ -3380,14 +3477,20 @@ jlong Java_org_rocksdb_DBOptions_newDBOptions(JNIEnv* env,
*/ */
jlong Java_org_rocksdb_DBOptions_getDBOptionsFromProps( jlong Java_org_rocksdb_DBOptions_getDBOptionsFromProps(
JNIEnv* env, jclass jclazz, jstring jopt_string) { JNIEnv* env, jclass jclazz, jstring jopt_string) {
jlong ret_value = 0; const char* opt_string = env->GetStringUTFChars(jopt_string, nullptr);
rocksdb::DBOptions* db_options = if(opt_string == nullptr) {
new rocksdb::DBOptions(); // exception thrown: OutOfMemoryError
const char* opt_string = env->GetStringUTFChars(jopt_string, 0); return 0;
}
auto* db_options = new rocksdb::DBOptions();
rocksdb::Status status = rocksdb::GetDBOptionsFromString( rocksdb::Status status = rocksdb::GetDBOptionsFromString(
rocksdb::DBOptions(), opt_string, db_options); rocksdb::DBOptions(), opt_string, db_options);
env->ReleaseStringUTFChars(jopt_string, opt_string); env->ReleaseStringUTFChars(jopt_string, opt_string);
// Check if DBOptions creation was possible. // Check if DBOptions creation was possible.
jlong ret_value = 0;
if (status.ok()) { if (status.ok()) {
ret_value = reinterpret_cast<jlong>(db_options); ret_value = reinterpret_cast<jlong>(db_options);
} else { } else {
@ -3405,7 +3508,9 @@ jlong Java_org_rocksdb_DBOptions_getDBOptionsFromProps(
*/ */
void Java_org_rocksdb_DBOptions_disposeInternal( void Java_org_rocksdb_DBOptions_disposeInternal(
JNIEnv* env, jobject jobj, jlong handle) { JNIEnv* env, jobject jobj, jlong handle) {
delete reinterpret_cast<rocksdb::DBOptions*>(handle); auto* dbo = reinterpret_cast<rocksdb::DBOptions*>(handle);
assert(dbo != nullptr);
delete dbo;
} }
/* /*
@ -3505,17 +3610,6 @@ jboolean Java_org_rocksdb_DBOptions_paranoidChecks(
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->paranoid_checks; return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->paranoid_checks;
} }
/*
* Class: org_rocksdb_DBOptions
* Method: setOldRateLimiter
* Signature: (JJ)V
*/
void Java_org_rocksdb_DBOptions_setOldRateLimiter(
JNIEnv* env, jobject jobj, jlong jhandle, jlong jrate_limiter_handle) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->rate_limiter.reset(
reinterpret_cast<rocksdb::RateLimiter*>(jrate_limiter_handle));
}
/* /*
* Class: org_rocksdb_DBOptions * Class: org_rocksdb_DBOptions
* Method: setRateLimiter * Method: setRateLimiter
@ -3626,7 +3720,7 @@ void Java_org_rocksdb_DBOptions_createStatistics(
*/ */
jlong Java_org_rocksdb_DBOptions_statisticsPtr( jlong Java_org_rocksdb_DBOptions_statisticsPtr(
JNIEnv* env, jobject jobj, jlong jOptHandle) { JNIEnv* env, jobject jobj, jlong jOptHandle) {
auto st = reinterpret_cast<rocksdb::DBOptions*>(jOptHandle)-> auto* st = reinterpret_cast<rocksdb::DBOptions*>(jOptHandle)->
statistics.get(); statistics.get();
return reinterpret_cast<jlong>(st); return reinterpret_cast<jlong>(st);
} }
@ -3659,7 +3753,12 @@ jboolean Java_org_rocksdb_DBOptions_useFsync(
*/ */
void Java_org_rocksdb_DBOptions_setDbLogDir( void Java_org_rocksdb_DBOptions_setDbLogDir(
JNIEnv* env, jobject jobj, jlong jhandle, jstring jdb_log_dir) { JNIEnv* env, jobject jobj, jlong jhandle, jstring jdb_log_dir) {
const char* log_dir = env->GetStringUTFChars(jdb_log_dir, 0); const char* log_dir = env->GetStringUTFChars(jdb_log_dir, nullptr);
if(log_dir == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->db_log_dir.assign(log_dir); reinterpret_cast<rocksdb::DBOptions*>(jhandle)->db_log_dir.assign(log_dir);
env->ReleaseStringUTFChars(jdb_log_dir, log_dir); env->ReleaseStringUTFChars(jdb_log_dir, log_dir);
} }
@ -4307,19 +4406,17 @@ jlong Java_org_rocksdb_DBOptions_writeThreadSlowYieldUsec(
} }
void Java_org_rocksdb_DBOptions_setDelayedWriteRate( void Java_org_rocksdb_DBOptions_setDelayedWriteRate(
JNIEnv* env, jobject jobj, jlong jhandle, jlong delay_write_rate){ JNIEnv* env, jobject jobj, jlong jhandle, jlong delay_write_rate) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->delayed_write_rate =
reinterpret_cast<rocksdb::DBOptions*>(jhandle)-> static_cast<int64_t>(delay_write_rate);
delayed_write_rate = static_cast<int64_t>(delay_write_rate); }
}
jlong Java_org_rocksdb_DBOptions_delayedWriteRate( jlong Java_org_rocksdb_DBOptions_delayedWriteRate(
JNIEnv* env, jobject jobj, jlong jhandle){ JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->
delayed_write_rate;
}
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->
delayed_write_rate;
}
////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////
// rocksdb::WriteOptions // rocksdb::WriteOptions
@ -4330,7 +4427,7 @@ void Java_org_rocksdb_DBOptions_setDelayedWriteRate(
*/ */
jlong Java_org_rocksdb_WriteOptions_newWriteOptions( jlong Java_org_rocksdb_WriteOptions_newWriteOptions(
JNIEnv* env, jclass jcls) { JNIEnv* env, jclass jcls) {
rocksdb::WriteOptions* op = new rocksdb::WriteOptions(); auto* op = new rocksdb::WriteOptions();
return reinterpret_cast<jlong>(op); return reinterpret_cast<jlong>(op);
} }
@ -4341,7 +4438,8 @@ jlong Java_org_rocksdb_WriteOptions_newWriteOptions(
*/ */
void Java_org_rocksdb_WriteOptions_disposeInternal( void Java_org_rocksdb_WriteOptions_disposeInternal(
JNIEnv* env, jobject jwrite_options, jlong jhandle) { JNIEnv* env, jobject jwrite_options, jlong jhandle) {
auto write_options = reinterpret_cast<rocksdb::WriteOptions*>(jhandle); auto* write_options = reinterpret_cast<rocksdb::WriteOptions*>(jhandle);
assert(write_options != nullptr);
delete write_options; delete write_options;
} }
@ -4395,8 +4493,8 @@ jboolean Java_org_rocksdb_WriteOptions_disableWAL(
*/ */
jlong Java_org_rocksdb_ReadOptions_newReadOptions( jlong Java_org_rocksdb_ReadOptions_newReadOptions(
JNIEnv* env, jclass jcls) { JNIEnv* env, jclass jcls) {
auto read_opt = new rocksdb::ReadOptions(); auto* read_options = new rocksdb::ReadOptions();
return reinterpret_cast<jlong>(read_opt); return reinterpret_cast<jlong>(read_options);
} }
/* /*
@ -4406,7 +4504,9 @@ jlong Java_org_rocksdb_ReadOptions_newReadOptions(
*/ */
void Java_org_rocksdb_ReadOptions_disposeInternal( void Java_org_rocksdb_ReadOptions_disposeInternal(
JNIEnv* env, jobject jobj, jlong jhandle) { JNIEnv* env, jobject jobj, jlong jhandle) {
delete reinterpret_cast<rocksdb::ReadOptions*>(jhandle); auto* read_options = reinterpret_cast<rocksdb::ReadOptions*>(jhandle);
assert(read_options != nullptr);
delete read_options;
} }
/* /*
@ -4613,7 +4713,7 @@ void Java_org_rocksdb_ReadOptions_setReadTier(
*/ */
jlong Java_org_rocksdb_ComparatorOptions_newComparatorOptions( jlong Java_org_rocksdb_ComparatorOptions_newComparatorOptions(
JNIEnv* env, jclass jcls) { JNIEnv* env, jclass jcls) {
auto comparator_opt = new rocksdb::ComparatorJniCallbackOptions(); auto* comparator_opt = new rocksdb::ComparatorJniCallbackOptions();
return reinterpret_cast<jlong>(comparator_opt); return reinterpret_cast<jlong>(comparator_opt);
} }
@ -4646,7 +4746,10 @@ void Java_org_rocksdb_ComparatorOptions_setUseAdaptiveMutex(
*/ */
void Java_org_rocksdb_ComparatorOptions_disposeInternal( void Java_org_rocksdb_ComparatorOptions_disposeInternal(
JNIEnv * env, jobject jobj, jlong jhandle) { JNIEnv * env, jobject jobj, jlong jhandle) {
delete reinterpret_cast<rocksdb::ComparatorJniCallbackOptions*>(jhandle); auto* comparator_opt =
reinterpret_cast<rocksdb::ComparatorJniCallbackOptions*>(jhandle);
assert(comparator_opt != nullptr);
delete comparator_opt;
} }
///////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////
@ -4659,7 +4762,7 @@ void Java_org_rocksdb_ComparatorOptions_disposeInternal(
*/ */
jlong Java_org_rocksdb_FlushOptions_newFlushOptions( jlong Java_org_rocksdb_FlushOptions_newFlushOptions(
JNIEnv* env, jclass jcls) { JNIEnv* env, jclass jcls) {
auto flush_opt = new rocksdb::FlushOptions(); auto* flush_opt = new rocksdb::FlushOptions();
return reinterpret_cast<jlong>(flush_opt); return reinterpret_cast<jlong>(flush_opt);
} }
@ -4692,5 +4795,7 @@ jboolean Java_org_rocksdb_FlushOptions_waitForFlush(
*/ */
void Java_org_rocksdb_FlushOptions_disposeInternal( void Java_org_rocksdb_FlushOptions_disposeInternal(
JNIEnv * env, jobject jobj, jlong jhandle) { JNIEnv * env, jobject jobj, jlong jhandle) {
delete reinterpret_cast<rocksdb::FlushOptions*>(jhandle); auto* flush_opt = reinterpret_cast<rocksdb::FlushOptions*>(jhandle);
assert(flush_opt != nullptr);
delete flush_opt;
} }

File diff suppressed because it is too large Load Diff

@ -6,24 +6,9 @@
// This file implements the "bridge" between Java and C++ for RateLimiter. // This file implements the "bridge" between Java and C++ for RateLimiter.
#include "rocksjni/portal.h" #include "rocksjni/portal.h"
#include "include/org_rocksdb_GenericRateLimiterConfig.h"
#include "include/org_rocksdb_RateLimiter.h" #include "include/org_rocksdb_RateLimiter.h"
#include "rocksdb/rate_limiter.h" #include "rocksdb/rate_limiter.h"
/*
* Class: org_rocksdb_GenericRateLimiterConfig
* Method: newRateLimiterHandle
* Signature: (JJI)J
*/
jlong Java_org_rocksdb_GenericRateLimiterConfig_newRateLimiterHandle(
JNIEnv* env, jobject jobj, jlong jrate_bytes_per_second,
jlong jrefill_period_micros, jint jfairness) {
return reinterpret_cast<jlong>(rocksdb::NewGenericRateLimiter(
static_cast<int64_t>(jrate_bytes_per_second),
static_cast<int64_t>(jrefill_period_micros),
static_cast<int32_t>(jfairness)));
}
/* /*
* Class: org_rocksdb_RateLimiter * Class: org_rocksdb_RateLimiter
* Method: newRateLimiterHandle * Method: newRateLimiterHandle
@ -32,16 +17,13 @@ jlong Java_org_rocksdb_GenericRateLimiterConfig_newRateLimiterHandle(
jlong Java_org_rocksdb_RateLimiter_newRateLimiterHandle( jlong Java_org_rocksdb_RateLimiter_newRateLimiterHandle(
JNIEnv* env, jclass jclazz, jlong jrate_bytes_per_second, JNIEnv* env, jclass jclazz, jlong jrate_bytes_per_second,
jlong jrefill_period_micros, jint jfairness) { jlong jrefill_period_micros, jint jfairness) {
auto* rate_limiter = rocksdb::NewGenericRateLimiter( auto * sptr_rate_limiter =
static_cast<int64_t>(jrate_bytes_per_second), new std::shared_ptr<rocksdb::RateLimiter>(rocksdb::NewGenericRateLimiter(
static_cast<int64_t>(jrefill_period_micros), static_cast<int64_t>(jrate_bytes_per_second),
static_cast<int32_t>(jfairness)); static_cast<int64_t>(jrefill_period_micros),
static_cast<int32_t>(jfairness)));
std::shared_ptr<rocksdb::RateLimiter> *ptr_sptr_rate_limiter = return reinterpret_cast<jlong>(sptr_rate_limiter);
new std::shared_ptr<rocksdb::RateLimiter>;
*ptr_sptr_rate_limiter = std::shared_ptr<rocksdb::RateLimiter>(rate_limiter);
return reinterpret_cast<jlong>(ptr_sptr_rate_limiter);
} }
/* /*
@ -51,10 +33,9 @@ jlong Java_org_rocksdb_RateLimiter_newRateLimiterHandle(
*/ */
void Java_org_rocksdb_RateLimiter_disposeInternal( void Java_org_rocksdb_RateLimiter_disposeInternal(
JNIEnv* env, jobject jobj, jlong jhandle) { JNIEnv* env, jobject jobj, jlong jhandle) {
std::shared_ptr<rocksdb::RateLimiter> *handle = auto* handle =
reinterpret_cast<std::shared_ptr<rocksdb::RateLimiter> *>(jhandle); reinterpret_cast<std::shared_ptr<rocksdb::RateLimiter> *>(jhandle);
handle->reset(); delete handle; // delete std::shared_ptr
delete handle;
} }
/* /*
@ -65,8 +46,8 @@ void Java_org_rocksdb_RateLimiter_disposeInternal(
void Java_org_rocksdb_RateLimiter_setBytesPerSecond( void Java_org_rocksdb_RateLimiter_setBytesPerSecond(
JNIEnv* env, jobject jobj, jlong handle, JNIEnv* env, jobject jobj, jlong handle,
jlong jbytes_per_second) { jlong jbytes_per_second) {
reinterpret_cast<rocksdb::RateLimiter*>( reinterpret_cast<std::shared_ptr<rocksdb::RateLimiter> *>(handle)->get()->
handle)->SetBytesPerSecond(jbytes_per_second); SetBytesPerSecond(jbytes_per_second);
} }
/* /*
@ -77,9 +58,8 @@ void Java_org_rocksdb_RateLimiter_setBytesPerSecond(
void Java_org_rocksdb_RateLimiter_request( void Java_org_rocksdb_RateLimiter_request(
JNIEnv* env, jobject jobj, jlong handle, JNIEnv* env, jobject jobj, jlong handle,
jlong jbytes) { jlong jbytes) {
reinterpret_cast<rocksdb::RateLimiter*>( reinterpret_cast<std::shared_ptr<rocksdb::RateLimiter> *>(handle)->get()->
handle)->Request(jbytes, Request(jbytes, rocksdb::Env::IO_TOTAL);
rocksdb::Env::IO_TOTAL);
} }
/* /*
@ -88,10 +68,9 @@ void Java_org_rocksdb_RateLimiter_request(
* Signature: (J)J * Signature: (J)J
*/ */
jlong Java_org_rocksdb_RateLimiter_getSingleBurstBytes( jlong Java_org_rocksdb_RateLimiter_getSingleBurstBytes(
JNIEnv* env, jobject jobj, jlong handle, JNIEnv* env, jobject jobj, jlong handle) {
jlong jbytes) { return reinterpret_cast<std::shared_ptr<rocksdb::RateLimiter> *>(handle)->
return reinterpret_cast<rocksdb::RateLimiter*>( get()->GetSingleBurstBytes();
handle)->GetSingleBurstBytes();
} }
/* /*
@ -100,10 +79,9 @@ jlong Java_org_rocksdb_RateLimiter_getSingleBurstBytes(
* Signature: (J)J * Signature: (J)J
*/ */
jlong Java_org_rocksdb_RateLimiter_getTotalBytesThrough( jlong Java_org_rocksdb_RateLimiter_getTotalBytesThrough(
JNIEnv* env, jobject jobj, jlong handle, JNIEnv* env, jobject jobj, jlong handle) {
jlong jbytes) { return reinterpret_cast<std::shared_ptr<rocksdb::RateLimiter> *>(handle)->
return reinterpret_cast<rocksdb::RateLimiter*>( get()->GetTotalBytesThrough();
handle)->GetTotalBytesThrough();
} }
/* /*
@ -112,8 +90,7 @@ jlong Java_org_rocksdb_RateLimiter_getTotalBytesThrough(
* Signature: (J)J * Signature: (J)J
*/ */
jlong Java_org_rocksdb_RateLimiter_getTotalRequests( jlong Java_org_rocksdb_RateLimiter_getTotalRequests(
JNIEnv* env, jobject jobj, jlong handle, JNIEnv* env, jobject jobj, jlong handle) {
jlong jbytes) { return reinterpret_cast<std::shared_ptr<rocksdb::RateLimiter> *>(handle)->
return reinterpret_cast<rocksdb::RateLimiter*>( get()->GetTotalRequests();
handle)->GetTotalRequests();
} }

@ -22,7 +22,7 @@
*/ */
jlong Java_org_rocksdb_RestoreOptions_newRestoreOptions(JNIEnv* env, jlong Java_org_rocksdb_RestoreOptions_newRestoreOptions(JNIEnv* env,
jclass jcls, jboolean keep_log_files) { jclass jcls, jboolean keep_log_files) {
auto ropt = new rocksdb::RestoreOptions(keep_log_files); auto* ropt = new rocksdb::RestoreOptions(keep_log_files);
return reinterpret_cast<jlong>(ropt); return reinterpret_cast<jlong>(ropt);
} }
@ -33,7 +33,7 @@ jlong Java_org_rocksdb_RestoreOptions_newRestoreOptions(JNIEnv* env,
*/ */
void Java_org_rocksdb_RestoreOptions_disposeInternal(JNIEnv* env, jobject jobj, void Java_org_rocksdb_RestoreOptions_disposeInternal(JNIEnv* env, jobject jobj,
jlong jhandle) { jlong jhandle) {
auto ropt = reinterpret_cast<rocksdb::RestoreOptions*>(jhandle); auto* ropt = reinterpret_cast<rocksdb::RestoreOptions*>(jhandle);
assert(ropt); assert(ropt);
delete ropt; delete ropt;
} }

File diff suppressed because it is too large Load Diff

@ -26,8 +26,17 @@
*/ */
jlong Java_org_rocksdb_AbstractSlice_createNewSliceFromString( jlong Java_org_rocksdb_AbstractSlice_createNewSliceFromString(
JNIEnv * env, jclass jcls, jstring jstr) { JNIEnv * env, jclass jcls, jstring jstr) {
const auto* str = env->GetStringUTFChars(jstr, NULL); const auto* str = env->GetStringUTFChars(jstr, nullptr);
if(str == nullptr) {
// exception thrown: OutOfMemoryError
return 0;
}
const size_t len = strlen(str); const size_t len = strlen(str);
// NOTE: buf will be deleted in the
// Java_org_rocksdb_Slice_disposeInternalBuf or
// or Java_org_rocksdb_DirectSlice_disposeInternalBuf methods
char* buf = new char[len + 1]; char* buf = new char[len + 1];
memcpy(buf, str, len); memcpy(buf, str, len);
buf[len] = 0; buf[len] = 0;
@ -118,13 +127,18 @@ void Java_org_rocksdb_AbstractSlice_disposeInternal(
*/ */
jlong Java_org_rocksdb_Slice_createNewSlice0( jlong Java_org_rocksdb_Slice_createNewSlice0(
JNIEnv * env, jclass jcls, jbyteArray data, jint offset) { JNIEnv * env, jclass jcls, jbyteArray data, jint offset) {
const jsize dataSize = env->GetArrayLength(data); const jsize dataSize = env->GetArrayLength(data);
const int len = dataSize - offset; const int len = dataSize - offset;
jbyte* ptrData = new jbyte[len];
env->GetByteArrayRegion(data, offset, len, ptrData);
const auto* slice = new rocksdb::Slice((const char*)ptrData, len); // NOTE: buf will be deleted in the Java_org_rocksdb_Slice_disposeInternalBuf method
jbyte* buf = new jbyte[len];
env->GetByteArrayRegion(data, offset, len, buf);
if(env->ExceptionCheck()) {
// exception thrown: ArrayIndexOutOfBoundsException
return 0;
}
const auto* slice = new rocksdb::Slice((const char*)buf, len);
return reinterpret_cast<jlong>(slice); return reinterpret_cast<jlong>(slice);
} }
@ -135,16 +149,17 @@ jlong Java_org_rocksdb_Slice_createNewSlice0(
*/ */
jlong Java_org_rocksdb_Slice_createNewSlice1( jlong Java_org_rocksdb_Slice_createNewSlice1(
JNIEnv * env, jclass jcls, jbyteArray data) { JNIEnv * env, jclass jcls, jbyteArray data) {
jbyte* ptrData = env->GetByteArrayElements(data, nullptr);
if(ptrData == nullptr) {
// exception thrown: OutOfMemoryError
return 0;
}
const int len = env->GetArrayLength(data) + 1; const int len = env->GetArrayLength(data) + 1;
jboolean isCopy; // NOTE: buf will be deleted in the Java_org_rocksdb_Slice_disposeInternalBuf method
jbyte* ptrData = env->GetByteArrayElements(data, &isCopy);
// NOTE: buf will be deleted in the org.rocksdb.Slice#dispose method
char* buf = new char[len]; char* buf = new char[len];
memcpy(buf, ptrData, len - 1); memcpy(buf, ptrData, len - 1);
buf[len-1]='\0'; buf[len-1] = '\0';
const auto* slice = const auto* slice =
new rocksdb::Slice(buf, len - 1); new rocksdb::Slice(buf, len - 1);
@ -162,22 +177,61 @@ jlong Java_org_rocksdb_Slice_createNewSlice1(
jbyteArray Java_org_rocksdb_Slice_data0( jbyteArray Java_org_rocksdb_Slice_data0(
JNIEnv* env, jobject jobj, jlong handle) { JNIEnv* env, jobject jobj, jlong handle) {
const auto* slice = reinterpret_cast<rocksdb::Slice*>(handle); const auto* slice = reinterpret_cast<rocksdb::Slice*>(handle);
const int len = static_cast<int>(slice->size()); const jsize len = static_cast<jsize>(slice->size());
const jbyteArray data = env->NewByteArray(len); const jbyteArray data = env->NewByteArray(len);
if(data == nullptr) {
// exception thrown: OutOfMemoryError
return nullptr;
}
env->SetByteArrayRegion(data, 0, len, env->SetByteArrayRegion(data, 0, len,
reinterpret_cast<const jbyte*>(slice->data())); reinterpret_cast<const jbyte*>(slice->data()));
if(env->ExceptionCheck()) {
// exception thrown: ArrayIndexOutOfBoundsException
env->DeleteLocalRef(data);
return nullptr;
}
return data; return data;
} }
/*
* Class: org_rocksdb_Slice
* Method: clear0
* Signature: (JZJ)V
*/
void Java_org_rocksdb_Slice_clear0(
JNIEnv * env, jobject jobj, jlong handle, jboolean shouldRelease,
jlong internalBufferOffset) {
auto* slice = reinterpret_cast<rocksdb::Slice*>(handle);
if(shouldRelease == JNI_TRUE) {
const char* buf = slice->data_ - internalBufferOffset;
delete [] buf;
}
slice->clear();
}
/*
* Class: org_rocksdb_Slice
* Method: removePrefix0
* Signature: (JI)V
*/
void Java_org_rocksdb_Slice_removePrefix0(
JNIEnv * env, jobject jobj, jlong handle, jint length) {
auto* slice = reinterpret_cast<rocksdb::Slice*>(handle);
slice->remove_prefix(length);
}
/* /*
* Class: org_rocksdb_Slice * Class: org_rocksdb_Slice
* Method: disposeInternalBuf * Method: disposeInternalBuf
* Signature: (J)V * Signature: (JJ)V
*/ */
void Java_org_rocksdb_Slice_disposeInternalBuf( void Java_org_rocksdb_Slice_disposeInternalBuf(
JNIEnv * env, jobject jobj, jlong handle) { JNIEnv * env, jobject jobj, jlong handle, jlong internalBufferOffset) {
const auto* slice = reinterpret_cast<rocksdb::Slice*>(handle); const auto* slice = reinterpret_cast<rocksdb::Slice*>(handle);
delete [] slice->data_; const char* buf = slice->data_ - internalBufferOffset;
delete [] buf;
} }
// </editor-fold> // </editor-fold>
@ -191,8 +245,19 @@ void Java_org_rocksdb_Slice_disposeInternalBuf(
*/ */
jlong Java_org_rocksdb_DirectSlice_createNewDirectSlice0( jlong Java_org_rocksdb_DirectSlice_createNewDirectSlice0(
JNIEnv* env, jclass jcls, jobject data, jint length) { JNIEnv* env, jclass jcls, jobject data, jint length) {
assert(data != nullptr);
void* data_addr = env->GetDirectBufferAddress(data);
if(data_addr == nullptr) {
// error: memory region is undefined, given object is not a direct
// java.nio.Buffer, or JNI access to direct buffers is not supported by JVM
rocksdb::IllegalArgumentExceptionJni::ThrowNew(env,
rocksdb::Status::InvalidArgument(
"Could not access DirectBuffer"));
return 0;
}
const auto* ptrData = const auto* ptrData =
reinterpret_cast<char*>(env->GetDirectBufferAddress(data)); reinterpret_cast<char*>(data_addr);
const auto* slice = new rocksdb::Slice(ptrData, length); const auto* slice = new rocksdb::Slice(ptrData, length);
return reinterpret_cast<jlong>(slice); return reinterpret_cast<jlong>(slice);
} }
@ -204,8 +269,17 @@ jlong Java_org_rocksdb_DirectSlice_createNewDirectSlice0(
*/ */
jlong Java_org_rocksdb_DirectSlice_createNewDirectSlice1( jlong Java_org_rocksdb_DirectSlice_createNewDirectSlice1(
JNIEnv* env, jclass jcls, jobject data) { JNIEnv* env, jclass jcls, jobject data) {
const auto* ptrData = void* data_addr = env->GetDirectBufferAddress(data);
reinterpret_cast<char*>(env->GetDirectBufferAddress(data)); if(data_addr == nullptr) {
// error: memory region is undefined, given object is not a direct
// java.nio.Buffer, or JNI access to direct buffers is not supported by JVM
rocksdb::IllegalArgumentExceptionJni::ThrowNew(env,
rocksdb::Status::InvalidArgument(
"Could not access DirectBuffer"));
return 0;
}
const auto* ptrData = reinterpret_cast<char*>(data_addr);
const auto* slice = new rocksdb::Slice(ptrData); const auto* slice = new rocksdb::Slice(ptrData);
return reinterpret_cast<jlong>(slice); return reinterpret_cast<jlong>(slice);
} }
@ -236,12 +310,16 @@ jbyte Java_org_rocksdb_DirectSlice_get0(
/* /*
* Class: org_rocksdb_DirectSlice * Class: org_rocksdb_DirectSlice
* Method: clear0 * Method: clear0
* Signature: (J)V * Signature: (JZJ)V
*/ */
void Java_org_rocksdb_DirectSlice_clear0( void Java_org_rocksdb_DirectSlice_clear0(
JNIEnv* env, jobject jobj, jlong handle) { JNIEnv* env, jobject jobj, jlong handle,
jboolean shouldRelease, jlong internalBufferOffset) {
auto* slice = reinterpret_cast<rocksdb::Slice*>(handle); auto* slice = reinterpret_cast<rocksdb::Slice*>(handle);
delete [] slice->data_; if(shouldRelease == JNI_TRUE) {
const char* buf = slice->data_ - internalBufferOffset;
delete [] buf;
}
slice->clear(); slice->clear();
} }
@ -256,4 +334,16 @@ void Java_org_rocksdb_DirectSlice_removePrefix0(
slice->remove_prefix(length); slice->remove_prefix(length);
} }
/*
* Class: org_rocksdb_DirectSlice
* Method: disposeInternalBuf
* Signature: (JJ)V
*/
void Java_org_rocksdb_DirectSlice_disposeInternalBuf(
JNIEnv* env, jobject jobj, jlong handle, jlong internalBufferOffset) {
const auto* slice = reinterpret_cast<rocksdb::Slice*>(handle);
const char* buf = slice->data_ - internalBufferOffset;
delete [] buf;
}
// </editor-fold> // </editor-fold>

@ -42,7 +42,11 @@ jlong Java_org_rocksdb_SstFileWriter_newSstFileWriter(JNIEnv *env, jclass jcls,
*/ */
void Java_org_rocksdb_SstFileWriter_open(JNIEnv *env, jobject jobj, void Java_org_rocksdb_SstFileWriter_open(JNIEnv *env, jobject jobj,
jlong jhandle, jstring jfile_path) { jlong jhandle, jstring jfile_path) {
const char *file_path = env->GetStringUTFChars(jfile_path, NULL); const char *file_path = env->GetStringUTFChars(jfile_path, nullptr);
if(file_path == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
rocksdb::Status s = rocksdb::Status s =
reinterpret_cast<rocksdb::SstFileWriter *>(jhandle)->Open(file_path); reinterpret_cast<rocksdb::SstFileWriter *>(jhandle)->Open(file_path);
env->ReleaseStringUTFChars(jfile_path, file_path); env->ReleaseStringUTFChars(jfile_path, file_path);
@ -62,8 +66,9 @@ void Java_org_rocksdb_SstFileWriter_add(JNIEnv *env, jobject jobj,
jlong jvalue_handle) { jlong jvalue_handle) {
auto *key_slice = reinterpret_cast<rocksdb::Slice *>(jkey_handle); auto *key_slice = reinterpret_cast<rocksdb::Slice *>(jkey_handle);
auto *value_slice = reinterpret_cast<rocksdb::Slice *>(jvalue_handle); auto *value_slice = reinterpret_cast<rocksdb::Slice *>(jvalue_handle);
rocksdb::Status s = reinterpret_cast<rocksdb::SstFileWriter *>(jhandle)->Add( rocksdb::Status s =
*key_slice, *value_slice); reinterpret_cast<rocksdb::SstFileWriter *>(jhandle)->Add(*key_slice,
*value_slice);
if (!s.ok()) { if (!s.ok()) {
rocksdb::RocksDBExceptionJni::ThrowNew(env, s); rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
} }

@ -21,9 +21,8 @@
*/ */
jlong Java_org_rocksdb_Statistics_getTickerCount0( jlong Java_org_rocksdb_Statistics_getTickerCount0(
JNIEnv* env, jobject jobj, jint tickerType, jlong handle) { JNIEnv* env, jobject jobj, jint tickerType, jlong handle) {
auto st = reinterpret_cast<rocksdb::Statistics*>(handle); auto* st = reinterpret_cast<rocksdb::Statistics*>(handle);
assert(st != nullptr); assert(st != nullptr);
return st->getTickerCount(static_cast<rocksdb::Tickers>(tickerType)); return st->getTickerCount(static_cast<rocksdb::Tickers>(tickerType));
} }
@ -34,17 +33,28 @@ jlong Java_org_rocksdb_Statistics_getTickerCount0(
*/ */
jobject Java_org_rocksdb_Statistics_getHistogramData0( jobject Java_org_rocksdb_Statistics_getHistogramData0(
JNIEnv* env, jobject jobj, jint histogramType, jlong handle) { JNIEnv* env, jobject jobj, jint histogramType, jlong handle) {
auto st = reinterpret_cast<rocksdb::Statistics*>(handle); auto* st = reinterpret_cast<rocksdb::Statistics*>(handle);
assert(st != nullptr); assert(st != nullptr);
rocksdb::HistogramData data; rocksdb::HistogramData data;
st->histogramData(static_cast<rocksdb::Histograms>(histogramType), st->histogramData(static_cast<rocksdb::Histograms>(histogramType),
&data); &data);
// Don't reuse class pointer jclass jclazz = rocksdb::HistogramDataJni::getJClass(env);
jclass jclazz = env->FindClass("org/rocksdb/HistogramData"); if(jclazz == nullptr) {
// exception occurred accessing class
return nullptr;
}
jmethodID mid = rocksdb::HistogramDataJni::getConstructorMethodId( jmethodID mid = rocksdb::HistogramDataJni::getConstructorMethodId(
env, jclazz); env);
return env->NewObject(jclazz, mid, data.median, data.percentile95, if(mid == nullptr) {
data.percentile99, data.average, data.standard_deviation); // exception occurred accessing method
return nullptr;
}
return env->NewObject(
jclazz,
mid, data.median, data.percentile95,data.percentile99, data.average,
data.standard_deviation);
} }

@ -67,12 +67,5 @@ jobject Java_org_rocksdb_TransactionLogIterator_getBatch(
JNIEnv* env, jobject jobj, jlong handle) { JNIEnv* env, jobject jobj, jlong handle) {
rocksdb::BatchResult batch_result = rocksdb::BatchResult batch_result =
reinterpret_cast<rocksdb::TransactionLogIterator*>(handle)->GetBatch(); reinterpret_cast<rocksdb::TransactionLogIterator*>(handle)->GetBatch();
jclass jclazz = env->FindClass( return rocksdb::BatchResultJni::construct(env, batch_result);
"org/rocksdb/TransactionLogIterator$BatchResult");
assert(jclazz != nullptr);
jmethodID mid = env->GetMethodID(
jclazz, "<init>", "(Lorg/rocksdb/TransactionLogIterator;JJ)V");
assert(mid != nullptr);
return env->NewObject(jclazz, mid, jobj,
batch_result.sequence, batch_result.writeBatchPtr.release());
} }

@ -26,9 +26,14 @@
jlong Java_org_rocksdb_TtlDB_open(JNIEnv* env, jlong Java_org_rocksdb_TtlDB_open(JNIEnv* env,
jclass jcls, jlong joptions_handle, jstring jdb_path, jclass jcls, jlong joptions_handle, jstring jdb_path,
jint jttl, jboolean jread_only) { jint jttl, jboolean jread_only) {
const char* db_path = env->GetStringUTFChars(jdb_path, nullptr);
if(db_path == nullptr) {
// exception thrown: OutOfMemoryError
return 0;
}
auto* opt = reinterpret_cast<rocksdb::Options*>(joptions_handle); auto* opt = reinterpret_cast<rocksdb::Options*>(joptions_handle);
rocksdb::DBWithTTL* db = nullptr; rocksdb::DBWithTTL* db = nullptr;
const char* db_path = env->GetStringUTFChars(jdb_path, 0);
rocksdb::Status s = rocksdb::DBWithTTL::Open(*opt, db_path, &db, rocksdb::Status s = rocksdb::DBWithTTL::Open(*opt, db_path, &db,
jttl, jread_only); jttl, jread_only);
env->ReleaseStringUTFChars(jdb_path, db_path); env->ReleaseStringUTFChars(jdb_path, db_path);
@ -53,49 +58,69 @@ jlongArray
JNIEnv* env, jclass jcls, jlong jopt_handle, jstring jdb_path, JNIEnv* env, jclass jcls, jlong jopt_handle, jstring jdb_path,
jobjectArray jcolumn_names, jlongArray jcolumn_options, jobjectArray jcolumn_names, jlongArray jcolumn_options,
jintArray jttls, jboolean jread_only) { jintArray jttls, jboolean jread_only) {
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jopt_handle); const char* db_path = env->GetStringUTFChars(jdb_path, nullptr);
const char* db_path = env->GetStringUTFChars(jdb_path, NULL); if(db_path == nullptr) {
// exception thrown: OutOfMemoryError
std::vector<rocksdb::ColumnFamilyDescriptor> column_families; return 0;
}
jsize len_cols = env->GetArrayLength(jcolumn_names);
jlong* jco = env->GetLongArrayElements(jcolumn_options, NULL);
for(int i = 0; i < len_cols; i++) {
jobject jcn = env->GetObjectArrayElement(jcolumn_names, i);
jbyteArray jcn_ba = reinterpret_cast<jbyteArray>(jcn);
jbyte* jcf_name = env->GetByteArrayElements(jcn_ba, NULL);
const int jcf_name_len = env->GetArrayLength(jcn_ba);
//TODO(AR) do I need to make a copy of jco[i] ? const jsize len_cols = env->GetArrayLength(jcolumn_names);
jlong* jco = env->GetLongArrayElements(jcolumn_options, nullptr);
if(jco == nullptr) {
// exception thrown: OutOfMemoryError
env->ReleaseStringUTFChars(jdb_path, db_path);
return nullptr;
}
std::string cf_name (reinterpret_cast<char *>(jcf_name), jcf_name_len); std::vector<rocksdb::ColumnFamilyDescriptor> column_families;
rocksdb::ColumnFamilyOptions* cf_options = jboolean has_exception = JNI_FALSE;
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jco[i]); rocksdb::JniUtil::byteStrings<std::string>(
column_families.push_back( env,
rocksdb::ColumnFamilyDescriptor(cf_name, *cf_options)); jcolumn_names,
[](const char* str_data, const size_t str_len) {
return std::string(str_data, str_len);
},
[&jco, &column_families](size_t idx, std::string cf_name) {
rocksdb::ColumnFamilyOptions* cf_options =
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jco[idx]);
column_families.push_back(
rocksdb::ColumnFamilyDescriptor(cf_name, *cf_options));
},
&has_exception);
env->ReleaseByteArrayElements(jcn_ba, jcf_name, JNI_ABORT);
env->DeleteLocalRef(jcn);
}
env->ReleaseLongArrayElements(jcolumn_options, jco, JNI_ABORT); env->ReleaseLongArrayElements(jcolumn_options, jco, JNI_ABORT);
std::vector<rocksdb::ColumnFamilyHandle*> handles; if(has_exception == JNI_TRUE) {
rocksdb::DBWithTTL* db = nullptr; // exception occured
env->ReleaseStringUTFChars(jdb_path, db_path);
return nullptr;
}
std::vector<int32_t> ttl_values; std::vector<int32_t> ttl_values;
jint* jttlv = env->GetIntArrayElements(jttls, NULL); jint* jttlv = env->GetIntArrayElements(jttls, nullptr);
jsize len_ttls = env->GetArrayLength(jttls); if(jttlv == nullptr) {
for(int i = 0; i < len_ttls; i++) { // exception thrown: OutOfMemoryError
env->ReleaseStringUTFChars(jdb_path, db_path);
return nullptr;
}
const jsize len_ttls = env->GetArrayLength(jttls);
for(jsize i = 0; i < len_ttls; i++) {
ttl_values.push_back(jttlv[i]); ttl_values.push_back(jttlv[i]);
} }
env->ReleaseIntArrayElements(jttls, jttlv, JNI_ABORT); env->ReleaseIntArrayElements(jttls, jttlv, JNI_ABORT);
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jopt_handle);
std::vector<rocksdb::ColumnFamilyHandle*> handles;
rocksdb::DBWithTTL* db = nullptr;
rocksdb::Status s = rocksdb::DBWithTTL::Open(*opt, db_path, column_families, rocksdb::Status s = rocksdb::DBWithTTL::Open(*opt, db_path, column_families,
&handles, &db, ttl_values, jread_only); &handles, &db, ttl_values, jread_only);
// we have now finished with db_path
env->ReleaseStringUTFChars(jdb_path, db_path);
// check if open operation was successful // check if open operation was successful
if (s.ok()) { if (s.ok()) {
jsize resultsLen = 1 + len_cols; //db handle + column family handles const jsize resultsLen = 1 + len_cols; //db handle + column family handles
std::unique_ptr<jlong[]> results = std::unique_ptr<jlong[]> results =
std::unique_ptr<jlong[]>(new jlong[resultsLen]); std::unique_ptr<jlong[]>(new jlong[resultsLen]);
results[0] = reinterpret_cast<jlong>(db); results[0] = reinterpret_cast<jlong>(db);
@ -104,7 +129,18 @@ jlongArray
} }
jlongArray jresults = env->NewLongArray(resultsLen); jlongArray jresults = env->NewLongArray(resultsLen);
if(jresults == nullptr) {
// exception thrown: OutOfMemoryError
return nullptr;
}
env->SetLongArrayRegion(jresults, 0, resultsLen, results.get()); env->SetLongArrayRegion(jresults, 0, resultsLen, results.get());
if(env->ExceptionCheck()) {
// exception thrown: ArrayIndexOutOfBoundsException
env->DeleteLocalRef(jresults);
return nullptr;
}
return jresults; return jresults;
} else { } else {
rocksdb::RocksDBExceptionJni::ThrowNew(env, s); rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
@ -120,18 +156,23 @@ jlongArray
jlong Java_org_rocksdb_TtlDB_createColumnFamilyWithTtl( jlong Java_org_rocksdb_TtlDB_createColumnFamilyWithTtl(
JNIEnv* env, jobject jobj, jlong jdb_handle, JNIEnv* env, jobject jobj, jlong jdb_handle,
jbyteArray jcolumn_name, jlong jcolumn_options, jint jttl) { jbyteArray jcolumn_name, jlong jcolumn_options, jint jttl) {
rocksdb::ColumnFamilyHandle* handle;
auto* db_handle = reinterpret_cast<rocksdb::DBWithTTL*>(jdb_handle);
jbyte* cfname = env->GetByteArrayElements(jcolumn_name, 0); jbyte* cfname = env->GetByteArrayElements(jcolumn_name, nullptr);
const int len = env->GetArrayLength(jcolumn_name); if(cfname == nullptr) {
// exception thrown: OutOfMemoryError
return 0;
}
const jsize len = env->GetArrayLength(jcolumn_name);
auto* cfOptions = auto* cfOptions =
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jcolumn_options); reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jcolumn_options);
auto* db_handle = reinterpret_cast<rocksdb::DBWithTTL*>(jdb_handle);
rocksdb::ColumnFamilyHandle* handle;
rocksdb::Status s = db_handle->CreateColumnFamilyWithTtl( rocksdb::Status s = db_handle->CreateColumnFamilyWithTtl(
*cfOptions, std::string(reinterpret_cast<char *>(cfname), *cfOptions, std::string(reinterpret_cast<char *>(cfname),
len), &handle, jttl); len), &handle, jttl);
env->ReleaseByteArrayElements(jcolumn_name, cfname, 0); env->ReleaseByteArrayElements(jcolumn_name, cfname, 0);
if (s.ok()) { if (s.ok()) {

@ -30,8 +30,7 @@
*/ */
jlong Java_org_rocksdb_WriteBatch_newWriteBatch( jlong Java_org_rocksdb_WriteBatch_newWriteBatch(
JNIEnv* env, jclass jcls, jint jreserved_bytes) { JNIEnv* env, jclass jcls, jint jreserved_bytes) {
rocksdb::WriteBatch* wb = new rocksdb::WriteBatch( auto* wb = new rocksdb::WriteBatch(static_cast<size_t>(jreserved_bytes));
static_cast<size_t>(jreserved_bytes));
return reinterpret_cast<jlong>(wb); return reinterpret_cast<jlong>(wb);
} }
@ -244,7 +243,9 @@ void Java_org_rocksdb_WriteBatch_iterate(
*/ */
void Java_org_rocksdb_WriteBatch_disposeInternal( void Java_org_rocksdb_WriteBatch_disposeInternal(
JNIEnv* env, jobject jobj, jlong handle) { JNIEnv* env, jobject jobj, jlong handle) {
delete reinterpret_cast<rocksdb::WriteBatch*>(handle); auto* wb = reinterpret_cast<rocksdb::WriteBatch*>(handle);
assert(wb != nullptr);
delete wb;
} }
/* /*
@ -254,9 +255,8 @@ void Java_org_rocksdb_WriteBatch_disposeInternal(
*/ */
jlong Java_org_rocksdb_WriteBatch_00024Handler_createNewHandler0( jlong Java_org_rocksdb_WriteBatch_00024Handler_createNewHandler0(
JNIEnv* env, jobject jobj) { JNIEnv* env, jobject jobj) {
const rocksdb::WriteBatchHandlerJniCallback* h = auto* wbjnic = new rocksdb::WriteBatchHandlerJniCallback(env, jobj);
new rocksdb::WriteBatchHandlerJniCallback(env, jobj); return reinterpret_cast<jlong>(wbjnic);
return reinterpret_cast<jlong>(h);
} }
/* /*
@ -266,5 +266,8 @@ jlong Java_org_rocksdb_WriteBatch_00024Handler_createNewHandler0(
*/ */
void Java_org_rocksdb_WriteBatch_00024Handler_disposeInternal( void Java_org_rocksdb_WriteBatch_00024Handler_disposeInternal(
JNIEnv* env, jobject jobj, jlong handle) { JNIEnv* env, jobject jobj, jlong handle) {
delete reinterpret_cast<rocksdb::WriteBatchHandlerJniCallback*>(handle); auto* wbjnic =
reinterpret_cast<rocksdb::WriteBatchHandlerJniCallback*>(handle);
assert(wbjnic != nullptr);
delete wbjnic;
} }

@ -101,8 +101,18 @@ jbyteArray Java_org_rocksdb_WriteBatchTest_getContents(
delete mem->Unref(); delete mem->Unref();
jbyteArray jstate = env->NewByteArray(static_cast<jsize>(state.size())); jbyteArray jstate = env->NewByteArray(static_cast<jsize>(state.size()));
if(jstate == nullptr) {
// exception thrown: OutOfMemoryError
return nullptr;
}
env->SetByteArrayRegion(jstate, 0, static_cast<jsize>(state.size()), env->SetByteArrayRegion(jstate, 0, static_cast<jsize>(state.size()),
reinterpret_cast<const jbyte*>(state.c_str())); reinterpret_cast<const jbyte*>(state.c_str()));
if(env->ExceptionCheck()) {
// exception thrown: ArrayIndexOutOfBoundsException
env->DeleteLocalRef(jstate);
return nullptr;
}
return jstate; return jstate;
} }

@ -19,7 +19,7 @@
*/ */
jlong Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__( jlong Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__(
JNIEnv* env, jclass jcls) { JNIEnv* env, jclass jcls) {
rocksdb::WriteBatchWithIndex* wbwi = new rocksdb::WriteBatchWithIndex(); auto* wbwi = new rocksdb::WriteBatchWithIndex();
return reinterpret_cast<jlong>(wbwi); return reinterpret_cast<jlong>(wbwi);
} }
@ -30,9 +30,9 @@ jlong Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__(
*/ */
jlong Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__Z( jlong Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__Z(
JNIEnv* env, jclass jcls, jboolean joverwrite_key) { JNIEnv* env, jclass jcls, jboolean joverwrite_key) {
rocksdb::WriteBatchWithIndex* wbwi = auto* wbwi =
new rocksdb::WriteBatchWithIndex(rocksdb::BytewiseComparator(), 0, new rocksdb::WriteBatchWithIndex(rocksdb::BytewiseComparator(), 0,
static_cast<bool>(joverwrite_key)); static_cast<bool>(joverwrite_key));
return reinterpret_cast<jlong>(wbwi); return reinterpret_cast<jlong>(wbwi);
} }
@ -44,10 +44,10 @@ jlong Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__Z(
jlong Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__JIZ( jlong Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__JIZ(
JNIEnv* env, jclass jcls, jlong jfallback_index_comparator_handle, JNIEnv* env, jclass jcls, jlong jfallback_index_comparator_handle,
jint jreserved_bytes, jboolean joverwrite_key) { jint jreserved_bytes, jboolean joverwrite_key) {
rocksdb::WriteBatchWithIndex* wbwi = auto* wbwi =
new rocksdb::WriteBatchWithIndex( new rocksdb::WriteBatchWithIndex(
reinterpret_cast<rocksdb::Comparator*>(jfallback_index_comparator_handle), reinterpret_cast<rocksdb::Comparator*>(jfallback_index_comparator_handle),
static_cast<size_t>(jreserved_bytes), static_cast<bool>(joverwrite_key)); static_cast<size_t>(jreserved_bytes), static_cast<bool>(joverwrite_key));
return reinterpret_cast<jlong>(wbwi); return reinterpret_cast<jlong>(wbwi);
} }
@ -241,7 +241,7 @@ void Java_org_rocksdb_WriteBatchWithIndex_rollbackToSavePoint0(
jlong Java_org_rocksdb_WriteBatchWithIndex_iterator0( jlong Java_org_rocksdb_WriteBatchWithIndex_iterator0(
JNIEnv* env, jobject jobj, jlong jwbwi_handle) { JNIEnv* env, jobject jobj, jlong jwbwi_handle) {
auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle); auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
rocksdb::WBWIIterator* wbwi_iterator = wbwi->NewIterator(); auto* wbwi_iterator = wbwi->NewIterator();
return reinterpret_cast<jlong>(wbwi_iterator); return reinterpret_cast<jlong>(wbwi_iterator);
} }
@ -254,7 +254,7 @@ jlong Java_org_rocksdb_WriteBatchWithIndex_iterator1(
JNIEnv* env, jobject jobj, jlong jwbwi_handle, jlong jcf_handle) { JNIEnv* env, jobject jobj, jlong jwbwi_handle, jlong jcf_handle) {
auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle); auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle); auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
rocksdb::WBWIIterator* wbwi_iterator = wbwi->NewIterator(cf_handle); auto* wbwi_iterator = wbwi->NewIterator(cf_handle);
return reinterpret_cast<jlong>(wbwi_iterator); return reinterpret_cast<jlong>(wbwi_iterator);
} }
@ -362,6 +362,7 @@ jbyteArray Java_org_rocksdb_WriteBatchWithIndex_getFromBatchAndDB__JJJ_3BIJ(
void Java_org_rocksdb_WriteBatchWithIndex_disposeInternal( void Java_org_rocksdb_WriteBatchWithIndex_disposeInternal(
JNIEnv* env, jobject jobj, jlong handle) { JNIEnv* env, jobject jobj, jlong handle) {
auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(handle); auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(handle);
assert(wbwi != nullptr);
delete wbwi; delete wbwi;
} }
@ -375,6 +376,7 @@ void Java_org_rocksdb_WriteBatchWithIndex_disposeInternal(
void Java_org_rocksdb_WBWIRocksIterator_disposeInternal( void Java_org_rocksdb_WBWIRocksIterator_disposeInternal(
JNIEnv* env, jobject jobj, jlong handle) { JNIEnv* env, jobject jobj, jlong handle) {
auto* it = reinterpret_cast<rocksdb::WBWIIterator*>(handle); auto* it = reinterpret_cast<rocksdb::WBWIIterator*>(handle);
assert(it != nullptr);
delete it; delete it;
} }
@ -437,7 +439,12 @@ void Java_org_rocksdb_WBWIRocksIterator_seek0(
JNIEnv* env, jobject jobj, jlong handle, jbyteArray jtarget, JNIEnv* env, jobject jobj, jlong handle, jbyteArray jtarget,
jint jtarget_len) { jint jtarget_len) {
auto* it = reinterpret_cast<rocksdb::WBWIIterator*>(handle); auto* it = reinterpret_cast<rocksdb::WBWIIterator*>(handle);
jbyte* target = env->GetByteArrayElements(jtarget, 0); jbyte* target = env->GetByteArrayElements(jtarget, nullptr);
if(target == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
rocksdb::Slice target_slice( rocksdb::Slice target_slice(
reinterpret_cast<char*>(target), jtarget_len); reinterpret_cast<char*>(target), jtarget_len);
@ -497,26 +504,41 @@ jlongArray Java_org_rocksdb_WBWIRocksIterator_entry1(
results[0] = 0x0; results[0] = 0x0;
} }
//TODO(AR) do we leak buf and value_buf? // key_slice and value_slice will be freed by org.rocksdb.DirectSlice#close
//set the pointer to the key slice auto* key_slice = new rocksdb::Slice(we.key.data(), we.key.size());
char* buf = new char[we.key.size()];
memcpy(buf, we.key.data(), we.key.size());
auto* key_slice = new rocksdb::Slice(buf, we.key.size());
results[1] = reinterpret_cast<jlong>(key_slice); results[1] = reinterpret_cast<jlong>(key_slice);
if (we.type == rocksdb::kDeleteRecord
//set the pointer to the value slice || we.type == rocksdb::kLogDataRecord) {
if (we.type == rocksdb::kDeleteRecord || we.type == rocksdb::kLogDataRecord) {
// set native handle of value slice to null if no value available // set native handle of value slice to null if no value available
results[2] = 0; results[2] = 0;
} else { } else {
char* value_buf = new char[we.value.size()]; auto* value_slice = new rocksdb::Slice(we.value.data(), we.value.size());
memcpy(value_buf, we.value.data(), we.value.size());
auto* value_slice = new rocksdb::Slice(value_buf, we.value.size());
results[2] = reinterpret_cast<jlong>(value_slice); results[2] = reinterpret_cast<jlong>(value_slice);
} }
jlongArray jresults = env->NewLongArray(3); jlongArray jresults = env->NewLongArray(3);
if(jresults == nullptr) {
// exception thrown: OutOfMemoryError
if(results[2] != 0) {
auto* value_slice = reinterpret_cast<rocksdb::Slice*>(results[2]);
delete value_slice;
}
delete key_slice;
return nullptr;
}
env->SetLongArrayRegion(jresults, 0, 3, results); env->SetLongArrayRegion(jresults, 0, 3, results);
if(env->ExceptionCheck()) {
// exception thrown: ArrayIndexOutOfBoundsException
env->DeleteLocalRef(jresults);
if(results[2] != 0) {
auto* value_slice = reinterpret_cast<rocksdb::Slice*>(results[2]);
delete value_slice;
}
delete key_slice;
return nullptr;
}
return jresults; return jresults;
} }

@ -16,69 +16,202 @@ WriteBatchHandlerJniCallback::WriteBatchHandlerJniCallback(
// Note: we want to access the Java WriteBatchHandler instance // Note: we want to access the Java WriteBatchHandler instance
// across multiple method calls, so we create a global ref // across multiple method calls, so we create a global ref
assert(jWriteBatchHandler != nullptr);
m_jWriteBatchHandler = env->NewGlobalRef(jWriteBatchHandler); m_jWriteBatchHandler = env->NewGlobalRef(jWriteBatchHandler);
if(m_jWriteBatchHandler == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
m_jPutMethodId = WriteBatchHandlerJni::getPutMethodId(env); m_jPutMethodId = WriteBatchHandlerJni::getPutMethodId(env);
if(m_jPutMethodId == nullptr) {
// exception thrown
return;
}
m_jMergeMethodId = WriteBatchHandlerJni::getMergeMethodId(env); m_jMergeMethodId = WriteBatchHandlerJni::getMergeMethodId(env);
if(m_jMergeMethodId == nullptr) {
// exception thrown
return;
}
m_jDeleteMethodId = WriteBatchHandlerJni::getDeleteMethodId(env); m_jDeleteMethodId = WriteBatchHandlerJni::getDeleteMethodId(env);
if(m_jDeleteMethodId == nullptr) {
// exception thrown
return;
}
m_jLogDataMethodId = WriteBatchHandlerJni::getLogDataMethodId(env); m_jLogDataMethodId = WriteBatchHandlerJni::getLogDataMethodId(env);
if(m_jLogDataMethodId == nullptr) {
// exception thrown
return;
}
m_jContinueMethodId = WriteBatchHandlerJni::getContinueMethodId(env); m_jContinueMethodId = WriteBatchHandlerJni::getContinueMethodId(env);
if(m_jContinueMethodId == nullptr) {
// exception thrown
return;
}
} }
void WriteBatchHandlerJniCallback::Put(const Slice& key, const Slice& value) { void WriteBatchHandlerJniCallback::Put(const Slice& key, const Slice& value) {
const jbyteArray j_key = sliceToJArray(key); const jbyteArray j_key = sliceToJArray(key);
if(j_key == nullptr) {
// exception thrown
if(m_env->ExceptionCheck()) {
m_env->ExceptionDescribe();
}
return;
}
const jbyteArray j_value = sliceToJArray(value); const jbyteArray j_value = sliceToJArray(value);
if(j_value == nullptr) {
// exception thrown
if(m_env->ExceptionCheck()) {
m_env->ExceptionDescribe();
}
if(j_key != nullptr) {
m_env->DeleteLocalRef(j_key);
}
return;
}
m_env->CallVoidMethod( m_env->CallVoidMethod(
m_jWriteBatchHandler, m_jWriteBatchHandler,
m_jPutMethodId, m_jPutMethodId,
j_key, j_key,
j_value); j_value);
if(m_env->ExceptionCheck()) {
// exception thrown
m_env->ExceptionDescribe();
if(j_value != nullptr) {
m_env->DeleteLocalRef(j_value);
}
if(j_key != nullptr) {
m_env->DeleteLocalRef(j_key);
}
return;
}
m_env->DeleteLocalRef(j_value); if(j_value != nullptr) {
m_env->DeleteLocalRef(j_key); m_env->DeleteLocalRef(j_value);
}
if(j_key != nullptr) {
m_env->DeleteLocalRef(j_key);
}
} }
void WriteBatchHandlerJniCallback::Merge(const Slice& key, const Slice& value) { void WriteBatchHandlerJniCallback::Merge(const Slice& key, const Slice& value) {
const jbyteArray j_key = sliceToJArray(key); const jbyteArray j_key = sliceToJArray(key);
if(j_key == nullptr) {
// exception thrown
if(m_env->ExceptionCheck()) {
m_env->ExceptionDescribe();
}
return;
}
const jbyteArray j_value = sliceToJArray(value); const jbyteArray j_value = sliceToJArray(value);
if(j_value == nullptr) {
// exception thrown
if(m_env->ExceptionCheck()) {
m_env->ExceptionDescribe();
}
if(j_key != nullptr) {
m_env->DeleteLocalRef(j_key);
}
return;
}
m_env->CallVoidMethod( m_env->CallVoidMethod(
m_jWriteBatchHandler, m_jWriteBatchHandler,
m_jMergeMethodId, m_jMergeMethodId,
j_key, j_key,
j_value); j_value);
if(m_env->ExceptionCheck()) {
// exception thrown
m_env->ExceptionDescribe();
if(j_value != nullptr) {
m_env->DeleteLocalRef(j_value);
}
if(j_key != nullptr) {
m_env->DeleteLocalRef(j_key);
}
return;
}
m_env->DeleteLocalRef(j_value); if(j_value != nullptr) {
m_env->DeleteLocalRef(j_key); m_env->DeleteLocalRef(j_value);
}
if(j_key != nullptr) {
m_env->DeleteLocalRef(j_key);
}
} }
void WriteBatchHandlerJniCallback::Delete(const Slice& key) { void WriteBatchHandlerJniCallback::Delete(const Slice& key) {
const jbyteArray j_key = sliceToJArray(key); const jbyteArray j_key = sliceToJArray(key);
if(j_key == nullptr) {
// exception thrown
if(m_env->ExceptionCheck()) {
m_env->ExceptionDescribe();
}
return;
}
m_env->CallVoidMethod( m_env->CallVoidMethod(
m_jWriteBatchHandler, m_jWriteBatchHandler,
m_jDeleteMethodId, m_jDeleteMethodId,
j_key); j_key);
if(m_env->ExceptionCheck()) {
// exception thrown
m_env->ExceptionDescribe();
if(j_key != nullptr) {
m_env->DeleteLocalRef(j_key);
}
return;
}
m_env->DeleteLocalRef(j_key); if(j_key != nullptr) {
m_env->DeleteLocalRef(j_key);
}
} }
void WriteBatchHandlerJniCallback::LogData(const Slice& blob) { void WriteBatchHandlerJniCallback::LogData(const Slice& blob) {
const jbyteArray j_blob = sliceToJArray(blob); const jbyteArray j_blob = sliceToJArray(blob);
if(j_blob == nullptr) {
// exception thrown
if(m_env->ExceptionCheck()) {
m_env->ExceptionDescribe();
}
return;
}
m_env->CallVoidMethod( m_env->CallVoidMethod(
m_jWriteBatchHandler, m_jWriteBatchHandler,
m_jLogDataMethodId, m_jLogDataMethodId,
j_blob); j_blob);
if(m_env->ExceptionCheck()) {
// exception thrown
m_env->ExceptionDescribe();
if(j_blob != nullptr) {
m_env->DeleteLocalRef(j_blob);
}
return;
}
m_env->DeleteLocalRef(j_blob); if(j_blob != nullptr) {
m_env->DeleteLocalRef(j_blob);
}
} }
bool WriteBatchHandlerJniCallback::Continue() { bool WriteBatchHandlerJniCallback::Continue() {
jboolean jContinue = m_env->CallBooleanMethod( jboolean jContinue = m_env->CallBooleanMethod(
m_jWriteBatchHandler, m_jWriteBatchHandler,
m_jContinueMethodId); m_jContinueMethodId);
if(m_env->ExceptionCheck()) {
// exception thrown
m_env->ExceptionDescribe();
}
return static_cast<bool>(jContinue == JNI_TRUE); return static_cast<bool>(jContinue == JNI_TRUE);
} }
@ -89,16 +222,36 @@ bool WriteBatchHandlerJniCallback::Continue() {
* When calling this function * When calling this function
* you must remember to call env->DeleteLocalRef * you must remember to call env->DeleteLocalRef
* on the result after you have finished with it * on the result after you have finished with it
*
* @param s A Slice to convery to a Java byte array
*
* @return A reference to a Java byte array, or a nullptr if an
* exception occurs
*/ */
jbyteArray WriteBatchHandlerJniCallback::sliceToJArray(const Slice& s) { jbyteArray WriteBatchHandlerJniCallback::sliceToJArray(const Slice& s) {
jbyteArray ja = m_env->NewByteArray(static_cast<jsize>(s.size())); jbyteArray ja = m_env->NewByteArray(static_cast<jsize>(s.size()));
if(ja == nullptr) {
// exception thrown: OutOfMemoryError
return nullptr;
}
m_env->SetByteArrayRegion( m_env->SetByteArrayRegion(
ja, 0, static_cast<jsize>(s.size()), ja, 0, static_cast<jsize>(s.size()),
reinterpret_cast<const jbyte*>(s.data())); reinterpret_cast<const jbyte*>(s.data()));
if(m_env->ExceptionCheck()) {
if(ja != nullptr) {
m_env->DeleteLocalRef(ja);
}
// exception thrown: ArrayIndexOutOfBoundsException
return nullptr;
}
return ja; return ja;
} }
WriteBatchHandlerJniCallback::~WriteBatchHandlerJniCallback() { WriteBatchHandlerJniCallback::~WriteBatchHandlerJniCallback() {
m_env->DeleteGlobalRef(m_jWriteBatchHandler); if(m_jWriteBatchHandler != nullptr) {
m_env->DeleteGlobalRef(m_jWriteBatchHandler);
}
} }
} // namespace rocksdb } // namespace rocksdb

@ -13,13 +13,14 @@ public class RocksDBColumnFamilySample {
RocksDB.loadLibrary(); RocksDB.loadLibrary();
} }
public static void main(String[] args) throws RocksDBException { public static void main(final String[] args) throws RocksDBException {
if (args.length < 1) { if (args.length < 1) {
System.out.println( System.out.println(
"usage: RocksDBColumnFamilySample db_path"); "usage: RocksDBColumnFamilySample db_path");
return; System.exit(-1);
} }
String db_path = args[0];
final String db_path = args[0];
System.out.println("RocksDBColumnFamilySample"); System.out.println("RocksDBColumnFamilySample");
try(final Options options = new Options().setCreateIfMissing(true); try(final Options options = new Options().setCreateIfMissing(true);
@ -54,8 +55,6 @@ public class RocksDBColumnFamilySample {
// put and get from non-default column family // put and get from non-default column family
db.put(columnFamilyHandles.get(0), new WriteOptions(), db.put(columnFamilyHandles.get(0), new WriteOptions(),
"key".getBytes(), "value".getBytes()); "key".getBytes(), "value".getBytes());
String value = new String(db.get(columnFamilyHandles.get(0),
"key".getBytes()));
// atomic write // atomic write
try (final WriteBatch wb = new WriteBatch()) { try (final WriteBatch wb = new WriteBatch()) {

@ -12,31 +12,31 @@ import java.util.ArrayList;
import org.rocksdb.*; import org.rocksdb.*;
import org.rocksdb.util.SizeUnit; import org.rocksdb.util.SizeUnit;
import java.io.IOException;
public class RocksDBSample { public class RocksDBSample {
static { static {
RocksDB.loadLibrary(); RocksDB.loadLibrary();
} }
public static void main(String[] args) { public static void main(final String[] args) {
if (args.length < 1) { if (args.length < 1) {
System.out.println("usage: RocksDBSample db_path"); System.out.println("usage: RocksDBSample db_path");
return; System.exit(-1);
} }
String db_path = args[0];
String db_path_not_found = db_path + "_not_found"; final String db_path = args[0];
final String db_path_not_found = db_path + "_not_found";
System.out.println("RocksDBSample"); System.out.println("RocksDBSample");
try (final Options options = new Options(); try (final Options options = new Options();
final Filter bloomFilter = new BloomFilter(10); final Filter bloomFilter = new BloomFilter(10);
final ReadOptions readOptions = new ReadOptions() final ReadOptions readOptions = new ReadOptions()
.setFillCache(false)) { .setFillCache(false);
final RateLimiter rateLimiter = new RateLimiter(10000000,10000, 10)) {
try (final RocksDB db = RocksDB.open(options, db_path_not_found)) { try (final RocksDB db = RocksDB.open(options, db_path_not_found)) {
assert (false); assert (false);
} catch (RocksDBException e) { } catch (final RocksDBException e) {
System.out.format("caught the expected exception -- %s\n", e); System.out.format("Caught the expected exception -- %s\n", e);
} }
try { try {
@ -47,11 +47,11 @@ public class RocksDBSample {
.setMaxBackgroundCompactions(10) .setMaxBackgroundCompactions(10)
.setCompressionType(CompressionType.SNAPPY_COMPRESSION) .setCompressionType(CompressionType.SNAPPY_COMPRESSION)
.setCompactionStyle(CompactionStyle.UNIVERSAL); .setCompactionStyle(CompactionStyle.UNIVERSAL);
} catch (IllegalArgumentException e) { } catch (final IllegalArgumentException e) {
assert (false); assert (false);
} }
Statistics stats = options.statisticsPtr(); final Statistics stats = options.statisticsPtr();
assert (options.createIfMissing() == true); assert (options.createIfMissing() == true);
assert (options.writeBufferSize() == 8 * SizeUnit.KB); assert (options.writeBufferSize() == 8 * SizeUnit.KB);
@ -85,9 +85,7 @@ public class RocksDBSample {
options.setAllowMmapReads(true); options.setAllowMmapReads(true);
assert (options.tableFactoryName().equals("PlainTable")); assert (options.tableFactoryName().equals("PlainTable"));
options.setRateLimiterConfig(new GenericRateLimiterConfig(10000000, options.setRateLimiter(rateLimiter);
10000, 10));
options.setRateLimiterConfig(new GenericRateLimiterConfig(10000000));
final BlockBasedTableConfig table_options = new BlockBasedTableConfig(); final BlockBasedTableConfig table_options = new BlockBasedTableConfig();
table_options.setBlockCacheSize(64 * SizeUnit.KB) table_options.setBlockCacheSize(64 * SizeUnit.KB)
@ -114,12 +112,14 @@ public class RocksDBSample {
try (final RocksDB db = RocksDB.open(options, db_path)) { try (final RocksDB db = RocksDB.open(options, db_path)) {
db.put("hello".getBytes(), "world".getBytes()); db.put("hello".getBytes(), "world".getBytes());
byte[] value = db.get("hello".getBytes());
final byte[] value = db.get("hello".getBytes());
assert ("world".equals(new String(value))); assert ("world".equals(new String(value)));
String str = db.getProperty("rocksdb.stats");
final String str = db.getProperty("rocksdb.stats");
assert (str != null && !str.equals("")); assert (str != null && !str.equals(""));
} catch (RocksDBException e) { } catch (final RocksDBException e) {
System.out.format("[ERROR] caught the unexpceted exception -- %s\n", e); System.out.format("[ERROR] caught the unexpected exception -- %s\n", e);
assert (false); assert (false);
} }
@ -174,8 +174,8 @@ public class RocksDBSample {
value = db.get(readOptions, "world".getBytes()); value = db.get(readOptions, "world".getBytes());
assert (value == null); assert (value == null);
byte[] testKey = "asdf".getBytes(); final byte[] testKey = "asdf".getBytes();
byte[] testValue = final byte[] testValue =
"asdfghjkl;'?><MNBVCXZQWERTYUIOP{+_)(*&^%$#@".getBytes(); "asdfghjkl;'?><MNBVCXZQWERTYUIOP{+_)(*&^%$#@".getBytes();
db.put(testKey, testValue); db.put(testKey, testValue);
byte[] testResult = db.get(testKey); byte[] testResult = db.get(testKey);
@ -187,8 +187,8 @@ public class RocksDBSample {
assert (Arrays.equals(testValue, testResult)); assert (Arrays.equals(testValue, testResult));
assert (new String(testValue).equals(new String(testResult))); assert (new String(testValue).equals(new String(testResult)));
byte[] insufficientArray = new byte[10]; final byte[] insufficientArray = new byte[10];
byte[] enoughArray = new byte[50]; final byte[] enoughArray = new byte[50];
int len; int len;
len = db.get(testKey, insufficientArray); len = db.get(testKey, insufficientArray);
assert (len > insufficientArray.length); assert (len > insufficientArray.length);
@ -220,21 +220,21 @@ public class RocksDBSample {
} }
try { try {
for (TickerType statsType : TickerType.values()) { for (final TickerType statsType : TickerType.values()) {
stats.getTickerCount(statsType); stats.getTickerCount(statsType);
} }
System.out.println("getTickerCount() passed."); System.out.println("getTickerCount() passed.");
} catch (Exception e) { } catch (final Exception e) {
System.out.println("Failed in call to getTickerCount()"); System.out.println("Failed in call to getTickerCount()");
assert (false); //Should never reach here. assert (false); //Should never reach here.
} }
try { try {
for (HistogramType histogramType : HistogramType.values()) { for (final HistogramType histogramType : HistogramType.values()) {
HistogramData data = stats.getHistogramData(histogramType); HistogramData data = stats.getHistogramData(histogramType);
} }
System.out.println("getHistogramData() passed."); System.out.println("getHistogramData() passed.");
} catch (Exception e) { } catch (final Exception e) {
System.out.println("Failed in call to getHistogramData()"); System.out.println("Failed in call to getHistogramData()");
assert (false); //Should never reach here. assert (false); //Should never reach here.
} }
@ -283,16 +283,16 @@ public class RocksDBSample {
Map<byte[], byte[]> values = db.multiGet(keys); Map<byte[], byte[]> values = db.multiGet(keys);
assert (values.size() == keys.size()); assert (values.size() == keys.size());
for (byte[] value1 : values.values()) { for (final byte[] value1 : values.values()) {
assert (value1 != null); assert (value1 != null);
} }
values = db.multiGet(new ReadOptions(), keys); values = db.multiGet(new ReadOptions(), keys);
assert (values.size() == keys.size()); assert (values.size() == keys.size());
for (byte[] value1 : values.values()) { for (final byte[] value1 : values.values()) {
assert (value1 != null); assert (value1 != null);
} }
} catch (RocksDBException e) { } catch (final RocksDBException e) {
System.err.println(e); System.err.println(e);
} }
} }

@ -57,6 +57,20 @@ public abstract class AbstractSlice<T> extends RocksMutableObject {
*/ */
protected abstract T data0(long handle); protected abstract T data0(long handle);
/**
* Drops the specified {@code n}
* number of bytes from the start
* of the backing slice
*
* @param n The number of bytes to drop
*/
public abstract void removePrefix(final int n);
/**
* Clears the backing slice
*/
public abstract void clear();
/** /**
* Return the length (in bytes) of the data. * Return the length (in bytes) of the data.
* *

@ -143,7 +143,7 @@ public class ColumnFamilyOptions extends RocksObject
@Override @Override
public ColumnFamilyOptions setMergeOperator( public ColumnFamilyOptions setMergeOperator(
final MergeOperator mergeOperator) { final MergeOperator mergeOperator) {
setMergeOperator(nativeHandle_, mergeOperator.newMergeOperatorHandle()); setMergeOperator(nativeHandle_, mergeOperator.nativeHandle_);
return this; return this;
} }

@ -134,15 +134,6 @@ public class DBOptions extends RocksObject implements DBOptionsInterface {
return paranoidChecks(nativeHandle_); return paranoidChecks(nativeHandle_);
} }
@Override
public DBOptions setRateLimiterConfig(
final RateLimiterConfig config) {
assert(isOwningHandle());
rateLimiterConfig_ = config;
setOldRateLimiter(nativeHandle_, config.newRateLimiterHandle());
return this;
}
@Override @Override
public DBOptions setRateLimiter(final RateLimiter rateLimiter) { public DBOptions setRateLimiter(final RateLimiter rateLimiter) {
assert(isOwningHandle()); assert(isOwningHandle());
@ -650,9 +641,6 @@ public long delayedWriteRate(){
private native void setParanoidChecks( private native void setParanoidChecks(
long handle, boolean paranoidChecks); long handle, boolean paranoidChecks);
private native boolean paranoidChecks(long handle); private native boolean paranoidChecks(long handle);
@Deprecated
private native void setOldRateLimiter(long handle,
long rateLimiterHandle);
private native void setRateLimiter(long handle, private native void setRateLimiter(long handle,
long rateLimiterHandle); long rateLimiterHandle);
private native void setLogger(long handle, private native void setLogger(long handle,
@ -750,6 +738,5 @@ public long delayedWriteRate(){
private native long delayedWriteRate(long handle); private native long delayedWriteRate(long handle);
int numShardBits_; int numShardBits_;
RateLimiterConfig rateLimiterConfig_;
RateLimiter rateLimiter_; RateLimiter rateLimiter_;
} }

@ -118,18 +118,6 @@ public interface DBOptionsInterface {
*/ */
boolean paranoidChecks(); boolean paranoidChecks();
/**
* Use to control write rate of flush and compaction. Flush has higher
* priority than compaction. Rate limiting is disabled if nullptr.
* Default: nullptr
*
* @param config rate limiter config.
* @return the instance of the current Object.
* @deprecated See: {@link #setRateLimiter(RateLimiter)}.
*/
@Deprecated
Object setRateLimiterConfig(RateLimiterConfig config);
/** /**
* Use to control write rate of flush and compaction. Flush has higher * Use to control write rate of flush and compaction. Flush has higher
* priority than compaction. Rate limiting is disabled if nullptr. * priority than compaction. Rate limiting is disabled if nullptr.

@ -18,6 +18,13 @@ import java.nio.ByteBuffer;
public class DirectSlice extends AbstractSlice<ByteBuffer> { public class DirectSlice extends AbstractSlice<ByteBuffer> {
public final static DirectSlice NONE = new DirectSlice(); public final static DirectSlice NONE = new DirectSlice();
/**
* Indicates whether we have to free the memory pointed to by the Slice
*/
private final boolean internalBuffer;
private volatile boolean cleared = false;
private volatile long internalBufferOffset = 0;
/** /**
* Called from JNI to construct a new Java DirectSlice * Called from JNI to construct a new Java DirectSlice
* without an underlying C++ object set * without an underlying C++ object set
@ -32,6 +39,7 @@ public class DirectSlice extends AbstractSlice<ByteBuffer> {
*/ */
DirectSlice() { DirectSlice() {
super(); super();
this.internalBuffer = false;
} }
/** /**
@ -43,6 +51,7 @@ public class DirectSlice extends AbstractSlice<ByteBuffer> {
*/ */
public DirectSlice(final String str) { public DirectSlice(final String str) {
super(createNewSliceFromString(str)); super(createNewSliceFromString(str));
this.internalBuffer = true;
} }
/** /**
@ -55,6 +64,7 @@ public class DirectSlice extends AbstractSlice<ByteBuffer> {
*/ */
public DirectSlice(final ByteBuffer data, final int length) { public DirectSlice(final ByteBuffer data, final int length) {
super(createNewDirectSlice0(ensureDirect(data), length)); super(createNewDirectSlice0(ensureDirect(data), length));
this.internalBuffer = false;
} }
/** /**
@ -66,12 +76,13 @@ public class DirectSlice extends AbstractSlice<ByteBuffer> {
*/ */
public DirectSlice(final ByteBuffer data) { public DirectSlice(final ByteBuffer data) {
super(createNewDirectSlice1(ensureDirect(data))); super(createNewDirectSlice1(ensureDirect(data)));
this.internalBuffer = false;
} }
private static ByteBuffer ensureDirect(final ByteBuffer data) { private static ByteBuffer ensureDirect(final ByteBuffer data) {
// TODO(AR) consider throwing a checked exception, as if it's not direct if(!data.isDirect()) {
// this can SIGSEGV throw new IllegalArgumentException("The ByteBuffer must be direct");
assert(data.isDirect()); }
return data; return data;
} }
@ -83,26 +94,29 @@ public class DirectSlice extends AbstractSlice<ByteBuffer> {
* *
* @return the requested byte * @return the requested byte
*/ */
public byte get(int offset) { public byte get(final int offset) {
return get0(getNativeHandle(), offset); return get0(getNativeHandle(), offset);
} }
/** @Override
* Clears the backing slice
*/
public void clear() { public void clear() {
clear0(getNativeHandle()); clear0(getNativeHandle(), !cleared && internalBuffer, internalBufferOffset);
cleared = true;
} }
/** @Override
* Drops the specified {@code n}
* number of bytes from the start
* of the backing slice
*
* @param n The number of bytes to drop
*/
public void removePrefix(final int n) { public void removePrefix(final int n) {
removePrefix0(getNativeHandle(), n); removePrefix0(getNativeHandle(), n);
this.internalBufferOffset += n;
}
@Override
protected void disposeInternal() {
final long nativeHandle = getNativeHandle();
if(!cleared && internalBuffer) {
disposeInternalBuf(nativeHandle, internalBufferOffset);
}
disposeInternal(nativeHandle);
} }
private native static long createNewDirectSlice0(final ByteBuffer data, private native static long createNewDirectSlice0(final ByteBuffer data,
@ -110,6 +124,9 @@ public class DirectSlice extends AbstractSlice<ByteBuffer> {
private native static long createNewDirectSlice1(final ByteBuffer data); private native static long createNewDirectSlice1(final ByteBuffer data);
@Override protected final native ByteBuffer data0(long handle); @Override protected final native ByteBuffer data0(long handle);
private native byte get0(long handle, int offset); private native byte get0(long handle, int offset);
private native void clear0(long handle); private native void clear0(long handle, boolean internalBuffer,
long internalBufferOffset);
private native void removePrefix0(long handle, int length); private native void removePrefix0(long handle, int length);
private native void disposeInternalBuf(final long handle,
long internalBufferOffset);
} }

@ -134,15 +134,15 @@ public class EnvOptions extends RocksObject {
return writableFileMaxBufferSize(nativeHandle_); return writableFileMaxBufferSize(nativeHandle_);
} }
public EnvOptions setRateLimiterConfig(final RateLimiterConfig rateLimiterConfig) { public EnvOptions setRateLimiter(final RateLimiter rateLimiter) {
this.rateLimiterConfig = rateLimiterConfig; this.rateLimiter = rateLimiter;
setRateLimiter(nativeHandle_, rateLimiterConfig.newRateLimiterHandle()); setRateLimiter(nativeHandle_, rateLimiter.nativeHandle_);
return this; return this;
} }
public RateLimiterConfig rateLimiterConfig() { public RateLimiter rateLimiter() {
assert(isOwningHandle()); assert(isOwningHandle());
return rateLimiterConfig; return rateLimiter;
} }
private native static long newEnvOptions(); private native static long newEnvOptions();
@ -203,5 +203,5 @@ public class EnvOptions extends RocksObject {
private native void setRateLimiter(final long handle, final long rateLimiterHandle); private native void setRateLimiter(final long handle, final long rateLimiterHandle);
private RateLimiterConfig rateLimiterConfig; private RateLimiter rateLimiter;
} }

@ -1,68 +0,0 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
/**
* Config for rate limiter, which is used to control write rate of flush and
* compaction.
*
* @see RateLimiterConfig
* @deprecated obsolete. See: {@link org.rocksdb.RateLimiter}.
*/
@Deprecated
public class GenericRateLimiterConfig extends RateLimiterConfig {
private static final long DEFAULT_REFILL_PERIOD_MICROS = (100 * 1000);
private static final int DEFAULT_FAIRNESS = 10;
/**
* GenericRateLimiterConfig constructor
*
* @param rateBytesPerSecond this is the only parameter you want to set
* most of the time. It controls the total write rate of compaction
* and flush in bytes per second. Currently, RocksDB does not enforce
* rate limit for anything other than flush and compaction, e.g. write to WAL.
* @param refillPeriodMicros this controls how often tokens are refilled. For example,
* when rate_bytes_per_sec is set to 10MB/s and refill_period_us is set to
* 100ms, then 1MB is refilled every 100ms internally. Larger value can lead to
* burstier writes while smaller value introduces more CPU overhead.
* The default should work for most cases.
* @param fairness RateLimiter accepts high-pri requests and low-pri requests.
* A low-pri request is usually blocked in favor of hi-pri request. Currently,
* RocksDB assigns low-pri to request from compaction and high-pri to request
* from flush. Low-pri requests can get blocked if flush requests come in
* continuously. This fairness parameter grants low-pri requests permission by
* fairness chance even though high-pri requests exist to avoid starvation.
* You should be good by leaving it at default 10.
*/
public GenericRateLimiterConfig(final long rateBytesPerSecond,
final long refillPeriodMicros, final int fairness) {
rateBytesPerSecond_ = rateBytesPerSecond;
refillPeriodMicros_ = refillPeriodMicros;
fairness_ = fairness;
}
/**
* GenericRateLimiterConfig constructor
*
* @param rateBytesPerSecond this is the only parameter you want to set
* most of the time. It controls the total write rate of compaction
* and flush in bytes per second. Currently, RocksDB does not enforce
* rate limit for anything other than flush and compaction, e.g. write to WAL.
*/
public GenericRateLimiterConfig(final long rateBytesPerSecond) {
this(rateBytesPerSecond, DEFAULT_REFILL_PERIOD_MICROS, DEFAULT_FAIRNESS);
}
@Override protected long newRateLimiterHandle() {
return newRateLimiterHandle(rateBytesPerSecond_, refillPeriodMicros_,
fairness_);
}
private native long newRateLimiterHandle(long rateBytesPerSecond,
long refillPeriodMicros, int fairness);
private final long rateBytesPerSecond_;
private final long refillPeriodMicros_;
private final int fairness_;
}

@ -10,6 +10,8 @@ package org.rocksdb;
* two merge operands held under the same key in order to obtain a single * two merge operands held under the same key in order to obtain a single
* value. * value.
*/ */
public interface MergeOperator { public abstract class MergeOperator extends RocksObject {
long newMergeOperatorHandle(); protected MergeOperator(final long nativeHandle) {
super(nativeHandle);
}
} }

@ -49,6 +49,10 @@ public class MutableColumnFamilyOptions {
* For int[] values, each int should be separated by a comma, e.g. * For int[] values, each int should be separated by a comma, e.g.
* *
* key1=value1;intArrayKey1=1,2,3 * key1=value1;intArrayKey1=1,2,3
*
* @param str The string representation of the mutable column family options
*
* @return A builder for the mutable column family options
*/ */
public static MutableColumnFamilyOptionsBuilder parse(final String str) { public static MutableColumnFamilyOptionsBuilder parse(final String str) {
Objects.requireNonNull(str); Objects.requireNonNull(str);

@ -188,7 +188,7 @@ public class Options extends RocksObject
@Override @Override
public Options setMergeOperator(final MergeOperator mergeOperator) { public Options setMergeOperator(final MergeOperator mergeOperator) {
setMergeOperator(nativeHandle_, mergeOperator.newMergeOperatorHandle()); setMergeOperator(nativeHandle_, mergeOperator.nativeHandle_);
return this; return this;
} }
@ -683,13 +683,6 @@ public class Options extends RocksObject
return this; return this;
} }
@Override
public Options setRateLimiterConfig(final RateLimiterConfig config) {
rateLimiterConfig_ = config;
setOldRateLimiter(nativeHandle_, config.newRateLimiterHandle());
return this;
}
@Override @Override
public Options setRateLimiter(final RateLimiter rateLimiter) { public Options setRateLimiter(final RateLimiter rateLimiter) {
assert(isOwningHandle()); assert(isOwningHandle());
@ -1202,9 +1195,6 @@ public class Options extends RocksObject
private native void setParanoidChecks( private native void setParanoidChecks(
long handle, boolean paranoidChecks); long handle, boolean paranoidChecks);
private native boolean paranoidChecks(long handle); private native boolean paranoidChecks(long handle);
@Deprecated
private native void setOldRateLimiter(long handle,
long rateLimiterHandle);
private native void setRateLimiter(long handle, private native void setRateLimiter(long handle,
long rateLimiterHandle); long rateLimiterHandle);
private native void setLogger(long handle, private native void setLogger(long handle,
@ -1436,7 +1426,6 @@ public class Options extends RocksObject
Env env_; Env env_;
MemTableConfig memTableConfig_; MemTableConfig memTableConfig_;
TableFormatConfig tableFormatConfig_; TableFormatConfig tableFormatConfig_;
RateLimiterConfig rateLimiterConfig_;
RateLimiter rateLimiter_; RateLimiter rateLimiter_;
AbstractComparator<? extends AbstractSlice<?>> comparator_; AbstractComparator<? extends AbstractSlice<?>> comparator_;
} }

@ -1,26 +0,0 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
/**
* Config for rate limiter, which is used to control write rate of flush and
* compaction.
*
* @deprecated obsolete. See: {@link org.rocksdb.RateLimiter}.
*/
@Deprecated
public abstract class RateLimiterConfig {
/**
* This function should only be called by
* {@link org.rocksdb.DBOptions#setRateLimiter(long, long)}, which will
* create a c++ shared-pointer to the c++ {@code RateLimiter} that is associated
* with a Java RateLimiterConfig.
*
* @see org.rocksdb.DBOptions#setRateLimiter(long, long)
*
* @return native handle address to rate limiter instance.
*/
abstract protected long newRateLimiterHandle();
}

@ -520,11 +520,11 @@ public class RocksDB extends RocksObject {
* to make this lighter weight is to avoid doing any IOs. * to make this lighter weight is to avoid doing any IOs.
* *
* @param key byte array of a key to search for * @param key byte array of a key to search for
* @param value StringBuffer instance which is a out parameter if a value is * @param value StringBuilder instance which is a out parameter if a value is
* found in block-cache. * found in block-cache.
* @return boolean value indicating if key does not exist or might exist. * @return boolean value indicating if key does not exist or might exist.
*/ */
public boolean keyMayExist(final byte[] key, final StringBuffer value) { public boolean keyMayExist(final byte[] key, final StringBuilder value) {
return keyMayExist(nativeHandle_, key, 0, key.length, value); return keyMayExist(nativeHandle_, key, 0, key.length, value);
} }
@ -537,12 +537,12 @@ public class RocksDB extends RocksObject {
* *
* @param columnFamilyHandle {@link ColumnFamilyHandle} instance * @param columnFamilyHandle {@link ColumnFamilyHandle} instance
* @param key byte array of a key to search for * @param key byte array of a key to search for
* @param value StringBuffer instance which is a out parameter if a value is * @param value StringBuilder instance which is a out parameter if a value is
* found in block-cache. * found in block-cache.
* @return boolean value indicating if key does not exist or might exist. * @return boolean value indicating if key does not exist or might exist.
*/ */
public boolean keyMayExist(final ColumnFamilyHandle columnFamilyHandle, public boolean keyMayExist(final ColumnFamilyHandle columnFamilyHandle,
final byte[] key, final StringBuffer value) { final byte[] key, final StringBuilder value) {
return keyMayExist(nativeHandle_, key, 0, key.length, return keyMayExist(nativeHandle_, key, 0, key.length,
columnFamilyHandle.nativeHandle_, value); columnFamilyHandle.nativeHandle_, value);
} }
@ -556,12 +556,12 @@ public class RocksDB extends RocksObject {
* *
* @param readOptions {@link ReadOptions} instance * @param readOptions {@link ReadOptions} instance
* @param key byte array of a key to search for * @param key byte array of a key to search for
* @param value StringBuffer instance which is a out parameter if a value is * @param value StringBuilder instance which is a out parameter if a value is
* found in block-cache. * found in block-cache.
* @return boolean value indicating if key does not exist or might exist. * @return boolean value indicating if key does not exist or might exist.
*/ */
public boolean keyMayExist(final ReadOptions readOptions, public boolean keyMayExist(final ReadOptions readOptions,
final byte[] key, final StringBuffer value) { final byte[] key, final StringBuilder value) {
return keyMayExist(nativeHandle_, readOptions.nativeHandle_, return keyMayExist(nativeHandle_, readOptions.nativeHandle_,
key, 0, key.length, value); key, 0, key.length, value);
} }
@ -576,13 +576,13 @@ public class RocksDB extends RocksObject {
* @param readOptions {@link ReadOptions} instance * @param readOptions {@link ReadOptions} instance
* @param columnFamilyHandle {@link ColumnFamilyHandle} instance * @param columnFamilyHandle {@link ColumnFamilyHandle} instance
* @param key byte array of a key to search for * @param key byte array of a key to search for
* @param value StringBuffer instance which is a out parameter if a value is * @param value StringBuilder instance which is a out parameter if a value is
* found in block-cache. * found in block-cache.
* @return boolean value indicating if key does not exist or might exist. * @return boolean value indicating if key does not exist or might exist.
*/ */
public boolean keyMayExist(final ReadOptions readOptions, public boolean keyMayExist(final ReadOptions readOptions,
final ColumnFamilyHandle columnFamilyHandle, final byte[] key, final ColumnFamilyHandle columnFamilyHandle, final byte[] key,
final StringBuffer value) { final StringBuilder value) {
return keyMayExist(nativeHandle_, readOptions.nativeHandle_, return keyMayExist(nativeHandle_, readOptions.nativeHandle_,
key, 0, key.length, columnFamilyHandle.nativeHandle_, key, 0, key.length, columnFamilyHandle.nativeHandle_,
value); value);
@ -685,6 +685,9 @@ public class RocksDB extends RocksObject {
columnFamilyHandle.nativeHandle_); columnFamilyHandle.nativeHandle_);
} }
// TODO(AR) we should improve the #get() API, returning -1 (RocksDB.NOT_FOUND) is not very nice
// when we could communicate better status into, also the C++ code show that -2 could be returned
/** /**
* Get the value associated with the specified key within column family* * Get the value associated with the specified key within column family*
* @param key the key to retrieve the value. * @param key the key to retrieve the value.
@ -1917,6 +1920,8 @@ public class RocksDB extends RocksObject {
* This function will wait until all currently running background processes * This function will wait until all currently running background processes
* finish. After it returns, no background process will be run until * finish. After it returns, no background process will be run until
* {@link #continueBackgroundWork()} is called * {@link #continueBackgroundWork()} is called
*
* @throws RocksDBException If an error occurs when pausing background work
*/ */
public void pauseBackgroundWork() throws RocksDBException { public void pauseBackgroundWork() throws RocksDBException {
pauseBackgroundWork(nativeHandle_); pauseBackgroundWork(nativeHandle_);
@ -1925,6 +1930,8 @@ public class RocksDB extends RocksObject {
/** /**
* Resumes backround work which was suspended by * Resumes backround work which was suspended by
* previously calling {@link #pauseBackgroundWork()} * previously calling {@link #pauseBackgroundWork()}
*
* @throws RocksDBException If an error occurs when resuming background work
*/ */
public void continueBackgroundWork() throws RocksDBException { public void continueBackgroundWork() throws RocksDBException {
continueBackgroundWork(nativeHandle_); continueBackgroundWork(nativeHandle_);
@ -2182,17 +2189,17 @@ public class RocksDB extends RocksObject {
long wbwiHandle) throws RocksDBException; long wbwiHandle) throws RocksDBException;
protected native boolean keyMayExist(final long handle, final byte[] key, protected native boolean keyMayExist(final long handle, final byte[] key,
final int keyOffset, final int keyLength, final int keyOffset, final int keyLength,
final StringBuffer stringBuffer); final StringBuilder stringBuilder);
protected native boolean keyMayExist(final long handle, final byte[] key, protected native boolean keyMayExist(final long handle, final byte[] key,
final int keyOffset, final int keyLength, final long cfHandle, final int keyOffset, final int keyLength, final long cfHandle,
final StringBuffer stringBuffer); final StringBuilder stringBuilder);
protected native boolean keyMayExist(final long handle, protected native boolean keyMayExist(final long handle,
final long optionsHandle, final byte[] key, final int keyOffset, final long optionsHandle, final byte[] key, final int keyOffset,
final int keyLength, final StringBuffer stringBuffer); final int keyLength, final StringBuilder stringBuilder);
protected native boolean keyMayExist(final long handle, protected native boolean keyMayExist(final long handle,
final long optionsHandle, final byte[] key, final int keyOffset, final long optionsHandle, final byte[] key, final int keyOffset,
final int keyLength, final long cfHandle, final int keyLength, final long cfHandle,
final StringBuffer stringBuffer); final StringBuilder stringBuilder);
protected native void merge(long handle, byte[] key, int keyOffset, protected native void merge(long handle, byte[] key, int keyOffset,
int keyLength, byte[] value, int valueOffset, int valueLength) int keyLength, byte[] value, int valueOffset, int valueLength)
throws RocksDBException; throws RocksDBException;

@ -30,6 +30,24 @@ public abstract class RocksMutableObject extends AbstractNativeReference {
this.owningHandle_ = true; this.owningHandle_ = true;
} }
/**
* Closes the existing handle, and changes the handle to the new handle
*
* @param newNativeHandle The C++ pointer to the new native object
* @param owningNativeHandle true if we own the new native object
*/
public synchronized void resetNativeHandle(final long newNativeHandle,
final boolean owningNativeHandle) {
close();
setNativeHandle(newNativeHandle, owningNativeHandle);
}
/**
* Sets the handle (C++ pointer) of the underlying C++ native object
*
* @param nativeHandle The C++ pointer to the native object
* @param owningNativeHandle true if we own the native object
*/
public synchronized void setNativeHandle(final long nativeHandle, public synchronized void setNativeHandle(final long nativeHandle,
final boolean owningNativeHandle) { final boolean owningNativeHandle) {
this.nativeHandle_ = nativeHandle; this.nativeHandle_ = nativeHandle;

@ -14,6 +14,13 @@ package org.rocksdb;
* values consider using {@link org.rocksdb.DirectSlice}</p> * values consider using {@link org.rocksdb.DirectSlice}</p>
*/ */
public class Slice extends AbstractSlice<byte[]> { public class Slice extends AbstractSlice<byte[]> {
/**
* Indicates whether we have to free the memory pointed to by the Slice
*/
private volatile boolean cleared;
private volatile long internalBufferOffset = 0;
/** /**
* <p>Called from JNI to construct a new Java Slice * <p>Called from JNI to construct a new Java Slice
* without an underlying C++ object set * without an underlying C++ object set
@ -27,6 +34,7 @@ public class Slice extends AbstractSlice<byte[]> {
* Slice objects through this, they are not creating underlying C++ Slice * Slice objects through this, they are not creating underlying C++ Slice
* objects, and so there is nothing to free (dispose) from Java.</p> * objects, and so there is nothing to free (dispose) from Java.</p>
*/ */
@SuppressWarnings("unused")
private Slice() { private Slice() {
super(); super();
} }
@ -62,6 +70,18 @@ public class Slice extends AbstractSlice<byte[]> {
super(createNewSlice1(data)); super(createNewSlice1(data));
} }
@Override
public void clear() {
clear0(getNativeHandle(), !cleared, internalBufferOffset);
cleared = true;
}
@Override
public void removePrefix(final int n) {
removePrefix0(getNativeHandle(), n);
this.internalBufferOffset += n;
}
/** /**
* <p>Deletes underlying C++ slice pointer * <p>Deletes underlying C++ slice pointer
* and any buffered data.</p> * and any buffered data.</p>
@ -74,7 +94,9 @@ public class Slice extends AbstractSlice<byte[]> {
@Override @Override
protected void disposeInternal() { protected void disposeInternal() {
final long nativeHandle = getNativeHandle(); final long nativeHandle = getNativeHandle();
disposeInternalBuf(nativeHandle); if(!cleared) {
disposeInternalBuf(nativeHandle, internalBufferOffset);
}
super.disposeInternal(nativeHandle); super.disposeInternal(nativeHandle);
} }
@ -82,5 +104,9 @@ public class Slice extends AbstractSlice<byte[]> {
private native static long createNewSlice0(final byte[] data, private native static long createNewSlice0(final byte[] data,
final int length); final int length);
private native static long createNewSlice1(final byte[] data); private native static long createNewSlice1(final byte[] data);
private native void disposeInternalBuf(final long handle); private native void clear0(long handle, boolean internalBuffer,
long internalBufferOffset);
private native void removePrefix0(long handle, int length);
private native void disposeInternalBuf(final long handle,
long internalBufferOffset);
} }

@ -9,9 +9,11 @@ package org.rocksdb;
* StringAppendOperator is a merge operator that concatenates * StringAppendOperator is a merge operator that concatenates
* two strings. * two strings.
*/ */
public class StringAppendOperator implements MergeOperator { public class StringAppendOperator extends MergeOperator {
@Override public long newMergeOperatorHandle() { public StringAppendOperator() {
return newMergeOperatorHandleImpl(); super(newSharedStringAppendOperator());
} }
private native long newMergeOperatorHandleImpl();
private native static long newSharedStringAppendOperator();
@Override protected final native void disposeInternal(final long handle);
} }

@ -65,7 +65,7 @@ public class TransactionLogIterator extends RocksObject {
* by a TransactionLogIterator containing a sequence * by a TransactionLogIterator containing a sequence
* number and a {@link WriteBatch} instance.</p> * number and a {@link WriteBatch} instance.</p>
*/ */
public final class BatchResult { public static final class BatchResult {
/** /**
* <p>Constructor of BatchResult class.</p> * <p>Constructor of BatchResult class.</p>
* *

@ -29,12 +29,11 @@ public class WBWIRocksIterator
*/ */
public WriteEntry entry() { public WriteEntry entry() {
assert(isOwningHandle()); assert(isOwningHandle());
assert(entry != null);
final long ptrs[] = entry1(nativeHandle_); final long ptrs[] = entry1(nativeHandle_);
entry.type = WriteType.fromId((byte)ptrs[0]); entry.type = WriteType.fromId((byte)ptrs[0]);
entry.key.setNativeHandle(ptrs[1], true); entry.key.resetNativeHandle(ptrs[1], ptrs[1] != 0);
entry.value.setNativeHandle(ptrs[2], ptrs[2] != 0); entry.value.resetNativeHandle(ptrs[2], ptrs[2] != 0);
return entry; return entry;
} }
@ -75,6 +74,12 @@ public class WBWIRocksIterator
} }
} }
@Override
public void close() {
entry.close();
super.close();
}
/** /**
* Represents an entry returned by * Represents an entry returned by
* {@link org.rocksdb.WBWIRocksIterator#entry()} * {@link org.rocksdb.WBWIRocksIterator#entry()}
@ -84,7 +89,7 @@ public class WBWIRocksIterator
* or {@link org.rocksdb.WBWIRocksIterator.WriteType#LOG} * or {@link org.rocksdb.WBWIRocksIterator.WriteType#LOG}
* will not have a value. * will not have a value.
*/ */
public static class WriteEntry { public static class WriteEntry implements AutoCloseable {
WriteType type = null; WriteType type = null;
final DirectSlice key; final DirectSlice key;
final DirectSlice value; final DirectSlice value;
@ -101,7 +106,8 @@ public class WBWIRocksIterator
value = new DirectSlice(); value = new DirectSlice();
} }
public WriteEntry(WriteType type, DirectSlice key, DirectSlice value) { public WriteEntry(final WriteType type, final DirectSlice key,
final DirectSlice value) {
this.type = type; this.type = type;
this.key = key; this.key = key;
this.value = value; this.value = value;
@ -154,7 +160,7 @@ public class WBWIRocksIterator
} }
@Override @Override
public boolean equals(Object other) { public boolean equals(final Object other) {
if(other == null) { if(other == null) {
return false; return false;
} else if (this == other) { } else if (this == other) {
@ -168,5 +174,11 @@ public class WBWIRocksIterator
return false; return false;
} }
} }
@Override
public void close() {
value.close();
key.close();
}
} }
} }

@ -144,6 +144,9 @@ public class WriteBatchWithIndex extends AbstractWriteBatch {
* @param options The database options to use * @param options The database options to use
* @param key The key to read the value for * @param key The key to read the value for
* *
* @return a byte array storing the value associated with the input key if
* any. null if it does not find the specified key.
*
* @throws RocksDBException if the batch does not have enough data to resolve * @throws RocksDBException if the batch does not have enough data to resolve
* Merge operations, MergeInProgress status may be returned. * Merge operations, MergeInProgress status may be returned.
*/ */
@ -160,6 +163,9 @@ public class WriteBatchWithIndex extends AbstractWriteBatch {
* @param options The database options to use * @param options The database options to use
* @param key The key to read the value for * @param key The key to read the value for
* *
* @return a byte array storing the value associated with the input key if
* any. null if it does not find the specified key.
*
* @throws RocksDBException if the batch does not have enough data to resolve * @throws RocksDBException if the batch does not have enough data to resolve
* Merge operations, MergeInProgress status may be returned. * Merge operations, MergeInProgress status may be returned.
*/ */
@ -181,10 +187,14 @@ public class WriteBatchWithIndex extends AbstractWriteBatch {
* (the keys in this batch do not yet belong to any snapshot and will be * (the keys in this batch do not yet belong to any snapshot and will be
* fetched regardless). * fetched regardless).
* *
* @param db The Rocks database
* @param columnFamilyHandle The column family to retrieve the value from * @param columnFamilyHandle The column family to retrieve the value from
* @param options The read options to use * @param options The read options to use
* @param key The key to read the value for * @param key The key to read the value for
* *
* @return a byte array storing the value associated with the input key if
* any. null if it does not find the specified key.
*
* @throws RocksDBException if the value for the key cannot be read * @throws RocksDBException if the value for the key cannot be read
*/ */
public byte[] getFromBatchAndDB(final RocksDB db, final ColumnFamilyHandle columnFamilyHandle, public byte[] getFromBatchAndDB(final RocksDB db, final ColumnFamilyHandle columnFamilyHandle,
@ -207,9 +217,13 @@ public class WriteBatchWithIndex extends AbstractWriteBatch {
* (the keys in this batch do not yet belong to any snapshot and will be * (the keys in this batch do not yet belong to any snapshot and will be
* fetched regardless). * fetched regardless).
* *
* @param db The Rocks database
* @param options The read options to use * @param options The read options to use
* @param key The key to read the value for * @param key The key to read the value for
* *
* @return a byte array storing the value associated with the input key if
* any. null if it does not find the specified key.
*
* @throws RocksDBException if the value for the key cannot be read * @throws RocksDBException if the value for the key cannot be read
*/ */
public byte[] getFromBatchAndDB(final RocksDB db, final ReadOptions options, public byte[] getFromBatchAndDB(final RocksDB db, final ReadOptions options,

@ -203,8 +203,9 @@ public class ColumnFamilyTest {
@Test @Test
public void writeBatch() throws RocksDBException { public void writeBatch() throws RocksDBException {
try (final ColumnFamilyOptions defaultCfOptions = new ColumnFamilyOptions() try (final StringAppendOperator stringAppendOperator = new StringAppendOperator();
.setMergeOperator(new StringAppendOperator())) { final ColumnFamilyOptions defaultCfOptions = new ColumnFamilyOptions()
.setMergeOperator(stringAppendOperator)) {
final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList( final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY,
defaultCfOptions), defaultCfOptions),

@ -388,25 +388,11 @@ public class DBOptionsTest {
} }
} }
@Test
public void rateLimiterConfig() {
try(final DBOptions options = new DBOptions();
final DBOptions anotherOptions = new DBOptions()) {
final RateLimiterConfig rateLimiterConfig =
new GenericRateLimiterConfig(1000, 100 * 1000, 1);
options.setRateLimiterConfig(rateLimiterConfig);
// Test with parameter initialization
anotherOptions.setRateLimiterConfig(
new GenericRateLimiterConfig(1000));
}
}
@Test @Test
public void rateLimiter() { public void rateLimiter() {
try(final DBOptions options = new DBOptions(); try(final DBOptions options = new DBOptions();
final DBOptions anotherOptions = new DBOptions()) { final DBOptions anotherOptions = new DBOptions();
final RateLimiter rateLimiter = new RateLimiter(1000, 100 * 1000, 1); final RateLimiter rateLimiter = new RateLimiter(1000, 100 * 1000, 1)) {
options.setRateLimiter(rateLimiter); options.setRateLimiter(rateLimiter);
// Test with parameter initialization // Test with parameter initialization
anotherOptions.setRateLimiter( anotherOptions.setRateLimiter(

@ -54,7 +54,7 @@ public class DirectSliceTest {
} }
} }
@Test(expected = AssertionError.class) @Test(expected = IllegalArgumentException.class)
public void directSliceInitWithoutDirectAllocation() { public void directSliceInitWithoutDirectAllocation() {
final byte[] data = "Some text".getBytes(); final byte[] data = "Some text".getBytes();
final ByteBuffer buffer = ByteBuffer.wrap(data); final ByteBuffer buffer = ByteBuffer.wrap(data);
@ -63,7 +63,7 @@ public class DirectSliceTest {
} }
} }
@Test(expected = AssertionError.class) @Test(expected = IllegalArgumentException.class)
public void directSlicePrefixInitWithoutDirectAllocation() { public void directSlicePrefixInitWithoutDirectAllocation() {
final byte[] data = "Some text".getBytes(); final byte[] data = "Some text".getBytes();
final ByteBuffer buffer = ByteBuffer.wrap(data); final ByteBuffer buffer = ByteBuffer.wrap(data);
@ -71,4 +71,23 @@ public class DirectSliceTest {
//no-op //no-op
} }
} }
@Test
public void directSliceClear() {
try(final DirectSlice directSlice = new DirectSlice("abc")) {
assertThat(directSlice.toString()).isEqualTo("abc");
directSlice.clear();
assertThat(directSlice.toString()).isEmpty();
directSlice.clear(); // make sure we don't double-free
}
}
@Test
public void directSliceRemovePrefix() {
try(final DirectSlice directSlice = new DirectSlice("abc")) {
assertThat(directSlice.toString()).isEqualTo("abc");
directSlice.removePrefix(1);
assertThat(directSlice.toString()).isEqualTo("bc");
}
}
} }

@ -118,16 +118,16 @@ public class EnvOptionsTest {
} }
@Test @Test
public void rateLimiterConfig() { public void rateLimiter() {
try (final EnvOptions envOptions = new EnvOptions()) { try (final EnvOptions envOptions = new EnvOptions();
final RateLimiterConfig rateLimiterConfig1 = final RateLimiter rateLimiter1 = new RateLimiter(1000, 100 * 1000, 1)) {
new GenericRateLimiterConfig(1000, 100 * 1000, 1); envOptions.setRateLimiter(rateLimiter1);
envOptions.setRateLimiterConfig(rateLimiterConfig1); assertThat(envOptions.rateLimiter()).isEqualTo(rateLimiter1);
assertThat(envOptions.rateLimiterConfig()).isEqualTo(rateLimiterConfig1);
try(final RateLimiter rateLimiter2 = new RateLimiter(1000)) {
final RateLimiterConfig rateLimiterConfig2 = new GenericRateLimiterConfig(1000); envOptions.setRateLimiter(rateLimiter2);
envOptions.setRateLimiterConfig(rateLimiterConfig2); assertThat(envOptions.rateLimiter()).isEqualTo(rateLimiter2);
assertThat(envOptions.rateLimiterConfig()).isEqualTo(rateLimiterConfig2); }
} }
} }
} }

@ -43,21 +43,21 @@ public class KeyMayExistTest {
isEqualTo(2); isEqualTo(2);
db.put("key".getBytes(), "value".getBytes()); db.put("key".getBytes(), "value".getBytes());
// Test without column family // Test without column family
StringBuffer retValue = new StringBuffer(); StringBuilder retValue = new StringBuilder();
boolean exists = db.keyMayExist("key".getBytes(), retValue); boolean exists = db.keyMayExist("key".getBytes(), retValue);
assertThat(exists).isTrue(); assertThat(exists).isTrue();
assertThat(retValue.toString()).isEqualTo("value"); assertThat(retValue.toString()).isEqualTo("value");
// Test without column family but with readOptions // Test without column family but with readOptions
try (final ReadOptions readOptions = new ReadOptions()) { try (final ReadOptions readOptions = new ReadOptions()) {
retValue = new StringBuffer(); retValue = new StringBuilder();
exists = db.keyMayExist(readOptions, "key".getBytes(), retValue); exists = db.keyMayExist(readOptions, "key".getBytes(), retValue);
assertThat(exists).isTrue(); assertThat(exists).isTrue();
assertThat(retValue.toString()).isEqualTo("value"); assertThat(retValue.toString()).isEqualTo("value");
} }
// Test with column family // Test with column family
retValue = new StringBuffer(); retValue = new StringBuilder();
exists = db.keyMayExist(columnFamilyHandleList.get(0), "key".getBytes(), exists = db.keyMayExist(columnFamilyHandleList.get(0), "key".getBytes(),
retValue); retValue);
assertThat(exists).isTrue(); assertThat(exists).isTrue();
@ -65,7 +65,7 @@ public class KeyMayExistTest {
// Test with column family and readOptions // Test with column family and readOptions
try (final ReadOptions readOptions = new ReadOptions()) { try (final ReadOptions readOptions = new ReadOptions()) {
retValue = new StringBuffer(); retValue = new StringBuilder();
exists = db.keyMayExist(readOptions, exists = db.keyMayExist(readOptions,
columnFamilyHandleList.get(0), "key".getBytes(), columnFamilyHandleList.get(0), "key".getBytes(),
retValue); retValue);

@ -89,11 +89,10 @@ public class MergeTest {
@Test @Test
public void operatorOption() public void operatorOption()
throws InterruptedException, RocksDBException { throws InterruptedException, RocksDBException {
final StringAppendOperator stringAppendOperator = try (final StringAppendOperator stringAppendOperator = new StringAppendOperator();
new StringAppendOperator(); final Options opt = new Options()
try (final Options opt = new Options() .setCreateIfMissing(true)
.setCreateIfMissing(true) .setMergeOperator(stringAppendOperator);
.setMergeOperator(stringAppendOperator);
final RocksDB db = RocksDB.open(opt, final RocksDB db = RocksDB.open(opt,
dbFolder.getRoot().getAbsolutePath())) { dbFolder.getRoot().getAbsolutePath())) {
// Writing aa under key // Writing aa under key
@ -112,10 +111,9 @@ public class MergeTest {
@Test @Test
public void cFOperatorOption() public void cFOperatorOption()
throws InterruptedException, RocksDBException { throws InterruptedException, RocksDBException {
final StringAppendOperator stringAppendOperator = try (final StringAppendOperator stringAppendOperator = new StringAppendOperator();
new StringAppendOperator(); final ColumnFamilyOptions cfOpt1 = new ColumnFamilyOptions()
try (final ColumnFamilyOptions cfOpt1 = new ColumnFamilyOptions() .setMergeOperator(stringAppendOperator);
.setMergeOperator(stringAppendOperator);
final ColumnFamilyOptions cfOpt2 = new ColumnFamilyOptions() final ColumnFamilyOptions cfOpt2 = new ColumnFamilyOptions()
.setMergeOperator(stringAppendOperator) .setMergeOperator(stringAppendOperator)
) { ) {
@ -175,42 +173,43 @@ public class MergeTest {
@Test @Test
public void operatorGcBehaviour() public void operatorGcBehaviour()
throws RocksDBException { throws RocksDBException {
final StringAppendOperator stringAppendOperator try (final StringAppendOperator stringAppendOperator = new StringAppendOperator()) {
= new StringAppendOperator(); try (final Options opt = new Options()
try (final Options opt = new Options() .setCreateIfMissing(true)
.setCreateIfMissing(true) .setMergeOperator(stringAppendOperator);
.setMergeOperator(stringAppendOperator); final RocksDB db = RocksDB.open(opt,
final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath())) {
dbFolder.getRoot().getAbsolutePath())) { //no-op
//no-op }
}
// test reuse
try (final Options opt = new Options()
.setMergeOperator(stringAppendOperator);
final RocksDB db = RocksDB.open(opt,
dbFolder.getRoot().getAbsolutePath())) {
//no-op
}
// test param init // test reuse
try (final Options opt = new Options() try (final Options opt = new Options()
.setMergeOperator(new StringAppendOperator()); .setMergeOperator(stringAppendOperator);
final RocksDB db = RocksDB.open(opt, final RocksDB db = RocksDB.open(opt,
dbFolder.getRoot().getAbsolutePath())) { dbFolder.getRoot().getAbsolutePath())) {
//no-op //no-op
} }
// test replace one with another merge operator instance // test param init
try (final Options opt = new Options() try (final StringAppendOperator stringAppendOperator2 = new StringAppendOperator();
.setMergeOperator(stringAppendOperator)) { final Options opt = new Options()
final StringAppendOperator newStringAppendOperator .setMergeOperator(stringAppendOperator2);
= new StringAppendOperator(); final RocksDB db = RocksDB.open(opt,
opt.setMergeOperator(newStringAppendOperator); dbFolder.getRoot().getAbsolutePath())) {
try (final RocksDB db = RocksDB.open(opt,
dbFolder.getRoot().getAbsolutePath())) {
//no-op //no-op
} }
// test replace one with another merge operator instance
try (final Options opt = new Options()
.setMergeOperator(stringAppendOperator);
final StringAppendOperator newStringAppendOperator = new StringAppendOperator()) {
opt.setMergeOperator(newStringAppendOperator);
try (final RocksDB db = RocksDB.open(opt,
dbFolder.getRoot().getAbsolutePath())) {
//no-op
}
}
} }
} }

@ -697,16 +697,15 @@ public class OptionsTest {
@Test @Test
public void compressionPerLevel() { public void compressionPerLevel() {
try (final ColumnFamilyOptions columnFamilyOptions = try (final Options options = new Options()) {
new ColumnFamilyOptions()) { assertThat(options.compressionPerLevel()).isEmpty();
assertThat(columnFamilyOptions.compressionPerLevel()).isEmpty();
List<CompressionType> compressionTypeList = List<CompressionType> compressionTypeList =
new ArrayList<>(); new ArrayList<>();
for (int i = 0; i < columnFamilyOptions.numLevels(); i++) { for (int i = 0; i < options.numLevels(); i++) {
compressionTypeList.add(CompressionType.NO_COMPRESSION); compressionTypeList.add(CompressionType.NO_COMPRESSION);
} }
columnFamilyOptions.setCompressionPerLevel(compressionTypeList); options.setCompressionPerLevel(compressionTypeList);
compressionTypeList = columnFamilyOptions.compressionPerLevel(); compressionTypeList = options.compressionPerLevel();
for (final CompressionType compressionType : compressionTypeList) { for (final CompressionType compressionType : compressionTypeList) {
assertThat(compressionType).isEqualTo( assertThat(compressionType).isEqualTo(
CompressionType.NO_COMPRESSION); CompressionType.NO_COMPRESSION);
@ -716,19 +715,18 @@ public class OptionsTest {
@Test @Test
public void differentCompressionsPerLevel() { public void differentCompressionsPerLevel() {
try (final ColumnFamilyOptions columnFamilyOptions = try (final Options options = new Options()) {
new ColumnFamilyOptions()) { options.setNumLevels(3);
columnFamilyOptions.setNumLevels(3);
assertThat(columnFamilyOptions.compressionPerLevel()).isEmpty(); assertThat(options.compressionPerLevel()).isEmpty();
List<CompressionType> compressionTypeList = new ArrayList<>(); List<CompressionType> compressionTypeList = new ArrayList<>();
compressionTypeList.add(CompressionType.BZLIB2_COMPRESSION); compressionTypeList.add(CompressionType.BZLIB2_COMPRESSION);
compressionTypeList.add(CompressionType.SNAPPY_COMPRESSION); compressionTypeList.add(CompressionType.SNAPPY_COMPRESSION);
compressionTypeList.add(CompressionType.LZ4_COMPRESSION); compressionTypeList.add(CompressionType.LZ4_COMPRESSION);
columnFamilyOptions.setCompressionPerLevel(compressionTypeList); options.setCompressionPerLevel(compressionTypeList);
compressionTypeList = columnFamilyOptions.compressionPerLevel(); compressionTypeList = options.compressionPerLevel();
assertThat(compressionTypeList.size()).isEqualTo(3); assertThat(compressionTypeList.size()).isEqualTo(3);
assertThat(compressionTypeList). assertThat(compressionTypeList).
@ -767,26 +765,12 @@ public class OptionsTest {
} }
} }
@Test
public void rateLimiterConfig() {
try (final Options options = new Options();
final Options anotherOptions = new Options()) {
final RateLimiterConfig rateLimiterConfig =
new GenericRateLimiterConfig(1000, 100 * 1000, 1);
options.setRateLimiterConfig(rateLimiterConfig);
// Test with parameter initialization
anotherOptions.setRateLimiterConfig(
new GenericRateLimiterConfig(1000));
}
}
@Test @Test
public void rateLimiter() { public void rateLimiter() {
try (final Options options = new Options(); try (final Options options = new Options();
final Options anotherOptions = new Options()) { final Options anotherOptions = new Options();
final RateLimiter rateLimiter = final RateLimiter rateLimiter =
new RateLimiter(1000, 100 * 1000, 1); new RateLimiter(1000, 100 * 1000, 1)) {
options.setRateLimiter(rateLimiter); options.setRateLimiter(rateLimiter);
// Test with parameter initialization // Test with parameter initialization
anotherOptions.setRateLimiter( anotherOptions.setRateLimiter(
@ -810,7 +794,6 @@ public class OptionsTest {
} }
} }
@Test @Test
public void shouldTestMemTableFactoryName() public void shouldTestMemTableFactoryName()
throws RocksDBException { throws RocksDBException {

@ -0,0 +1,49 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
import org.junit.ClassRule;
import org.junit.Test;
import static org.assertj.core.api.Assertions.assertThat;
public class RateLimiterTest {
@ClassRule
public static final RocksMemoryResource rocksMemoryResource =
new RocksMemoryResource();
@Test
public void setBytesPerSecond() {
try(final RateLimiter rateLimiter =
new RateLimiter(1000, 100 * 1000, 1)) {
rateLimiter.setBytesPerSecond(2000);
}
}
@Test
public void getSingleBurstBytes() {
try(final RateLimiter rateLimiter =
new RateLimiter(1000, 100 * 1000, 1)) {
assertThat(rateLimiter.getSingleBurstBytes()).isEqualTo(100);
}
}
@Test
public void getTotalBytesThrough() {
try(final RateLimiter rateLimiter =
new RateLimiter(1000, 100 * 1000, 1)) {
assertThat(rateLimiter.getTotalBytesThrough()).isEqualTo(0);
}
}
@Test
public void getTotalRequests() {
try(final RateLimiter rateLimiter =
new RateLimiter(1000, 100 * 1000, 1)) {
assertThat(rateLimiter.getTotalRequests()).isEqualTo(0);
}
}
}

@ -73,8 +73,10 @@ public class RocksDBTest {
@Test @Test
public void write() throws RocksDBException { public void write() throws RocksDBException {
try (final Options options = new Options().setMergeOperator( try (final StringAppendOperator stringAppendOperator = new StringAppendOperator();
new StringAppendOperator()).setCreateIfMissing(true); final Options options = new Options()
.setMergeOperator(stringAppendOperator)
.setCreateIfMissing(true);
final RocksDB db = RocksDB.open(options, final RocksDB db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath()); dbFolder.getRoot().getAbsolutePath());
final WriteOptions opts = new WriteOptions()) { final WriteOptions opts = new WriteOptions()) {
@ -182,9 +184,10 @@ public class RocksDBTest {
@Test @Test
public void merge() throws RocksDBException { public void merge() throws RocksDBException {
try (final Options opt = new Options() try (final StringAppendOperator stringAppendOperator = new StringAppendOperator();
.setCreateIfMissing(true) final Options opt = new Options()
.setMergeOperator(new StringAppendOperator()); .setCreateIfMissing(true)
.setMergeOperator(stringAppendOperator);
final WriteOptions wOpt = new WriteOptions(); final WriteOptions wOpt = new WriteOptions();
final RocksDB db = RocksDB.open(opt, final RocksDB db = RocksDB.open(opt,
dbFolder.getRoot().getAbsolutePath()) dbFolder.getRoot().getAbsolutePath())

@ -32,6 +32,25 @@ public class SliceTest {
} }
} }
@Test
public void sliceClear() {
try (final Slice slice = new Slice("abc")) {
assertThat(slice.toString()).isEqualTo("abc");
slice.clear();
assertThat(slice.toString()).isEmpty();
slice.clear(); // make sure we don't double-free
}
}
@Test
public void sliceRemovePrefix() {
try (final Slice slice = new Slice("abc")) {
assertThat(slice.toString()).isEqualTo("abc");
slice.removePrefix(1);
assertThat(slice.toString()).isEqualTo("bc");
}
}
@Test @Test
public void sliceEquals() { public void sliceEquals() {
try (final Slice slice = new Slice("abc"); try (final Slice slice = new Slice("abc");

@ -192,16 +192,16 @@ public class WriteBatchWithIndexTest {
final ByteBuffer buffer = ByteBuffer.allocateDirect(zeroByteValue.length); final ByteBuffer buffer = ByteBuffer.allocateDirect(zeroByteValue.length);
buffer.put(zeroByteValue); buffer.put(zeroByteValue);
WBWIRocksIterator.WriteEntry[] expected = { final WBWIRocksIterator.WriteEntry expected =
new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.PUT, new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.PUT,
new DirectSlice(buffer, zeroByteValue.length), new DirectSlice(buffer, zeroByteValue.length),
new DirectSlice(buffer, zeroByteValue.length)) new DirectSlice(buffer, zeroByteValue.length));
};
try (final WBWIRocksIterator it = wbwi.newIterator()) { try (final WBWIRocksIterator it = wbwi.newIterator()) {
it.seekToFirst(); it.seekToFirst();
assertThat(it.entry().equals(expected[0])).isTrue(); final WBWIRocksIterator.WriteEntry actual = it.entry();
assertThat(it.entry().hashCode() == expected[0].hashCode()).isTrue(); assertThat(actual.equals(expected)).isTrue();
assertThat(it.entry().hashCode() == expected.hashCode()).isTrue();
} }
} }
} }

@ -31,12 +31,12 @@ public class RocksJunitRunner {
* *
* @param system JUnitSystem * @param system JUnitSystem
*/ */
public RocksJunitListener(JUnitSystem system) { public RocksJunitListener(final JUnitSystem system) {
super(system); super(system);
} }
@Override @Override
public void testStarted(Description description) { public void testStarted(final Description description) {
System.out.format("Run: %s testing now -> %s \n", System.out.format("Run: %s testing now -> %s \n",
description.getClassName(), description.getClassName(),
description.getMethodName()); description.getMethodName());
@ -48,21 +48,23 @@ public class RocksJunitRunner {
* *
* @param args Test classes as String names * @param args Test classes as String names
*/ */
public static void main(String[] args){ public static void main(final String[] args){
JUnitCore runner = new JUnitCore(); final JUnitCore runner = new JUnitCore();
final JUnitSystem system = new RealSystem(); final JUnitSystem system = new RealSystem();
runner.addListener(new RocksJunitListener(system)); runner.addListener(new RocksJunitListener(system));
try { try {
List<Class<?>> classes = new ArrayList<>(); final List<Class<?>> classes = new ArrayList<>();
for (String arg : args) { for (final String arg : args) {
classes.add(Class.forName(arg)); classes.add(Class.forName(arg));
} }
final Result result = runner.run(classes.toArray(new Class[1])); final Class[] clazzes = classes.toArray(new Class[classes.size()]);
final Result result = runner.run(clazzes);
if(!result.wasSuccessful()) { if(!result.wasSuccessful()) {
System.exit(-1); System.exit(-1);
} }
} catch (ClassNotFoundException e) { } catch (final ClassNotFoundException e) {
e.printStackTrace(); e.printStackTrace();
System.exit(-2);
} }
} }
} }

Loading…
Cancel
Save