Fixed various memory leaks and Java 8 JNI Compatibility

Summary:
I have manually audited the entire RocksJava code base.

Sorry for the large pull request; I have broken it down into many small atomic commits, though.

My initial intention was to fix the warnings that appear when running RocksJava on Java 8 with `-Xcheck:jni`; for example, when running `make jtest` you would see many warnings similar to:

```
WARNING in native method: JNI call made without checking exceptions when required to from CallObjectMethod
WARNING in native method: JNI call made without checking exceptions when required to from CallVoidMethod
WARNING in native method: JNI call made without checking exceptions when required to from CallStaticVoidMethod
...
```

A few of those warnings still remain; however, they seem to come directly from the JVM and are not directly related to RocksJava. I am in contact with the OpenJDK hotspot-dev mailing list about these - http://mail.openjdk.java.net/pipermail/hotspot-dev/2017-February/025981.html.
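To illustrate the two patterns the fixes apply throughout: check the result of allocating JNI calls such as `GetStringUTFChars` for `nullptr` before use, and call `ExceptionCheck` after every `Call*Method` before issuing further JNI calls. A minimal sketch (illustrative only, with hypothetical method and parameter names; the real call sites are in the diff below):

```
// Illustrative sketch of the two JNI patterns applied in this pull-request;
// the names here are hypothetical, see the diff for the real call sites.
#include <jni.h>

jlong exampleNativeMethod(JNIEnv* env, jobject jobj, jmethodID jmid,
                          jstring jpath) {
  const char* path = env->GetStringUTFChars(jpath, nullptr);
  if (path == nullptr) {
    // exception thrown: OutOfMemoryError - return immediately and let the
    // pending exception be raised on the Java side
    return 0;
  }
  // ... use path ...
  env->ReleaseStringUTFChars(jpath, path);  // always release what was acquired

  const jlong result = env->CallLongMethod(jobj, jmid);
  if (env->ExceptionCheck()) {
    // exception thrown from CallLongMethod; -Xcheck:jni warns if we continue
    // making JNI calls while this exception is pending
    return 0;
  }
  return result;
}
```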

As a result of fixing these, I realised we were not r
Closes https://github.com/facebook/rocksdb/pull/1890

Differential Revision: D4591758

Pulled By: siying

fbshipit-source-id: 7f7fdf4
Branch: main
Author: Adam Retter (committed by Facebook GitHub Bot)
Parent: be3e5568be
Commit: c6d464a9da
  1. java/Makefile (16 changes)
  2. java/benchmark/src/main/java/org/rocksdb/benchmark/DbBenchmark.java (42 changes)
  3. java/rocksjni/backupablejni.cc (42 changes)
  4. java/rocksjni/backupenginejni.cc (40 changes)
  5. java/rocksjni/checkpoint.cc (17 changes)
  6. java/rocksjni/columnfamilyhandle.cc (5 changes)
  7. java/rocksjni/compaction_filter.cc (4 changes)
  8. java/rocksjni/comparator.cc (4 changes)
  9. java/rocksjni/comparatorjnicallback.cc (284 changes)
  10. java/rocksjni/comparatorjnicallback.h (3 changes)
  11. java/rocksjni/env.cc (4 changes)
  12. java/rocksjni/env_options.cc (11 changes)
  13. java/rocksjni/external_sst_file_info.cc (48 changes)
  14. java/rocksjni/filter.cc (17 changes)
  15. java/rocksjni/iterator.cc (26 changes)
  16. java/rocksjni/loggerjnicallback.cc (267 changes)
  17. java/rocksjni/loggerjnicallback.h (4 changes)
  18. java/rocksjni/merge_operator.cc (25 changes)
  19. java/rocksjni/options.cc (343 changes)
  20. java/rocksjni/portal.h (2169 changes)
  21. java/rocksjni/ratelimiterjni.cc (65 changes)
  22. java/rocksjni/restorejni.cc (4 changes)
  23. java/rocksjni/rocksjni.cc (789 changes)
  24. java/rocksjni/slice.cc (132 changes)
  25. java/rocksjni/sst_file_writerjni.cc (11 changes)
  26. java/rocksjni/statistics.cc (26 changes)
  27. java/rocksjni/transaction_log.cc (9 changes)
  28. java/rocksjni/ttl.cc (105 changes)
  29. java/rocksjni/write_batch.cc (17 changes)
  30. java/rocksjni/write_batch_test.cc (10 changes)
  31. java/rocksjni/write_batch_with_index.cc (62 changes)
  32. java/rocksjni/writebatchhandlerjnicallback.cc (167 changes)
  33. java/samples/src/main/java/RocksDBColumnFamilySample.java (9 changes)
  34. java/samples/src/main/java/RocksDBSample.java (58 changes)
  35. java/src/main/java/org/rocksdb/AbstractSlice.java (14 changes)
  36. java/src/main/java/org/rocksdb/ColumnFamilyOptions.java (2 changes)
  37. java/src/main/java/org/rocksdb/DBOptions.java (13 changes)
  38. java/src/main/java/org/rocksdb/DBOptionsInterface.java (12 changes)
  39. java/src/main/java/org/rocksdb/DirectSlice.java (49 changes)
  40. java/src/main/java/org/rocksdb/EnvOptions.java (12 changes)
  41. java/src/main/java/org/rocksdb/GenericRateLimiterConfig.java (68 changes)
  42. java/src/main/java/org/rocksdb/MergeOperator.java (6 changes)
  43. java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java (4 changes)
  44. java/src/main/java/org/rocksdb/Options.java (13 changes)
  45. java/src/main/java/org/rocksdb/RateLimiterConfig.java (26 changes)
  46. java/src/main/java/org/rocksdb/RocksDB.java (31 changes)
  47. java/src/main/java/org/rocksdb/RocksMutableObject.java (18 changes)
  48. java/src/main/java/org/rocksdb/Slice.java (30 changes)
  49. java/src/main/java/org/rocksdb/StringAppendOperator.java (10 changes)
  50. java/src/main/java/org/rocksdb/TransactionLogIterator.java (2 changes)
  51. java/src/main/java/org/rocksdb/WBWIRocksIterator.java (24 changes)
  52. java/src/main/java/org/rocksdb/WriteBatchWithIndex.java (14 changes)
  53. java/src/test/java/org/rocksdb/ColumnFamilyTest.java (5 changes)
  54. java/src/test/java/org/rocksdb/DBOptionsTest.java (18 changes)
  55. java/src/test/java/org/rocksdb/DirectSliceTest.java (23 changes)
  56. java/src/test/java/org/rocksdb/EnvOptionsTest.java (20 changes)
  57. java/src/test/java/org/rocksdb/KeyMayExistTest.java (8 changes)
  58. java/src/test/java/org/rocksdb/MergeTest.java (79 changes)
  59. java/src/test/java/org/rocksdb/OptionsTest.java (43 changes)
  60. java/src/test/java/org/rocksdb/RateLimiterTest.java (49 changes)
  61. java/src/test/java/org/rocksdb/RocksDBTest.java (13 changes)
  62. java/src/test/java/org/rocksdb/SliceTest.java (19 changes)
  63. java/src/test/java/org/rocksdb/WriteBatchWithIndexTest.java (10 changes)
  64. java/src/test/java/org/rocksdb/test/RocksJunitRunner.java (18 changes)

@@ -18,7 +18,6 @@ NATIVE_JAVA_CLASSES = org.rocksdb.AbstractCompactionFilter\
org.rocksdb.ExternalSstFileInfo\
org.rocksdb.FlushOptions\
org.rocksdb.Filter\
org.rocksdb.GenericRateLimiterConfig\
org.rocksdb.HashLinkedListMemTableConfig\
org.rocksdb.HashSkipListMemTableConfig\
org.rocksdb.Logger\
@@ -91,6 +90,7 @@ JAVA_TESTS = org.rocksdb.BackupableDBOptionsTest\
org.rocksdb.NativeLibraryLoaderTest\
org.rocksdb.OptionsTest\
org.rocksdb.PlainTableConfigTest\
org.rocksdb.RateLimiterTest\
org.rocksdb.ReadOnlyTest\
org.rocksdb.ReadOptionsTest\
org.rocksdb.RocksDBTest\
@@ -136,6 +136,14 @@ JAVA_TESTCLASSPATH = $(JAVA_JUNIT_JAR):$(JAVA_HAMCR_JAR):$(JAVA_MOCKITO_JAR):$(J
MVN_LOCAL = ~/.m2/repository
# Set the default JAVA_ARGS to "" for DEBUG_LEVEL=0
JAVA_ARGS ?=
# When debugging add -Xcheck:jni to the java args
ifneq ($(DEBUG_LEVEL),0)
JAVA_ARGS = -ea -Xcheck:jni
endif
clean:
$(AM_V_at)rm -rf include/*
$(AM_V_at)rm -rf test-libs/
@@ -164,7 +172,7 @@ sample: java
$(AM_V_at)javac -cp $(MAIN_CLASSES) -d $(SAMPLES_MAIN_CLASSES) $(SAMPLES_MAIN_SRC)/RocksDBSample.java
$(AM_V_at)@rm -rf /tmp/rocksdbjni
$(AM_V_at)@rm -rf /tmp/rocksdbjni_not_found
java -ea -Xcheck:jni -Djava.library.path=target -cp $(MAIN_CLASSES):$(SAMPLES_MAIN_CLASSES) RocksDBSample /tmp/rocksdbjni
java $(JAVA_ARGS) -Djava.library.path=target -cp $(MAIN_CLASSES):$(SAMPLES_MAIN_CLASSES) RocksDBSample /tmp/rocksdbjni
$(AM_V_at)@rm -rf /tmp/rocksdbjni
$(AM_V_at)@rm -rf /tmp/rocksdbjni_not_found
@@ -172,7 +180,7 @@ column_family_sample: java
$(AM_V_GEN)mkdir -p $(SAMPLES_MAIN_CLASSES)
$(AM_V_at)javac -cp $(MAIN_CLASSES) -d $(SAMPLES_MAIN_CLASSES) $(SAMPLES_MAIN_SRC)/RocksDBColumnFamilySample.java
$(AM_V_at)@rm -rf /tmp/rocksdbjni
java -ea -Xcheck:jni -Djava.library.path=target -cp $(MAIN_CLASSES):$(SAMPLES_MAIN_CLASSES) RocksDBColumnFamilySample /tmp/rocksdbjni
java $(JAVA_ARGS) -Djava.library.path=target -cp $(MAIN_CLASSES):$(SAMPLES_MAIN_CLASSES) RocksDBColumnFamilySample /tmp/rocksdbjni
$(AM_V_at)@rm -rf /tmp/rocksdbjni
resolve_test_deps:
@@ -194,7 +202,7 @@ java_test: java resolve_test_deps
test: java java_test run_test
run_test:
java -ea -Djava.library.path=target -cp "$(MAIN_CLASSES):$(TEST_CLASSES):$(JAVA_TESTCLASSPATH):target/*" org.rocksdb.test.RocksJunitRunner $(JAVA_TESTS)
java $(JAVA_ARGS) -Djava.library.path=target -cp "$(MAIN_CLASSES):$(TEST_CLASSES):$(JAVA_TESTCLASSPATH):target/*" org.rocksdb.test.RocksJunitRunner $(JAVA_TESTS)
db_bench: java
$(AM_V_GEN)mkdir -p $(BENCHMARK_MAIN_CLASSES)

@@ -572,7 +572,7 @@ public class DbBenchmark {
(Integer)flags_.get(Flag.num_levels));
options.setTargetFileSizeBase(
(Integer)flags_.get(Flag.target_file_size_base));
options.setTargetFileSizeMultiplier((Double) flags_.get(Flag.target_file_size_multiplier));
options.setTargetFileSizeMultiplier((Integer)flags_.get(Flag.target_file_size_multiplier));
options.setMaxBytesForLevelBase(
(Integer)flags_.get(Flag.max_bytes_for_level_base));
options.setMaxBytesForLevelMultiplier((Double) flags_.get(Flag.max_bytes_for_level_multiplier));
@@ -588,12 +588,10 @@ public class DbBenchmark {
(Double)flags_.get(Flag.hard_rate_limit));
options.setRateLimitDelayMaxMilliseconds(
(Integer)flags_.get(Flag.rate_limit_delay_max_milliseconds));
options.setMaxGrandparentOverlapFactor(
(Integer)flags_.get(Flag.max_grandparent_overlap_factor));
options.setMaxCompactionBytes(
(Long) flags_.get(Flag.max_compaction_bytes));
options.setDisableAutoCompactions(
(Boolean)flags_.get(Flag.disable_auto_compactions));
options.setSourceCompactionFactor(
(Integer)flags_.get(Flag.source_compaction_factor));
options.setMaxSuccessiveMerges(
(Integer)flags_.get(Flag.max_successive_merges));
options.setWalTtlSeconds((Long)flags_.get(Flag.wal_ttl_seconds));
@@ -978,7 +976,7 @@ public class DbBenchmark {
return Integer.parseInt(value);
}
},
write_buffer_size(4 * SizeUnit.MB,
write_buffer_size(4L * SizeUnit.MB,
"Number of bytes to buffer in memtable before compacting\n" +
"\t(initialized to default value by 'main'.)") {
@Override public Object parseValue(String value) {
@@ -1056,7 +1054,7 @@ public class DbBenchmark {
return Integer.parseInt(value);
}
},
numdistinct(1000,
numdistinct(1000L,
"Number of distinct keys to use. Used in RandomWithVerify to\n" +
"\tread/write on fewer keys so that gets are more likely to find the\n" +
"\tkey and puts are more likely to update the same key.") {
@@ -1064,7 +1062,7 @@ public class DbBenchmark {
return Long.parseLong(value);
}
},
merge_keys(-1,
merge_keys(-1L,
"Number of distinct keys to use for MergeRandom and\n" +
"\tReadRandomMergeRandom.\n" +
"\tIf negative, there will be FLAGS_num keys.") {
@@ -1169,7 +1167,7 @@ public class DbBenchmark {
return Long.parseLong(value);
}
},
compressed_cache_size(-1,
compressed_cache_size(-1L,
"Number of bytes to use as a cache of compressed data.") {
@Override public Object parseValue(String value) {
return Long.parseLong(value);
@@ -1188,7 +1186,7 @@ public class DbBenchmark {
return Integer.parseInt(value);
}
},
memtable_bloom_size_ratio(0, "Ratio of memtable used by the bloom filter.\n"
memtable_bloom_size_ratio(0.0d, "Ratio of memtable used by the bloom filter.\n"
+ "\t0 means no bloom filter.") {
@Override public Object parseValue(String value) {
return Double.parseDouble(value);
@@ -1212,7 +1210,7 @@ public class DbBenchmark {
return parseBoolean(value);
}
},
writes(-1,"Number of write operations to do. If negative, do\n" +
writes(-1L, "Number of write operations to do. If negative, do\n" +
"\t--num reads.") {
@Override public Object parseValue(String value) {
return Long.parseLong(value);
@@ -1255,7 +1253,7 @@ public class DbBenchmark {
return Integer.parseInt(value);
}
},
max_bytes_for_level_multiplier(10,
max_bytes_for_level_multiplier(10.0d,
"A multiplier to compute max bytes for level-N (N >= 2)") {
@Override public Object parseValue(String value) {
return Double.parseDouble(value);
@@ -1337,7 +1335,7 @@ public class DbBenchmark {
return Integer.parseInt(value);
}
},
stats_interval(0,"Stats are reported every N operations when\n" +
stats_interval(0L, "Stats are reported every N operations when\n" +
"\tthis is greater than zero. When 0 the interval grows over time.") {
@Override public Object parseValue(String value) {
return Long.parseLong(value);
@@ -1354,12 +1352,12 @@ public class DbBenchmark {
return Integer.parseInt(value);
}
},
soft_rate_limit(0.0,"") {
soft_rate_limit(0.0d,"") {
@Override public Object parseValue(String value) {
return Double.parseDouble(value);
}
},
hard_rate_limit(0.0,"When not equal to 0 this make threads\n" +
hard_rate_limit(0.0d,"When not equal to 0 this make threads\n" +
"\tsleep at each stats reporting interval until the compaction\n" +
"\tscore for all levels is less than or equal to this value.") {
@Override public Object parseValue(String value) {
@@ -1373,11 +1371,10 @@ public class DbBenchmark {
return Integer.parseInt(value);
}
},
max_grandparent_overlap_factor(10,"Control maximum bytes of\n" +
"\toverlaps in grandparent (i.e., level+2) before we stop building a\n" +
"\tsingle file in a level->level+1 compaction.") {
max_compaction_bytes(0L, "Limit number of bytes in one compaction to be lower than this\n" +
"\threshold. But it's not guaranteed.") {
@Override public Object parseValue(String value) {
return Integer.parseInt(value);
return Long.parseLong(value);
}
},
readonly(false,"Run read only benchmarks.") {
@@ -1390,13 +1387,6 @@ public class DbBenchmark {
return parseBoolean(value);
}
},
source_compaction_factor(1,"Cap the size of data in level-K for\n" +
"\ta compaction run that compacts Level-K with Level-(K+1) (for\n" +
"\tK >= 1)") {
@Override public Object parseValue(String value) {
return Integer.parseInt(value);
}
},
wal_ttl_seconds(0L,"Set the TTL for the WAL Files in seconds.") {
@Override public Object parseValue(String value) {
return Long.parseLong(value);

@@ -27,8 +27,12 @@
*/
jlong Java_org_rocksdb_BackupableDBOptions_newBackupableDBOptions(
JNIEnv* env, jclass jcls, jstring jpath) {
const char* cpath = env->GetStringUTFChars(jpath, NULL);
auto bopt = new rocksdb::BackupableDBOptions(cpath);
const char* cpath = env->GetStringUTFChars(jpath, nullptr);
if(cpath == nullptr) {
// exception thrown: OutOfMemoryError
return 0;
}
auto* bopt = new rocksdb::BackupableDBOptions(cpath);
env->ReleaseStringUTFChars(jpath, cpath);
return reinterpret_cast<jlong>(bopt);
}
@@ -40,7 +44,7 @@ jlong Java_org_rocksdb_BackupableDBOptions_newBackupableDBOptions(
*/
jstring Java_org_rocksdb_BackupableDBOptions_backupDir(
JNIEnv* env, jobject jopt, jlong jhandle) {
auto bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
return env->NewStringUTF(bopt->backup_dir.c_str());
}
@@ -51,7 +55,7 @@ jstring Java_org_rocksdb_BackupableDBOptions_backupDir(
*/
void Java_org_rocksdb_BackupableDBOptions_setShareTableFiles(
JNIEnv* env, jobject jobj, jlong jhandle, jboolean flag) {
auto bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
bopt->share_table_files = flag;
}
@@ -62,7 +66,7 @@ void Java_org_rocksdb_BackupableDBOptions_setShareTableFiles(
*/
jboolean Java_org_rocksdb_BackupableDBOptions_shareTableFiles(
JNIEnv* env, jobject jobj, jlong jhandle) {
auto bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
return bopt->share_table_files;
}
@@ -73,7 +77,7 @@ jboolean Java_org_rocksdb_BackupableDBOptions_shareTableFiles(
*/
void Java_org_rocksdb_BackupableDBOptions_setSync(
JNIEnv* env, jobject jobj, jlong jhandle, jboolean flag) {
auto bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
bopt->sync = flag;
}
@@ -84,7 +88,7 @@ void Java_org_rocksdb_BackupableDBOptions_setSync(
*/
jboolean Java_org_rocksdb_BackupableDBOptions_sync(
JNIEnv* env, jobject jobj, jlong jhandle) {
auto bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
return bopt->sync;
}
@@ -95,7 +99,7 @@ jboolean Java_org_rocksdb_BackupableDBOptions_sync(
*/
void Java_org_rocksdb_BackupableDBOptions_setDestroyOldData(
JNIEnv* env, jobject jobj, jlong jhandle, jboolean flag) {
auto bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
bopt->destroy_old_data = flag;
}
@@ -106,7 +110,7 @@ void Java_org_rocksdb_BackupableDBOptions_setDestroyOldData(
*/
jboolean Java_org_rocksdb_BackupableDBOptions_destroyOldData(
JNIEnv* env, jobject jobj, jlong jhandle) {
auto bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
return bopt->destroy_old_data;
}
@@ -117,7 +121,7 @@ jboolean Java_org_rocksdb_BackupableDBOptions_destroyOldData(
*/
void Java_org_rocksdb_BackupableDBOptions_setBackupLogFiles(
JNIEnv* env, jobject jobj, jlong jhandle, jboolean flag) {
auto bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
bopt->backup_log_files = flag;
}
@@ -128,7 +132,7 @@ void Java_org_rocksdb_BackupableDBOptions_setBackupLogFiles(
*/
jboolean Java_org_rocksdb_BackupableDBOptions_backupLogFiles(
JNIEnv* env, jobject jobj, jlong jhandle) {
auto bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
return bopt->backup_log_files;
}
@@ -139,7 +143,7 @@ jboolean Java_org_rocksdb_BackupableDBOptions_backupLogFiles(
*/
void Java_org_rocksdb_BackupableDBOptions_setBackupRateLimit(
JNIEnv* env, jobject jobj, jlong jhandle, jlong jbackup_rate_limit) {
auto bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
bopt->backup_rate_limit = jbackup_rate_limit;
}
@@ -150,7 +154,7 @@ void Java_org_rocksdb_BackupableDBOptions_setBackupRateLimit(
*/
jlong Java_org_rocksdb_BackupableDBOptions_backupRateLimit(
JNIEnv* env, jobject jobj, jlong jhandle) {
auto bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
return bopt->backup_rate_limit;
}
@@ -161,7 +165,7 @@ jlong Java_org_rocksdb_BackupableDBOptions_backupRateLimit(
*/
void Java_org_rocksdb_BackupableDBOptions_setRestoreRateLimit(
JNIEnv* env, jobject jobj, jlong jhandle, jlong jrestore_rate_limit) {
auto bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
bopt->restore_rate_limit = jrestore_rate_limit;
}
@@ -172,7 +176,7 @@ void Java_org_rocksdb_BackupableDBOptions_setRestoreRateLimit(
*/
jlong Java_org_rocksdb_BackupableDBOptions_restoreRateLimit(
JNIEnv* env, jobject jobj, jlong jhandle) {
auto bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
return bopt->restore_rate_limit;
}
@@ -183,7 +187,7 @@ jlong Java_org_rocksdb_BackupableDBOptions_restoreRateLimit(
*/
void Java_org_rocksdb_BackupableDBOptions_setShareFilesWithChecksum(
JNIEnv* env, jobject jobj, jlong jhandle, jboolean flag) {
auto bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
bopt->share_files_with_checksum = flag;
}
@@ -194,7 +198,7 @@ void Java_org_rocksdb_BackupableDBOptions_setShareFilesWithChecksum(
*/
jboolean Java_org_rocksdb_BackupableDBOptions_shareFilesWithChecksum(
JNIEnv* env, jobject jobj, jlong jhandle) {
auto bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
return bopt->share_files_with_checksum;
}
@@ -205,7 +209,7 @@ jboolean Java_org_rocksdb_BackupableDBOptions_shareFilesWithChecksum(
*/
void Java_org_rocksdb_BackupableDBOptions_disposeInternal(
JNIEnv* env, jobject jopt, jlong jhandle) {
auto bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
assert(bopt);
auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
assert(bopt != nullptr);
delete bopt;
}

@@ -82,11 +82,15 @@ jintArray Java_org_rocksdb_BackupEngine_getCorruptedBackups(
backup_engine->GetCorruptedBackups(&backup_ids);
// store backupids in int array
std::vector<jint> int_backup_ids(backup_ids.begin(), backup_ids.end());
// Store ints in java array
jintArray ret_backup_ids;
// It's ok to lose precision here (64->32)
jsize ret_backup_ids_size = static_cast<jsize>(backup_ids.size());
ret_backup_ids = env->NewIntArray(ret_backup_ids_size);
jintArray ret_backup_ids = env->NewIntArray(ret_backup_ids_size);
if(ret_backup_ids == nullptr) {
// exception thrown: OutOfMemoryError
return nullptr;
}
env->SetIntArrayRegion(ret_backup_ids, 0, ret_backup_ids_size,
int_backup_ids.data());
return ret_backup_ids;
@@ -155,14 +159,24 @@ void Java_org_rocksdb_BackupEngine_restoreDbFromBackup(
JNIEnv* env, jobject jbe, jlong jbe_handle, jint jbackup_id,
jstring jdb_dir, jstring jwal_dir, jlong jrestore_options_handle) {
auto* backup_engine = reinterpret_cast<rocksdb::BackupEngine*>(jbe_handle);
const char* db_dir = env->GetStringUTFChars(jdb_dir, 0);
const char* wal_dir = env->GetStringUTFChars(jwal_dir, 0);
const char* db_dir = env->GetStringUTFChars(jdb_dir, nullptr);
if(db_dir == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
const char* wal_dir = env->GetStringUTFChars(jwal_dir, nullptr);
if(wal_dir == nullptr) {
// exception thrown: OutOfMemoryError
env->ReleaseStringUTFChars(jdb_dir, db_dir);
return;
}
auto* restore_options =
reinterpret_cast<rocksdb::RestoreOptions*>(jrestore_options_handle);
auto status =
backup_engine->RestoreDBFromBackup(
static_cast<rocksdb::BackupID>(jbackup_id), db_dir, wal_dir,
*restore_options);
env->ReleaseStringUTFChars(jwal_dir, wal_dir);
env->ReleaseStringUTFChars(jdb_dir, db_dir);
@@ -182,13 +196,23 @@ void Java_org_rocksdb_BackupEngine_restoreDbFromLatestBackup(
JNIEnv* env, jobject jbe, jlong jbe_handle, jstring jdb_dir,
jstring jwal_dir, jlong jrestore_options_handle) {
auto* backup_engine = reinterpret_cast<rocksdb::BackupEngine*>(jbe_handle);
const char* db_dir = env->GetStringUTFChars(jdb_dir, 0);
const char* wal_dir = env->GetStringUTFChars(jwal_dir, 0);
const char* db_dir = env->GetStringUTFChars(jdb_dir, nullptr);
if(db_dir == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
const char* wal_dir = env->GetStringUTFChars(jwal_dir, nullptr);
if(wal_dir == nullptr) {
// exception thrown: OutOfMemoryError
env->ReleaseStringUTFChars(jdb_dir, db_dir);
return;
}
auto* restore_options =
reinterpret_cast<rocksdb::RestoreOptions*>(jrestore_options_handle);
auto status =
backup_engine->RestoreDBFromLatestBackup(db_dir, wal_dir,
*restore_options);
env->ReleaseStringUTFChars(jwal_dir, wal_dir);
env->ReleaseStringUTFChars(jdb_dir, db_dir);
@@ -206,5 +230,7 @@ void Java_org_rocksdb_BackupEngine_restoreDbFromLatestBackup(
*/
void Java_org_rocksdb_BackupEngine_disposeInternal(
JNIEnv* env, jobject jbe, jlong jbe_handle) {
delete reinterpret_cast<rocksdb::BackupEngine*>(jbe_handle);
auto* be = reinterpret_cast<rocksdb::BackupEngine*>(jbe_handle);
assert(be != nullptr);
delete be;
}

@@ -22,7 +22,7 @@
*/
jlong Java_org_rocksdb_Checkpoint_newCheckpoint(JNIEnv* env,
jclass jclazz, jlong jdb_handle) {
auto db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
rocksdb::Checkpoint* checkpoint;
rocksdb::Checkpoint::Create(db, &checkpoint);
return reinterpret_cast<jlong>(checkpoint);
@@ -35,8 +35,8 @@ jlong Java_org_rocksdb_Checkpoint_newCheckpoint(JNIEnv* env,
*/
void Java_org_rocksdb_Checkpoint_disposeInternal(JNIEnv* env, jobject jobj,
jlong jhandle) {
auto checkpoint = reinterpret_cast<rocksdb::Checkpoint*>(jhandle);
assert(checkpoint);
auto* checkpoint = reinterpret_cast<rocksdb::Checkpoint*>(jhandle);
assert(checkpoint != nullptr);
delete checkpoint;
}
@@ -48,13 +48,20 @@ void Java_org_rocksdb_Checkpoint_disposeInternal(JNIEnv* env, jobject jobj,
void Java_org_rocksdb_Checkpoint_createCheckpoint(
JNIEnv* env, jobject jobj, jlong jcheckpoint_handle,
jstring jcheckpoint_path) {
auto checkpoint = reinterpret_cast<rocksdb::Checkpoint*>(
jcheckpoint_handle);
const char* checkpoint_path = env->GetStringUTFChars(
jcheckpoint_path, 0);
if(checkpoint_path == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
auto* checkpoint = reinterpret_cast<rocksdb::Checkpoint*>(
jcheckpoint_handle);
rocksdb::Status s = checkpoint->CreateCheckpoint(
checkpoint_path);
env->ReleaseStringUTFChars(jcheckpoint_path, checkpoint_path);
if (!s.ok()) {
rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
}

@@ -20,6 +20,7 @@
*/
void Java_org_rocksdb_ColumnFamilyHandle_disposeInternal(
JNIEnv* env, jobject jobj, jlong handle) {
auto it = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(handle);
delete it;
auto* cfh = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(handle);
assert(cfh != nullptr);
delete cfh;
}

@@ -20,6 +20,8 @@
*/
void Java_org_rocksdb_AbstractCompactionFilter_disposeInternal(
JNIEnv* env, jobject jobj, jlong handle) {
delete reinterpret_cast<rocksdb::CompactionFilter*>(handle);
auto* cf = reinterpret_cast<rocksdb::CompactionFilter*>(handle);
assert(cf != nullptr);
delete cf;
}
// </editor-fold>

@@ -27,7 +27,9 @@
*/
void Java_org_rocksdb_AbstractComparator_disposeInternal(
JNIEnv* env, jobject jobj, jlong handle) {
delete reinterpret_cast<rocksdb::BaseComparatorJniCallback*>(handle);
auto* bcjc = reinterpret_cast<rocksdb::BaseComparatorJniCallback*>(handle);
assert(bcjc != nullptr);
delete bcjc;
}
// </editor-fold>

@@ -17,35 +17,60 @@ BaseComparatorJniCallback::BaseComparatorJniCallback(
mtx_findShortestSeparator(new port::Mutex(copt->use_adaptive_mutex)) {
// Note: Comparator methods may be accessed by multiple threads,
// so we ref the jvm not the env
const jint rs __attribute__((unused)) = env->GetJavaVM(&m_jvm);
assert(rs == JNI_OK);
const jint rs = env->GetJavaVM(&m_jvm);
if(rs != JNI_OK) {
// exception thrown
return;
}
// Note: we want to access the Java Comparator instance
// across multiple method calls, so we create a global ref
assert(jComparator != nullptr);
m_jComparator = env->NewGlobalRef(jComparator);
if(m_jComparator == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
// Note: The name of a Comparator will not change during its lifetime,
// so we cache it in a global var
jmethodID jNameMethodId = AbstractComparatorJni::getNameMethodId(env);
if(jNameMethodId == nullptr) {
// exception thrown: NoSuchMethodException or OutOfMemoryError
return;
}
jstring jsName = (jstring)env->CallObjectMethod(m_jComparator, jNameMethodId);
m_name = JniUtil::copyString(env, jsName); // also releases jsName
if(env->ExceptionCheck()) {
// exception thrown
return;
}
jboolean has_exception = JNI_FALSE;
m_name = JniUtil::copyString(env, jsName,
&has_exception); // also releases jsName
if (has_exception == JNI_TRUE) {
// exception thrown
return;
}
m_jCompareMethodId = AbstractComparatorJni::getCompareMethodId(env);
if(m_jCompareMethodId == nullptr) {
// exception thrown: NoSuchMethodException or OutOfMemoryError
return;
}
m_jFindShortestSeparatorMethodId =
AbstractComparatorJni::getFindShortestSeparatorMethodId(env);
if(m_jFindShortestSeparatorMethodId == nullptr) {
// exception thrown: NoSuchMethodException or OutOfMemoryError
return;
}
m_jFindShortSuccessorMethodId =
AbstractComparatorJni::getFindShortSuccessorMethodId(env);
}
/**
* Attach/Get a JNIEnv for the current native thread
*/
JNIEnv* BaseComparatorJniCallback::getJniEnv() const {
JNIEnv *env;
jint rs __attribute__((unused)) =
m_jvm->AttachCurrentThread(reinterpret_cast<void**>(&env), NULL);
assert(rs == JNI_OK);
return env;
if(m_jFindShortSuccessorMethodId == nullptr) {
// exception thrown: NoSuchMethodException or OutOfMemoryError
return;
}
}
const char* BaseComparatorJniCallback::Name() const {
@@ -53,22 +78,50 @@ const char* BaseComparatorJniCallback::Name() const {
}
int BaseComparatorJniCallback::Compare(const Slice& a, const Slice& b) const {
JNIEnv* m_env = getJniEnv();
jboolean attached_thread = JNI_FALSE;
JNIEnv* env = JniUtil::getJniEnv(m_jvm, &attached_thread);
assert(env != nullptr);
// TODO(adamretter): slice objects can potentially be cached using thread
// local variables to avoid locking. Could make this configurable depending on
// performance.
mtx_compare->Lock();
AbstractSliceJni::setHandle(m_env, m_jSliceA, &a, JNI_FALSE);
AbstractSliceJni::setHandle(m_env, m_jSliceB, &b, JNI_FALSE);
bool pending_exception =
AbstractSliceJni::setHandle(env, m_jSliceA, &a, JNI_FALSE);
if(pending_exception) {
if(env->ExceptionCheck()) {
// exception thrown from setHandle or descendant
env->ExceptionDescribe(); // print out exception to stderr
}
JniUtil::releaseJniEnv(m_jvm, attached_thread);
return 0;
}
pending_exception =
AbstractSliceJni::setHandle(env, m_jSliceB, &b, JNI_FALSE);
if(pending_exception) {
if(env->ExceptionCheck()) {
// exception thrown from setHandle or descendant
env->ExceptionDescribe(); // print out exception to stderr
}
JniUtil::releaseJniEnv(m_jvm, attached_thread);
return 0;
}
jint result =
m_env->CallIntMethod(m_jComparator, m_jCompareMethodId, m_jSliceA,
env->CallIntMethod(m_jComparator, m_jCompareMethodId, m_jSliceA,
m_jSliceB);
mtx_compare->Unlock();
m_jvm->DetachCurrentThread();
if(env->ExceptionCheck()) {
// exception thrown from CallIntMethod
env->ExceptionDescribe(); // print out exception to stderr
result = 0; // we could not get a result from java callback so use 0
}
JniUtil::releaseJniEnv(m_jvm, attached_thread);
return result;
}
@@ -79,32 +132,80 @@ void BaseComparatorJniCallback::FindShortestSeparator(
return;
}
JNIEnv* m_env = getJniEnv();
jboolean attached_thread = JNI_FALSE;
JNIEnv* env = JniUtil::getJniEnv(m_jvm, &attached_thread);
assert(env != nullptr);
const char* startUtf = start->c_str();
jstring jsStart = m_env->NewStringUTF(startUtf);
jstring jsStart = env->NewStringUTF(startUtf);
if(jsStart == nullptr) {
// unable to construct string
if(env->ExceptionCheck()) {
env->ExceptionDescribe(); // print out exception to stderr
}
JniUtil::releaseJniEnv(m_jvm, attached_thread);
return;
}
if(env->ExceptionCheck()) {
// exception thrown: OutOfMemoryError
env->ExceptionDescribe(); // print out exception to stderr
env->DeleteLocalRef(jsStart);
JniUtil::releaseJniEnv(m_jvm, attached_thread);
return;
}
// TODO(adamretter): slice object can potentially be cached using thread local
// variable to avoid locking. Could make this configurable depending on
// performance.
mtx_findShortestSeparator->Lock();
AbstractSliceJni::setHandle(m_env, m_jSliceLimit, &limit, JNI_FALSE);
bool pending_exception =
AbstractSliceJni::setHandle(env, m_jSliceLimit, &limit, JNI_FALSE);
if(pending_exception) {
if(env->ExceptionCheck()) {
// exception thrown from setHandle or descendant
env->ExceptionDescribe(); // print out exception to stderr
}
if(jsStart != nullptr) {
env->DeleteLocalRef(jsStart);
}
JniUtil::releaseJniEnv(m_jvm, attached_thread);
return;
}
jstring jsResultStart =
(jstring)m_env->CallObjectMethod(m_jComparator,
(jstring)env->CallObjectMethod(m_jComparator,
m_jFindShortestSeparatorMethodId, jsStart, m_jSliceLimit);
mtx_findShortestSeparator->Unlock();
m_env->DeleteLocalRef(jsStart);
if(env->ExceptionCheck()) {
// exception thrown from CallObjectMethod
env->ExceptionDescribe(); // print out exception to stderr
env->DeleteLocalRef(jsStart);
JniUtil::releaseJniEnv(m_jvm, attached_thread);
return;
}
env->DeleteLocalRef(jsStart);
if (jsResultStart != nullptr) {
// update start with result
*start =
JniUtil::copyString(m_env, jsResultStart); // also releases jsResultStart
jboolean has_exception = JNI_FALSE;
std::string result = JniUtil::copyString(env, jsResultStart,
&has_exception); // also releases jsResultStart
if (has_exception == JNI_TRUE) {
if (env->ExceptionCheck()) {
env->ExceptionDescribe(); // print out exception to stderr
}
JniUtil::releaseJniEnv(m_jvm, attached_thread);
return;
}
*start = result;
}
m_jvm->DetachCurrentThread();
JniUtil::releaseJniEnv(m_jvm, attached_thread);
}
void BaseComparatorJniCallback::FindShortSuccessor(std::string* key) const {
@@ -112,34 +213,69 @@ void BaseComparatorJniCallback::FindShortSuccessor(std::string* key) const {
return;
}
JNIEnv* m_env = getJniEnv();
jboolean attached_thread = JNI_FALSE;
JNIEnv* env = JniUtil::getJniEnv(m_jvm, &attached_thread);
assert(env != nullptr);
const char* keyUtf = key->c_str();
jstring jsKey = m_env->NewStringUTF(keyUtf);
jstring jsKey = env->NewStringUTF(keyUtf);
if(jsKey == nullptr) {
// unable to construct string
if(env->ExceptionCheck()) {
env->ExceptionDescribe(); // print out exception to stderr
}
JniUtil::releaseJniEnv(m_jvm, attached_thread);
return;
} else if(env->ExceptionCheck()) {
// exception thrown: OutOfMemoryError
env->ExceptionDescribe(); // print out exception to stderr
env->DeleteLocalRef(jsKey);
JniUtil::releaseJniEnv(m_jvm, attached_thread);
return;
}
jstring jsResultKey =
(jstring)m_env->CallObjectMethod(m_jComparator,
(jstring)env->CallObjectMethod(m_jComparator,
m_jFindShortSuccessorMethodId, jsKey);
m_env->DeleteLocalRef(jsKey);
if(env->ExceptionCheck()) {
// exception thrown from CallObjectMethod
env->ExceptionDescribe(); // print out exception to stderr
env->DeleteLocalRef(jsKey);
JniUtil::releaseJniEnv(m_jvm, attached_thread);
return;
}
env->DeleteLocalRef(jsKey);
if (jsResultKey != nullptr) {
// updates key with result, also releases jsResultKey.
*key = JniUtil::copyString(m_env, jsResultKey);
jboolean has_exception = JNI_FALSE;
std::string result = JniUtil::copyString(env, jsResultKey, &has_exception);
if (has_exception == JNI_TRUE) {
if (env->ExceptionCheck()) {
env->ExceptionDescribe(); // print out exception to stderr
}
JniUtil::releaseJniEnv(m_jvm, attached_thread);
return;
}
*key = result;
}
m_jvm->DetachCurrentThread();
JniUtil::releaseJniEnv(m_jvm, attached_thread);
}
BaseComparatorJniCallback::~BaseComparatorJniCallback() {
JNIEnv* m_env = getJniEnv();
jboolean attached_thread = JNI_FALSE;
JNIEnv* env = JniUtil::getJniEnv(m_jvm, &attached_thread);
assert(env != nullptr);
m_env->DeleteGlobalRef(m_jComparator);
if(m_jComparator != nullptr) {
env->DeleteGlobalRef(m_jComparator);
}
// Note: do not need to explicitly detach, as this function is effectively
// called from the Java class's disposeInternal method, and so already
// has an attached thread, getJniEnv above is just a no-op Attach to get
// the env //jvm->DetachCurrentThread();
JniUtil::releaseJniEnv(m_jvm, attached_thread);
}
ComparatorJniCallback::ComparatorJniCallback(
@@ -147,15 +283,42 @@ ComparatorJniCallback::ComparatorJniCallback(
const ComparatorJniCallbackOptions* copt) :
BaseComparatorJniCallback(env, jComparator, copt) {
m_jSliceA = env->NewGlobalRef(SliceJni::construct0(env));
if(m_jSliceA == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
m_jSliceB = env->NewGlobalRef(SliceJni::construct0(env));
if(m_jSliceB == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
m_jSliceLimit = env->NewGlobalRef(SliceJni::construct0(env));
if(m_jSliceLimit == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
}
ComparatorJniCallback::~ComparatorJniCallback() {
JNIEnv* m_env = getJniEnv();
m_env->DeleteGlobalRef(m_jSliceA);
m_env->DeleteGlobalRef(m_jSliceB);
m_env->DeleteGlobalRef(m_jSliceLimit);
jboolean attached_thread = JNI_FALSE;
JNIEnv* env = JniUtil::getJniEnv(m_jvm, &attached_thread);
assert(env != nullptr);
if(m_jSliceA != nullptr) {
env->DeleteGlobalRef(m_jSliceA);
}
if(m_jSliceB != nullptr) {
env->DeleteGlobalRef(m_jSliceB);
}
if(m_jSliceLimit != nullptr) {
env->DeleteGlobalRef(m_jSliceLimit);
}
JniUtil::releaseJniEnv(m_jvm, attached_thread);
}
DirectComparatorJniCallback::DirectComparatorJniCallback(
@@ -163,14 +326,41 @@ DirectComparatorJniCallback::DirectComparatorJniCallback(
const ComparatorJniCallbackOptions* copt) :
BaseComparatorJniCallback(env, jComparator, copt) {
m_jSliceA = env->NewGlobalRef(DirectSliceJni::construct0(env));
if(m_jSliceA == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
m_jSliceB = env->NewGlobalRef(DirectSliceJni::construct0(env));
if(m_jSliceB == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
m_jSliceLimit = env->NewGlobalRef(DirectSliceJni::construct0(env));
if(m_jSliceLimit == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
}
DirectComparatorJniCallback::~DirectComparatorJniCallback() {
JNIEnv* m_env = getJniEnv();
m_env->DeleteGlobalRef(m_jSliceA);
m_env->DeleteGlobalRef(m_jSliceB);
m_env->DeleteGlobalRef(m_jSliceLimit);
jboolean attached_thread = JNI_FALSE;
JNIEnv* env = JniUtil::getJniEnv(m_jvm, &attached_thread);
assert(env != nullptr);
if(m_jSliceA != nullptr) {
env->DeleteGlobalRef(m_jSliceA);
}
if(m_jSliceB != nullptr) {
env->DeleteGlobalRef(m_jSliceB);
}
if(m_jSliceLimit != nullptr) {
env->DeleteGlobalRef(m_jSliceLimit);
}
JniUtil::releaseJniEnv(m_jvm, attached_thread);
}
} // namespace rocksdb

@@ -61,7 +61,6 @@ class BaseComparatorJniCallback : public Comparator {
port::Mutex* mtx_compare;
// used for synchronisation in findShortestSeparator method
port::Mutex* mtx_findShortestSeparator;
JavaVM* m_jvm;
jobject m_jComparator;
std::string m_name;
jmethodID m_jCompareMethodId;
@@ -69,7 +68,7 @@ class BaseComparatorJniCallback : public Comparator {
jmethodID m_jFindShortSuccessorMethodId;
protected:
JNIEnv* getJniEnv() const;
JavaVM* m_jvm;
jobject m_jSliceA;
jobject m_jSliceB;
jobject m_jSliceLimit;

@@ -75,5 +75,7 @@ jlong Java_org_rocksdb_RocksMemEnv_createMemEnv(
*/
void Java_org_rocksdb_RocksMemEnv_disposeInternal(
JNIEnv* env, jobject jobj, jlong jhandle) {
delete reinterpret_cast<rocksdb::Env*>(jhandle);
auto* e = reinterpret_cast<rocksdb::Env*>(jhandle);
assert(e != nullptr);
delete e;
}

@@ -44,7 +44,9 @@ jlong Java_org_rocksdb_EnvOptions_newEnvOptions(JNIEnv *env, jclass jcls) {
*/
void Java_org_rocksdb_EnvOptions_disposeInternal(JNIEnv *env, jobject jobj,
jlong jhandle) {
delete reinterpret_cast<rocksdb::EnvOptions *>(jhandle);
auto* eo = reinterpret_cast<rocksdb::EnvOptions *>(jhandle);
assert(eo != nullptr);
delete eo;
}
/*
@@ -288,7 +290,8 @@ jlong Java_org_rocksdb_EnvOptions_writableFileMaxBufferSize(JNIEnv *env,
void Java_org_rocksdb_EnvOptions_setRateLimiter(JNIEnv *env, jobject jobj,
jlong jhandle,
jlong rl_handle) {
auto *rate_limiter = reinterpret_cast<rocksdb::RateLimiter *>(rl_handle);
auto *env_opt = reinterpret_cast<rocksdb::EnvOptions *>(jhandle);
env_opt->rate_limiter = rate_limiter;
auto* sptr_rate_limiter =
reinterpret_cast<std::shared_ptr<rocksdb::RateLimiter> *>(rl_handle);
auto* env_opt = reinterpret_cast<rocksdb::EnvOptions *>(jhandle);
env_opt->rate_limiter = sptr_rate_limiter->get();
}

@@ -31,17 +31,35 @@ jlong Java_org_rocksdb_ExternalSstFileInfo_newExternalSstFileInfo__Ljava_lang_St
JNIEnv *env, jclass jcls, jstring jfile_path, jstring jsmallest_key,
jstring jlargest_key, jlong jsequence_number, jlong jfile_size,
jint jnum_entries, jint jversion) {
const char *file_path = env->GetStringUTFChars(jfile_path, NULL);
const char *smallest_key = env->GetStringUTFChars(jsmallest_key, NULL);
const char *largest_key = env->GetStringUTFChars(jlargest_key, NULL);
const char *file_path = env->GetStringUTFChars(jfile_path, nullptr);
if(file_path == nullptr) {
// exception thrown: OutOfMemoryError
return 0;
}
const char *smallest_key = env->GetStringUTFChars(jsmallest_key, nullptr);
if(smallest_key == nullptr) {
// exception thrown: OutOfMemoryError
env->ReleaseStringUTFChars(jfile_path, file_path);
return 0;
}
const char *largest_key = env->GetStringUTFChars(jlargest_key, nullptr);
if(largest_key == nullptr) {
// exception thrown: OutOfMemoryError
env->ReleaseStringUTFChars(jsmallest_key, smallest_key);
env->ReleaseStringUTFChars(jfile_path, file_path);
return 0;
}
auto *external_sst_file_info = new rocksdb::ExternalSstFileInfo(
file_path, smallest_key, largest_key,
static_cast<rocksdb::SequenceNumber>(jsequence_number),
static_cast<uint64_t>(jfile_size), static_cast<int32_t>(jnum_entries),
static_cast<int32_t>(jversion));
env->ReleaseStringUTFChars(jfile_path, file_path);
env->ReleaseStringUTFChars(jsmallest_key, smallest_key);
env->ReleaseStringUTFChars(jlargest_key, largest_key);
env->ReleaseStringUTFChars(jsmallest_key, smallest_key);
env->ReleaseStringUTFChars(jfile_path, file_path);
return reinterpret_cast<jlong>(external_sst_file_info);
}
@@ -55,7 +73,11 @@ void Java_org_rocksdb_ExternalSstFileInfo_setFilePath(JNIEnv *env, jobject jobj,
jstring jfile_path) {
auto *external_sst_file_info =
reinterpret_cast<rocksdb::ExternalSstFileInfo *>(jhandle);
const char *file_path = env->GetStringUTFChars(jfile_path, NULL);
const char *file_path = env->GetStringUTFChars(jfile_path, nullptr);
if(file_path == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
external_sst_file_info->file_path = file_path;
env->ReleaseStringUTFChars(jfile_path, file_path);
}
@@ -81,7 +103,11 @@ void Java_org_rocksdb_ExternalSstFileInfo_setSmallestKey(
JNIEnv *env, jobject jobj, jlong jhandle, jstring jsmallest_key) {
auto *external_sst_file_info =
reinterpret_cast<rocksdb::ExternalSstFileInfo *>(jhandle);
const char *smallest_key = env->GetStringUTFChars(jsmallest_key, NULL);
const char *smallest_key = env->GetStringUTFChars(jsmallest_key, nullptr);
if(smallest_key == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
external_sst_file_info->smallest_key = smallest_key;
env->ReleaseStringUTFChars(jsmallest_key, smallest_key);
}
@@ -111,6 +137,10 @@ void Java_org_rocksdb_ExternalSstFileInfo_setLargestKey(JNIEnv *env,
auto *external_sst_file_info =
reinterpret_cast<rocksdb::ExternalSstFileInfo *>(jhandle);
const char *largest_key = env->GetStringUTFChars(jlargest_key, NULL);
if(largest_key == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
external_sst_file_info->largest_key = largest_key;
env->ReleaseStringUTFChars(jlargest_key, largest_key);
}
@@ -238,5 +268,7 @@ jint Java_org_rocksdb_ExternalSstFileInfo_version(JNIEnv *env, jobject jobj,
void Java_org_rocksdb_ExternalSstFileInfo_disposeInternal(JNIEnv *env,
jobject jobj,
jlong jhandle) {
delete reinterpret_cast<rocksdb::ExternalSstFileInfo *>(jhandle);
auto* esfi = reinterpret_cast<rocksdb::ExternalSstFileInfo *>(jhandle);
assert(esfi != nullptr);
delete esfi;
}

@@ -24,12 +24,10 @@
jlong Java_org_rocksdb_BloomFilter_createNewBloomFilter(
JNIEnv* env, jclass jcls, jint bits_per_key,
jboolean use_block_base_builder) {
auto* fp = const_cast<rocksdb::FilterPolicy *>(
rocksdb::NewBloomFilterPolicy(bits_per_key, use_block_base_builder));
auto* pFilterPolicy =
new std::shared_ptr<rocksdb::FilterPolicy>;
*pFilterPolicy = std::shared_ptr<rocksdb::FilterPolicy>(fp);
return reinterpret_cast<jlong>(pFilterPolicy);
auto* sptr_filter =
new std::shared_ptr<const rocksdb::FilterPolicy>(
rocksdb::NewBloomFilterPolicy(bits_per_key, use_block_base_builder));
return reinterpret_cast<jlong>(sptr_filter);
}
/*
@@ -39,8 +37,7 @@ jlong Java_org_rocksdb_BloomFilter_createNewBloomFilter(
*/
void Java_org_rocksdb_Filter_disposeInternal(
JNIEnv* env, jobject jobj, jlong jhandle) {
std::shared_ptr<rocksdb::FilterPolicy> *handle =
reinterpret_cast<std::shared_ptr<rocksdb::FilterPolicy> *>(jhandle);
handle->reset();
auto* handle =
reinterpret_cast<std::shared_ptr<const rocksdb::FilterPolicy> *>(jhandle);
delete handle; // delete std::shared_ptr
}
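(The same ownership pattern recurs for the Logger and StringAppendOperator below: the `jlong` handle stores the address of a heap-allocated `std::shared_ptr`, and `disposeInternal` deletes the `shared_ptr` itself rather than calling `reset()`, so any other native holder of a copy of the `shared_ptr` keeps the object alive. A minimal self-contained sketch, using a stand-in type instead of the real `rocksdb::FilterPolicy`:)

```
// Sketch of the heap-allocated shared_ptr handle pattern; FilterPolicy here
// is a stand-in type, not the real rocksdb::FilterPolicy.
#include <cstdint>
#include <memory>

struct FilterPolicy {};  // stand-in for rocksdb::FilterPolicy

// Allocate the shared_ptr itself on the heap and hand its address to Java
// as a 64-bit handle.
int64_t createHandle() {
  auto* sptr = new std::shared_ptr<const FilterPolicy>(new FilterPolicy());
  return reinterpret_cast<int64_t>(sptr);
}

// disposeInternal deletes the shared_ptr; the managed FilterPolicy is freed
// once the last shared_ptr referencing it (e.g. one copied into an Options)
// is gone.
void disposeHandle(int64_t handle) {
  delete reinterpret_cast<std::shared_ptr<const FilterPolicy>*>(handle);
}

int main() {
  const int64_t h = createHandle();
  disposeHandle(h);  // frees both the shared_ptr and the FilterPolicy
  return 0;
}
```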

@@ -21,7 +21,8 @@
*/
void Java_org_rocksdb_RocksIterator_disposeInternal(
JNIEnv* env, jobject jobj, jlong handle) {
auto it = reinterpret_cast<rocksdb::Iterator*>(handle);
auto* it = reinterpret_cast<rocksdb::Iterator*>(handle);
assert(it != nullptr);
delete it;
}
@@ -83,11 +84,16 @@ void Java_org_rocksdb_RocksIterator_prev0(
void Java_org_rocksdb_RocksIterator_seek0(
JNIEnv* env, jobject jobj, jlong handle,
jbyteArray jtarget, jint jtarget_len) {
auto it = reinterpret_cast<rocksdb::Iterator*>(handle);
jbyte* target = env->GetByteArrayElements(jtarget, 0);
jbyte* target = env->GetByteArrayElements(jtarget, nullptr);
if(target == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
rocksdb::Slice target_slice(
reinterpret_cast<char*>(target), jtarget_len);
auto* it = reinterpret_cast<rocksdb::Iterator*>(handle);
it->Seek(target_slice);
env->ReleaseByteArrayElements(jtarget, target, JNI_ABORT);
@@ -100,7 +106,7 @@ void Java_org_rocksdb_RocksIterator_seek0(
*/
void Java_org_rocksdb_RocksIterator_status0(
JNIEnv* env, jobject jobj, jlong handle) {
auto it = reinterpret_cast<rocksdb::Iterator*>(handle);
auto* it = reinterpret_cast<rocksdb::Iterator*>(handle);
rocksdb::Status s = it->status();
if (s.ok()) {
@@ -117,10 +123,14 @@ void Java_org_rocksdb_RocksIterator_status0(
*/
jbyteArray Java_org_rocksdb_RocksIterator_key0(
JNIEnv* env, jobject jobj, jlong handle) {
auto it = reinterpret_cast<rocksdb::Iterator*>(handle);
auto* it = reinterpret_cast<rocksdb::Iterator*>(handle);
rocksdb::Slice key_slice = it->key();
jbyteArray jkey = env->NewByteArray(static_cast<jsize>(key_slice.size()));
if(jkey == nullptr) {
// exception thrown: OutOfMemoryError
return nullptr;
}
env->SetByteArrayRegion(jkey, 0, static_cast<jsize>(key_slice.size()),
reinterpret_cast<const jbyte*>(key_slice.data()));
return jkey;
@@ -133,11 +143,15 @@ jbyteArray Java_org_rocksdb_RocksIterator_key0(
*/
jbyteArray Java_org_rocksdb_RocksIterator_value0(
JNIEnv* env, jobject jobj, jlong handle) {
auto it = reinterpret_cast<rocksdb::Iterator*>(handle);
auto* it = reinterpret_cast<rocksdb::Iterator*>(handle);
rocksdb::Slice value_slice = it->value();
jbyteArray jkeyValue =
env->NewByteArray(static_cast<jsize>(value_slice.size()));
if(jkeyValue == nullptr) {
// exception thrown: OutOfMemoryError
return nullptr;
}
env->SetByteArrayRegion(jkeyValue, 0, static_cast<jsize>(value_slice.size()),
reinterpret_cast<const jbyte*>(value_slice.data()));
return jkeyValue;

@@ -10,53 +10,106 @@
#include "rocksjni/loggerjnicallback.h"
#include "rocksjni/portal.h"
#include <cstdarg>
#include <cstdio>
namespace rocksdb {
LoggerJniCallback::LoggerJniCallback(
JNIEnv* env, jobject jlogger) {
const jint rs __attribute__((unused)) = env->GetJavaVM(&m_jvm);
assert(rs == JNI_OK);
// Note: Logger methods may be accessed by multiple threads,
// so we ref the jvm not the env
const jint rs = env->GetJavaVM(&m_jvm);
if(rs != JNI_OK) {
// exception thrown
return;
}
// Note: we want to access the Java Logger instance
// across multiple method calls, so we create a global ref
assert(jlogger != nullptr);
m_jLogger = env->NewGlobalRef(jlogger);
if(m_jLogger == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
m_jLogMethodId = LoggerJni::getLogMethodId(env);
if(m_jLogMethodId == nullptr) {
// exception thrown: NoSuchMethodException or OutOfMemoryError
return;
}
jobject jdebug_level = InfoLogLevelJni::DEBUG_LEVEL(env);
assert(jdebug_level != nullptr);
if(jdebug_level == nullptr) {
// exception thrown: NoSuchFieldError, ExceptionInInitializerError
// or OutOfMemoryError
return;
}
m_jdebug_level = env->NewGlobalRef(jdebug_level);
if(m_jdebug_level == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
jobject jinfo_level = InfoLogLevelJni::INFO_LEVEL(env);
assert(jinfo_level != nullptr);
if(jinfo_level == nullptr) {
// exception thrown: NoSuchFieldError, ExceptionInInitializerError
// or OutOfMemoryError
return;
}
m_jinfo_level = env->NewGlobalRef(jinfo_level);
if(m_jinfo_level == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
jobject jwarn_level = InfoLogLevelJni::WARN_LEVEL(env);
assert(jwarn_level != nullptr);
if(jwarn_level == nullptr) {
// exception thrown: NoSuchFieldError, ExceptionInInitializerError
// or OutOfMemoryError
return;
}
m_jwarn_level = env->NewGlobalRef(jwarn_level);
if(m_jwarn_level == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
jobject jerror_level = InfoLogLevelJni::ERROR_LEVEL(env);
assert(jerror_level != nullptr);
if(jerror_level == nullptr) {
// exception thrown: NoSuchFieldError, ExceptionInInitializerError
// or OutOfMemoryError
return;
}
m_jerror_level = env->NewGlobalRef(jerror_level);
if(m_jerror_level == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
jobject jfatal_level = InfoLogLevelJni::FATAL_LEVEL(env);
assert(jfatal_level != nullptr);
if(jfatal_level == nullptr) {
// exception thrown: NoSuchFieldError, ExceptionInInitializerError
// or OutOfMemoryError
return;
}
m_jfatal_level = env->NewGlobalRef(jfatal_level);
if(m_jfatal_level == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
jobject jheader_level = InfoLogLevelJni::HEADER_LEVEL(env);
assert(jheader_level != nullptr);
if(jheader_level == nullptr) {
// exception thrown: NoSuchFieldError, ExceptionInInitializerError
// or OutOfMemoryError
return;
}
m_jheader_level = env->NewGlobalRef(jheader_level);
}
/**
* Get JNIEnv for current native thread
*/
JNIEnv* LoggerJniCallback::getJniEnv() const {
JNIEnv *env;
jint rs __attribute__((unused)) =
m_jvm->AttachCurrentThread(reinterpret_cast<void**>(&env), NULL);
assert(rs == JNI_OK);
return env;
if(m_jheader_level == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
}
void LoggerJniCallback::Logv(const char* format, va_list ap) {
@@ -94,69 +147,96 @@ void LoggerJniCallback::Logv(const InfoLogLevel log_level,
break;
}
// We try twice: the first time with a fixed-size stack allocated buffer,
// and the second time with a much larger dynamically allocated buffer.
char buffer[500];
for (int iter = 0; iter < 2; iter++) {
char* base;
int bufsize;
if (iter == 0) {
bufsize = sizeof(buffer);
base = buffer;
} else {
bufsize = 30000;
base = new char[bufsize];
}
char* p = base;
char* limit = base + bufsize;
// Print the message
if (p < limit) {
va_list backup_ap;
va_copy(backup_ap, ap);
p += vsnprintf(p, limit - p, format, backup_ap);
va_end(backup_ap);
}
// Truncate to available space if necessary
if (p >= limit) {
if (iter == 0) {
continue; // Try again with larger buffer
} else {
p = limit - 1;
}
}
assert(p < limit);
*p++ = '\0';
JNIEnv* env = getJniEnv();
assert(format != nullptr);
assert(ap != nullptr);
const std::unique_ptr<char[]> msg = format_str(format, ap);
// pass java string to callback handler
env->CallVoidMethod(
m_jLogger,
m_jLogMethodId,
jlog_level,
env->NewStringUTF(base));
// pass msg to java callback handler
jboolean attached_thread = JNI_FALSE;
JNIEnv* env = JniUtil::getJniEnv(m_jvm, &attached_thread);
assert(env != nullptr);
if (base != buffer) {
delete[] base;
jstring jmsg = env->NewStringUTF(msg.get());
if(jmsg == nullptr) {
// unable to construct string
if(env->ExceptionCheck()) {
env->ExceptionDescribe(); // print out exception to stderr
}
break;
JniUtil::releaseJniEnv(m_jvm, attached_thread);
return;
}
if(env->ExceptionCheck()) {
// exception thrown: OutOfMemoryError
env->ExceptionDescribe(); // print out exception to stderr
env->DeleteLocalRef(jmsg);
JniUtil::releaseJniEnv(m_jvm, attached_thread);
return;
}
env->CallVoidMethod(m_jLogger, m_jLogMethodId, jlog_level, jmsg);
if(env->ExceptionCheck()) {
// exception thrown
env->ExceptionDescribe(); // print out exception to stderr
env->DeleteLocalRef(jmsg);
JniUtil::releaseJniEnv(m_jvm, attached_thread);
return;
}
m_jvm->DetachCurrentThread();
env->DeleteLocalRef(jmsg);
JniUtil::releaseJniEnv(m_jvm, attached_thread);
}
}
std::unique_ptr<char[]> LoggerJniCallback::format_str(const char* format, va_list ap) const {
va_list ap_copy;
va_copy(ap_copy, ap);
const size_t required = vsnprintf(nullptr, 0, format, ap_copy) + 1; // Extra space for '\0'
va_end(ap_copy);
std::unique_ptr<char[]> buf(new char[required]);
va_copy(ap_copy, ap);
vsnprintf(buf.get(), required, format, ap_copy);
va_end(ap_copy);
return buf;
}
LoggerJniCallback::~LoggerJniCallback() {
JNIEnv* env = getJniEnv();
env->DeleteGlobalRef(m_jLogger);
jboolean attached_thread = JNI_FALSE;
JNIEnv* env = JniUtil::getJniEnv(m_jvm, &attached_thread);
assert(env != nullptr);
if(m_jLogger != nullptr) {
env->DeleteGlobalRef(m_jLogger);
}
if(m_jdebug_level != nullptr) {
env->DeleteGlobalRef(m_jdebug_level);
}
if(m_jinfo_level != nullptr) {
env->DeleteGlobalRef(m_jinfo_level);
}
if(m_jwarn_level != nullptr) {
env->DeleteGlobalRef(m_jwarn_level);
}
if(m_jerror_level != nullptr) {
env->DeleteGlobalRef(m_jerror_level);
}
if(m_jfatal_level != nullptr) {
env->DeleteGlobalRef(m_jfatal_level);
}
env->DeleteGlobalRef(m_jdebug_level);
env->DeleteGlobalRef(m_jinfo_level);
env->DeleteGlobalRef(m_jwarn_level);
env->DeleteGlobalRef(m_jerror_level);
env->DeleteGlobalRef(m_jfatal_level);
env->DeleteGlobalRef(m_jheader_level);
if(m_jheader_level != nullptr) {
env->DeleteGlobalRef(m_jheader_level);
}
m_jvm->DetachCurrentThread();
JniUtil::releaseJniEnv(m_jvm, attached_thread);
}
} // namespace rocksdb
@@ -168,15 +248,14 @@ LoggerJniCallback::~LoggerJniCallback() {
*/
jlong Java_org_rocksdb_Logger_createNewLoggerOptions(
JNIEnv* env, jobject jobj, jlong joptions) {
rocksdb::LoggerJniCallback* c =
new rocksdb::LoggerJniCallback(env, jobj);
auto* sptr_logger = new std::shared_ptr<rocksdb::LoggerJniCallback>(
new rocksdb::LoggerJniCallback(env, jobj));
// set log level
c->SetInfoLogLevel(reinterpret_cast<rocksdb::Options*>
(joptions)->info_log_level);
std::shared_ptr<rocksdb::LoggerJniCallback> *pLoggerJniCallback =
new std::shared_ptr<rocksdb::LoggerJniCallback>;
*pLoggerJniCallback = std::shared_ptr<rocksdb::LoggerJniCallback>(c);
return reinterpret_cast<jlong>(pLoggerJniCallback);
auto* options = reinterpret_cast<rocksdb::Options*>(joptions);
sptr_logger->get()->SetInfoLogLevel(options->info_log_level);
return reinterpret_cast<jlong>(sptr_logger);
}
/*
@@ -186,15 +265,14 @@ jlong Java_org_rocksdb_Logger_createNewLoggerOptions(
*/
jlong Java_org_rocksdb_Logger_createNewLoggerDbOptions(
JNIEnv* env, jobject jobj, jlong jdb_options) {
rocksdb::LoggerJniCallback* c =
new rocksdb::LoggerJniCallback(env, jobj);
auto* sptr_logger = new std::shared_ptr<rocksdb::LoggerJniCallback>(
new rocksdb::LoggerJniCallback(env, jobj));
// set log level
c->SetInfoLogLevel(reinterpret_cast<rocksdb::DBOptions*>
(jdb_options)->info_log_level);
std::shared_ptr<rocksdb::LoggerJniCallback> *pLoggerJniCallback =
new std::shared_ptr<rocksdb::LoggerJniCallback>;
*pLoggerJniCallback = std::shared_ptr<rocksdb::LoggerJniCallback>(c);
return reinterpret_cast<jlong>(pLoggerJniCallback);
auto* db_options = reinterpret_cast<rocksdb::DBOptions*>(jdb_options);
sptr_logger->get()->SetInfoLogLevel(db_options->info_log_level);
return reinterpret_cast<jlong>(sptr_logger);
}
/*
@@ -204,9 +282,10 @@ jlong Java_org_rocksdb_Logger_createNewLoggerDbOptions(
*/
void Java_org_rocksdb_Logger_setInfoLogLevel(
JNIEnv* env, jobject jobj, jlong jhandle, jbyte jlog_level) {
std::shared_ptr<rocksdb::LoggerJniCallback> *handle =
auto* handle =
reinterpret_cast<std::shared_ptr<rocksdb::LoggerJniCallback> *>(jhandle);
(*handle)->SetInfoLogLevel(static_cast<rocksdb::InfoLogLevel>(jlog_level));
handle->get()->
SetInfoLogLevel(static_cast<rocksdb::InfoLogLevel>(jlog_level));
}
/*
@@ -216,9 +295,9 @@ void Java_org_rocksdb_Logger_setInfoLogLevel(
*/
jbyte Java_org_rocksdb_Logger_infoLogLevel(
JNIEnv* env, jobject jobj, jlong jhandle) {
std::shared_ptr<rocksdb::LoggerJniCallback> *handle =
auto* handle =
reinterpret_cast<std::shared_ptr<rocksdb::LoggerJniCallback> *>(jhandle);
return static_cast<jbyte>((*handle)->GetInfoLogLevel());
return static_cast<jbyte>(handle->get()->GetInfoLogLevel());
}
/*
@@ -228,7 +307,7 @@ jbyte Java_org_rocksdb_Logger_infoLogLevel(
*/
void Java_org_rocksdb_Logger_disposeInternal(
JNIEnv* env, jobject jobj, jlong jhandle) {
std::shared_ptr<rocksdb::LoggerJniCallback> *handle =
auto* handle =
reinterpret_cast<std::shared_ptr<rocksdb::LoggerJniCallback> *>(jhandle);
handle->reset();
delete handle; // delete std::shared_ptr
}

@@ -10,6 +10,7 @@
#define JAVA_ROCKSJNI_LOGGERJNICALLBACK_H_
#include <jni.h>
#include <memory>
#include <string>
#include "port/port.h"
#include "rocksdb/env.h"
@@ -32,8 +33,6 @@ namespace rocksdb {
virtual void Logv(const InfoLogLevel log_level,
const char* format, va_list ap);
protected:
JNIEnv* getJniEnv() const;
private:
JavaVM* m_jvm;
jobject m_jLogger;
@@ -44,6 +43,7 @@ namespace rocksdb {
jobject m_jerror_level;
jobject m_jfatal_level;
jobject m_jheader_level;
std::unique_ptr<char[]> format_str(const char* format, va_list ap) const;
};
} // namespace rocksdb

@@ -25,13 +25,24 @@
/*
* Class: org_rocksdb_StringAppendOperator
* Method: newMergeOperatorHandle
* Method: newSharedStringAppendOperator
* Signature: ()J
*/
jlong Java_org_rocksdb_StringAppendOperator_newMergeOperatorHandleImpl
(JNIEnv* env, jobject jobj) {
std::shared_ptr<rocksdb::MergeOperator> *op =
new std::shared_ptr<rocksdb::MergeOperator>();
*op = rocksdb::MergeOperators::CreateFromStringId("stringappend");
return reinterpret_cast<jlong>(op);
jlong Java_org_rocksdb_StringAppendOperator_newSharedStringAppendOperator
(JNIEnv* env, jclass jclazz) {
auto* sptr_string_append_op = new std::shared_ptr<rocksdb::MergeOperator>(
rocksdb::MergeOperators::CreateFromStringId("stringappend"));
return reinterpret_cast<jlong>(sptr_string_append_op);
}
/*
* Class: org_rocksdb_StringAppendOperator
* Method: disposeInternal
* Signature: (J)V
*/
void Java_org_rocksdb_StringAppendOperator_disposeInternal(
JNIEnv* env, jobject jobj, jlong jhandle) {
auto* sptr_string_append_op =
reinterpret_cast<std::shared_ptr<rocksdb::MergeOperator>* >(jhandle);
delete sptr_string_append_op; // delete std::shared_ptr
}

@@ -39,7 +39,7 @@
* Signature: ()J
*/
jlong Java_org_rocksdb_Options_newOptions__(JNIEnv* env, jclass jcls) {
rocksdb::Options* op = new rocksdb::Options();
auto* op = new rocksdb::Options();
return reinterpret_cast<jlong>(op);
}
@@ -53,7 +53,7 @@ jlong Java_org_rocksdb_Options_newOptions__JJ(JNIEnv* env, jclass jcls,
auto* dbOpt = reinterpret_cast<const rocksdb::DBOptions*>(jdboptions);
auto* cfOpt = reinterpret_cast<const rocksdb::ColumnFamilyOptions*>(
jcfoptions);
rocksdb::Options* op = new rocksdb::Options(*dbOpt, *cfOpt);
auto* op = new rocksdb::Options(*dbOpt, *cfOpt);
return reinterpret_cast<jlong>(op);
}
@ -64,7 +64,9 @@ jlong Java_org_rocksdb_Options_newOptions__JJ(JNIEnv* env, jclass jcls,
*/
void Java_org_rocksdb_Options_disposeInternal(
JNIEnv* env, jobject jobj, jlong handle) {
delete reinterpret_cast<rocksdb::Options*>(handle);
auto* op = reinterpret_cast<rocksdb::Options*>(handle);
assert(op != nullptr);
delete op;
}
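
disposeInternal here, and in the other option types below, now asserts on the handle before deleting it; `delete nullptr` is well defined but would silently mask a double-dispose or a corrupted handle. A sketch of the convention, with an illustrative stand-in type:

```
#include <cassert>
#include <cstdint>

struct NativeOptions {};  // stand-in for rocksdb::Options and friends

void disposeInternal(int64_t handle) {
  auto* opt = reinterpret_cast<NativeOptions*>(handle);
  // delete on nullptr would be a silent no-op; the assert fails fast
  // in debug builds if Java ever passes a zero or stale handle.
  assert(opt != nullptr);
  delete opt;
}
```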
/*
@ -157,10 +159,16 @@ void Java_org_rocksdb_Options_setComparatorHandle__JJ(
*/
void Java_org_rocksdb_Options_setMergeOperatorName(
JNIEnv* env, jobject jobj, jlong jhandle, jstring jop_name) {
auto options = reinterpret_cast<rocksdb::Options*>(jhandle);
const char* op_name = env->GetStringUTFChars(jop_name, 0);
const char* op_name = env->GetStringUTFChars(jop_name, nullptr);
if(op_name == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
auto* options = reinterpret_cast<rocksdb::Options*>(jhandle);
options->merge_operator = rocksdb::MergeOperators::CreateFromStringId(
op_name);
env->ReleaseStringUTFChars(jop_name, op_name);
}
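
This is the recurring string-handling fix in this patch: GetStringUTFChars may return nullptr with a pending OutOfMemoryError, in which case the native side must return immediately rather than dereference it, and every successful call must be paired with ReleaseStringUTFChars. A standalone sketch of the shape, with illustrative names:

```
#include <jni.h>
#include <string>

// Copies a jstring into out; on failure an OutOfMemoryError is left
// pending in the JVM and out is untouched.
void copyJavaString(JNIEnv* env, jstring jstr, std::string* out) {
  const char* chars = env->GetStringUTFChars(jstr, nullptr);
  if (chars == nullptr) {
    return;  // exception thrown: OutOfMemoryError
  }
  out->assign(chars);
  env->ReleaseStringUTFChars(jstr, chars);  // always release the copy
}
```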
@ -231,7 +239,7 @@ void Java_org_rocksdb_Options_createStatistics(
*/
jlong Java_org_rocksdb_Options_statisticsPtr(
JNIEnv* env, jobject jobj, jlong jOptHandle) {
auto st = reinterpret_cast<rocksdb::Options*>(jOptHandle)->statistics.get();
auto* st = reinterpret_cast<rocksdb::Options*>(jOptHandle)->statistics.get();
return reinterpret_cast<jlong>(st);
}
@ -381,7 +389,11 @@ jstring Java_org_rocksdb_Options_dbLogDir(
*/
void Java_org_rocksdb_Options_setDbLogDir(
JNIEnv* env, jobject jobj, jlong jhandle, jstring jdb_log_dir) {
const char* log_dir = env->GetStringUTFChars(jdb_log_dir, 0);
const char* log_dir = env->GetStringUTFChars(jdb_log_dir, nullptr);
if(log_dir == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
reinterpret_cast<rocksdb::Options*>(jhandle)->db_log_dir.assign(log_dir);
env->ReleaseStringUTFChars(jdb_log_dir, log_dir);
}
@ -404,7 +416,11 @@ jstring Java_org_rocksdb_Options_walDir(
*/
void Java_org_rocksdb_Options_setWalDir(
JNIEnv* env, jobject jobj, jlong jhandle, jstring jwal_dir) {
const char* wal_dir = env->GetStringUTFChars(jwal_dir, 0);
const char* wal_dir = env->GetStringUTFChars(jwal_dir, nullptr);
if(wal_dir == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
reinterpret_cast<rocksdb::Options*>(jhandle)->wal_dir.assign(wal_dir);
env->ReleaseStringUTFChars(jwal_dir, wal_dir);
}
@ -494,8 +510,7 @@ void Java_org_rocksdb_Options_setMaxSubcompactions(
*/
jint Java_org_rocksdb_Options_maxSubcompactions(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)
->max_subcompactions;
return reinterpret_cast<rocksdb::Options*>(jhandle)->max_subcompactions;
}
/*
@ -641,7 +656,7 @@ jlong Java_org_rocksdb_Options_maxManifestFileSize(
*/
jstring Java_org_rocksdb_Options_memTableFactoryName(
JNIEnv* env, jobject jobj, jlong jhandle) {
auto opt = reinterpret_cast<rocksdb::Options*>(jhandle);
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
rocksdb::MemTableRepFactory* tf = opt->memtable_factory.get();
// Should never be nullptr.
@ -677,17 +692,6 @@ void Java_org_rocksdb_Options_setMemTableFactory(
reinterpret_cast<rocksdb::MemTableRepFactory*>(jfactory_handle));
}
/*
* Class: org_rocksdb_Options
* Method: setOldRateLimiter
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_setOldRateLimiter(
JNIEnv* env, jobject jobj, jlong jhandle, jlong jrate_limiter_handle) {
reinterpret_cast<rocksdb::Options*>(jhandle)->rate_limiter.reset(
reinterpret_cast<rocksdb::RateLimiter*>(jrate_limiter_handle));
}
/*
* Class: org_rocksdb_Options
* Method: setRateLimiter
@ -1144,7 +1148,7 @@ jlong Java_org_rocksdb_Options_writeThreadSlowYieldUsec(
*/
jstring Java_org_rocksdb_Options_tableFactoryName(
JNIEnv* env, jobject jobj, jlong jhandle) {
auto opt = reinterpret_cast<rocksdb::Options*>(jhandle);
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
rocksdb::TableFactory* tf = opt->table_factory.get();
// Should never be nullptr.
@ -1224,46 +1228,78 @@ jbyte Java_org_rocksdb_Options_compressionType(
return reinterpret_cast<rocksdb::Options*>(jhandle)->compression;
}
/*
* Helper method to convert a Java list to a CompressionType
* vector.
*/
std::vector<rocksdb::CompressionType> rocksdb_compression_vector_helper(
JNIEnv* env, jbyteArray jcompressionLevels) {
std::vector<rocksdb::CompressionType> compressionLevels;
/**
* Helper method to convert a Java byte array of compression levels
* to a C++ vector of rocksdb::CompressionType
*
* @param env A pointer to the Java environment
 * @param jcompression_levels A reference to a Java byte array
 *        where each byte indicates a compression level
*
* @return A unique_ptr to the vector, or unique_ptr(nullptr) if a JNI exception occurs
*/
std::unique_ptr<std::vector<rocksdb::CompressionType>> rocksdb_compression_vector_helper(
JNIEnv* env, jbyteArray jcompression_levels) {
jsize len = env->GetArrayLength(jcompression_levels);
jbyte* jcompression_level =
env->GetByteArrayElements(jcompression_levels, nullptr);
if(jcompression_level == nullptr) {
// exception thrown: OutOfMemoryError
return std::unique_ptr<std::vector<rocksdb::CompressionType>>();
}
auto* compression_levels = new std::vector<rocksdb::CompressionType>();
std::unique_ptr<std::vector<rocksdb::CompressionType>> uptr_compression_levels(compression_levels);
jsize len = env->GetArrayLength(jcompressionLevels);
jbyte* jcompressionLevel = env->GetByteArrayElements(jcompressionLevels,
NULL);
for(int i = 0; i < len; i++) {
jbyte jcl;
jcl = jcompressionLevel[i];
compressionLevels.push_back(static_cast<rocksdb::CompressionType>(jcl));
for(jsize i = 0; i < len; i++) {
jbyte jcl = jcompression_level[i];
compression_levels->push_back(static_cast<rocksdb::CompressionType>(jcl));
}
env->ReleaseByteArrayElements(jcompressionLevels, jcompressionLevel,
env->ReleaseByteArrayElements(jcompression_levels, jcompression_level,
JNI_ABORT);
return compressionLevels;
return uptr_compression_levels;
}
/*
* Helper method to convert a CompressionType vector to a Java
* List.
/**
* Helper method to convert a C++ vector of rocksdb::CompressionType
* to a Java byte array of compression levels
*
* @param env A pointer to the Java environment
 * @param compression_levels A vector of rocksdb::CompressionType,
 *        where each entry indicates a compression level
 *
 * @return A jbyteArray or nullptr if an exception occurs
*/
jbyteArray rocksdb_compression_list_helper(JNIEnv* env,
std::vector<rocksdb::CompressionType> compressionLevels) {
std::unique_ptr<jbyte[]> jbuf =
std::unique_ptr<jbyte[]>(new jbyte[compressionLevels.size()]);
for (std::vector<rocksdb::CompressionType>::size_type i = 0;
i != compressionLevels.size(); i++) {
jbuf[i] = compressionLevels[i];
std::vector<rocksdb::CompressionType> compression_levels) {
const size_t len = compression_levels.size();
jbyte* jbuf = new jbyte[len];
for (size_t i = 0; i < len; i++) {
jbuf[i] = compression_levels[i];
}
// insert in java array
jbyteArray jcompressionLevels = env->NewByteArray(
static_cast<jsize>(compressionLevels.size()));
env->SetByteArrayRegion(jcompressionLevels, 0,
static_cast<jsize>(compressionLevels.size()), jbuf.get());
return jcompressionLevels;
jbyteArray jcompression_levels = env->NewByteArray(static_cast<jsize>(len));
if(jcompression_levels == nullptr) {
// exception thrown: OutOfMemoryError
delete [] jbuf;
return nullptr;
}
env->SetByteArrayRegion(jcompression_levels, 0, static_cast<jsize>(len),
jbuf);
if(env->ExceptionCheck()) {
// exception thrown: ArrayIndexOutOfBoundsException
env->DeleteLocalRef(jcompression_levels);
delete [] jbuf;
return nullptr;
}
delete [] jbuf;
return jcompression_levels;
}
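
The two helpers above also illustrate the array-handling rules the rest of the patch follows: NewByteArray can return nullptr (OutOfMemoryError pending), SetByteArrayRegion can raise ArrayIndexOutOfBoundsException, and on either failure any native buffer and the half-built local ref must be released before returning; read-only element buffers are released with JNI_ABORT since nothing needs copying back. A condensed sketch with illustrative names:

```
#include <jni.h>

// Copies len bytes from a native buffer into a fresh Java byte[].
// Returns nullptr with a JVM exception pending on failure.
jbyteArray copyToJavaArray(JNIEnv* env, const jbyte* buf, jsize len) {
  jbyteArray arr = env->NewByteArray(len);
  if (arr == nullptr) {
    return nullptr;  // exception thrown: OutOfMemoryError
  }
  env->SetByteArrayRegion(arr, 0, len, buf);
  if (env->ExceptionCheck()) {
    env->DeleteLocalRef(arr);  // drop the half-built array
    return nullptr;  // exception thrown: ArrayIndexOutOfBoundsException
  }
  return arr;
}
```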
/*
@ -1274,10 +1310,14 @@ jbyteArray rocksdb_compression_list_helper(JNIEnv* env,
void Java_org_rocksdb_Options_setCompressionPerLevel(
JNIEnv* env, jobject jobj, jlong jhandle,
jbyteArray jcompressionLevels) {
auto* options = reinterpret_cast<rocksdb::Options*>(jhandle);
std::vector<rocksdb::CompressionType> compressionLevels =
auto uptr_compression_levels =
rocksdb_compression_vector_helper(env, jcompressionLevels);
options->compression_per_level = compressionLevels;
if(!uptr_compression_levels) {
// exception occurred
return;
}
auto* options = reinterpret_cast<rocksdb::Options*>(jhandle);
options->compression_per_level = *(uptr_compression_levels.get());
}
/*
@ -1946,7 +1986,6 @@ jlong Java_org_rocksdb_Options_memtableHugePageSize(
void Java_org_rocksdb_Options_setMemtableHugePageSize(
JNIEnv* env, jobject jobj, jlong jhandle,
jlong jmemtable_huge_page_size) {
rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(
jmemtable_huge_page_size);
if (s.ok()) {
@ -2083,8 +2122,9 @@ void Java_org_rocksdb_Options_setLevel0StopWritesTrigger(
*/
jintArray Java_org_rocksdb_Options_maxBytesForLevelMultiplierAdditional(
JNIEnv* env, jobject jobj, jlong jhandle) {
auto mbflma = reinterpret_cast<rocksdb::Options*>(
jhandle)->max_bytes_for_level_multiplier_additional;
auto mbflma =
reinterpret_cast<rocksdb::Options*>(jhandle)->
max_bytes_for_level_multiplier_additional;
const size_t size = mbflma.size();
@ -2095,7 +2135,19 @@ jintArray Java_org_rocksdb_Options_maxBytesForLevelMultiplierAdditional(
jsize jlen = static_cast<jsize>(size);
jintArray result = env->NewIntArray(jlen);
if(result == nullptr) {
// exception thrown: OutOfMemoryError
delete [] additionals;
return nullptr;
}
env->SetIntArrayRegion(result, 0, jlen, additionals);
if(env->ExceptionCheck()) {
// exception thrown: ArrayIndexOutOfBoundsException
env->DeleteLocalRef(result);
delete [] additionals;
return nullptr;
}
delete [] additionals;
@ -2112,12 +2164,20 @@ void Java_org_rocksdb_Options_setMaxBytesForLevelMultiplierAdditional(
jintArray jmax_bytes_for_level_multiplier_additional) {
jsize len = env->GetArrayLength(jmax_bytes_for_level_multiplier_additional);
jint *additionals =
env->GetIntArrayElements(jmax_bytes_for_level_multiplier_additional, 0);
env->GetIntArrayElements(jmax_bytes_for_level_multiplier_additional, nullptr);
if(additionals == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
opt->max_bytes_for_level_multiplier_additional.clear();
for (jsize i = 0; i < len; i++) {
opt->max_bytes_for_level_multiplier_additional.push_back(static_cast<int32_t>(additionals[i]));
}
env->ReleaseIntArrayElements(jmax_bytes_for_level_multiplier_additional,
additionals, JNI_ABORT);
}
/*
@ -2153,7 +2213,7 @@ void Java_org_rocksdb_Options_setParanoidFileChecks(
*/
jlong Java_org_rocksdb_ColumnFamilyOptions_newColumnFamilyOptions(
JNIEnv* env, jclass jcls) {
rocksdb::ColumnFamilyOptions* op = new rocksdb::ColumnFamilyOptions();
auto* op = new rocksdb::ColumnFamilyOptions();
return reinterpret_cast<jlong>(op);
}
@ -2164,14 +2224,20 @@ jlong Java_org_rocksdb_ColumnFamilyOptions_newColumnFamilyOptions(
*/
jlong Java_org_rocksdb_ColumnFamilyOptions_getColumnFamilyOptionsFromProps(
JNIEnv* env, jclass jclazz, jstring jopt_string) {
jlong ret_value = 0;
rocksdb::ColumnFamilyOptions* cf_options =
new rocksdb::ColumnFamilyOptions();
const char* opt_string = env->GetStringUTFChars(jopt_string, 0);
const char* opt_string = env->GetStringUTFChars(jopt_string, nullptr);
if(opt_string == nullptr) {
// exception thrown: OutOfMemoryError
return 0;
}
auto* cf_options = new rocksdb::ColumnFamilyOptions();
rocksdb::Status status = rocksdb::GetColumnFamilyOptionsFromString(
rocksdb::ColumnFamilyOptions(), opt_string, cf_options);
env->ReleaseStringUTFChars(jopt_string, opt_string);
// Check if ColumnFamilyOptions creation was possible.
jlong ret_value = 0;
if (status.ok()) {
ret_value = reinterpret_cast<jlong>(cf_options);
} else {
@ -2189,7 +2255,9 @@ jlong Java_org_rocksdb_ColumnFamilyOptions_getColumnFamilyOptionsFromProps(
*/
void Java_org_rocksdb_ColumnFamilyOptions_disposeInternal(
JNIEnv* env, jobject jobj, jlong handle) {
delete reinterpret_cast<rocksdb::ColumnFamilyOptions*>(handle);
auto* cfo = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(handle);
assert(cfo != nullptr);
delete cfo;
}
/*
@ -2265,10 +2333,15 @@ void Java_org_rocksdb_ColumnFamilyOptions_setComparatorHandle__JJ(
*/
void Java_org_rocksdb_ColumnFamilyOptions_setMergeOperatorName(
JNIEnv* env, jobject jobj, jlong jhandle, jstring jop_name) {
auto options = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
const char* op_name = env->GetStringUTFChars(jop_name, 0);
options->merge_operator = rocksdb::MergeOperators::CreateFromStringId(
op_name);
auto* options = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
const char* op_name = env->GetStringUTFChars(jop_name, nullptr);
if(op_name == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
options->merge_operator =
rocksdb::MergeOperators::CreateFromStringId(op_name);
env->ReleaseStringUTFChars(jop_name, op_name);
}
@ -2364,7 +2437,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setMemTableFactory(
*/
jstring Java_org_rocksdb_ColumnFamilyOptions_memTableFactoryName(
JNIEnv* env, jobject jobj, jlong jhandle) {
auto opt = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
auto* opt = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
rocksdb::MemTableRepFactory* tf = opt->memtable_factory.get();
// Should never be nullptr.
@ -2418,7 +2491,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setTableFactory(
*/
jstring Java_org_rocksdb_ColumnFamilyOptions_tableFactoryName(
JNIEnv* env, jobject jobj, jlong jhandle) {
auto opt = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
auto* opt = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
rocksdb::TableFactory* tf = opt->table_factory.get();
// Should never be nullptr.
@ -2508,9 +2581,13 @@ void Java_org_rocksdb_ColumnFamilyOptions_setCompressionPerLevel(
JNIEnv* env, jobject jobj, jlong jhandle,
jbyteArray jcompressionLevels) {
auto* options = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
std::vector<rocksdb::CompressionType> compressionLevels =
auto uptr_compression_levels =
rocksdb_compression_vector_helper(env, jcompressionLevels);
options->compression_per_level = compressionLevels;
if(!uptr_compression_levels) {
// exception occurred
return;
}
options->compression_per_level = *(uptr_compression_levels.get());
}
/*
@ -2520,9 +2597,9 @@ void Java_org_rocksdb_ColumnFamilyOptions_setCompressionPerLevel(
*/
jbyteArray Java_org_rocksdb_ColumnFamilyOptions_compressionPerLevel(
JNIEnv* env, jobject jobj, jlong jhandle) {
auto* options = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
auto* cf_options = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
return rocksdb_compression_list_helper(env,
options->compression_per_level);
cf_options->compression_per_level);
}
/*
@ -2668,7 +2745,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setLevelZeroStopWritesTrigger(
*/
jint Java_org_rocksdb_ColumnFamilyOptions_maxMemCompactionLevel(
JNIEnv* env, jobject jobj, jlong jhandle) {
return 0;
return 0; // deprecated and intentionally not implemented, see the Java code
}
/*
@ -2677,7 +2754,9 @@ jint Java_org_rocksdb_ColumnFamilyOptions_maxMemCompactionLevel(
* Signature: (JI)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setMaxMemCompactionLevel(
JNIEnv* env, jobject jobj, jlong jhandle, jint jmax_mem_compaction_level) {}
JNIEnv* env, jobject jobj, jlong jhandle, jint jmax_mem_compaction_level) {
// deprecated and intentionally not implemented, see the Java code
}
/*
* Class: org_rocksdb_ColumnFamilyOptions
@ -3308,9 +3387,19 @@ jintArray Java_org_rocksdb_ColumnFamilyOptions_maxBytesForLevelMultiplierAdditio
}
jsize jlen = static_cast<jsize>(size);
jintArray result;
result = env->NewIntArray(jlen);
jintArray result = env->NewIntArray(jlen);
if(result == nullptr) {
// exception thrown: OutOfMemoryError
delete [] additionals;
return nullptr;
}
env->SetIntArrayRegion(result, 0, jlen, additionals);
if(env->ExceptionCheck()) {
// exception thrown: ArrayIndexOutOfBoundsException
env->DeleteLocalRef(result);
delete [] additionals;
return nullptr;
}
delete [] additionals;
@ -3328,11 +3417,19 @@ void Java_org_rocksdb_ColumnFamilyOptions_setMaxBytesForLevelMultiplierAdditiona
jsize len = env->GetArrayLength(jmax_bytes_for_level_multiplier_additional);
jint *additionals =
env->GetIntArrayElements(jmax_bytes_for_level_multiplier_additional, 0);
if(additionals == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
auto* cf_opt = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
cf_opt->max_bytes_for_level_multiplier_additional.clear();
for (jsize i = 0; i < len; i++) {
cf_opt->max_bytes_for_level_multiplier_additional.push_back(static_cast<int32_t>(additionals[i]));
}
env->ReleaseIntArrayElements(jmax_bytes_for_level_multiplier_additional,
additionals, JNI_ABORT);
}
/*
@ -3369,7 +3466,7 @@ void Java_org_rocksdb_ColumnFamilyOptions_setParanoidFileChecks(
*/
jlong Java_org_rocksdb_DBOptions_newDBOptions(JNIEnv* env,
jclass jcls) {
rocksdb::DBOptions* dbop = new rocksdb::DBOptions();
auto* dbop = new rocksdb::DBOptions();
return reinterpret_cast<jlong>(dbop);
}
@ -3380,14 +3477,20 @@ jlong Java_org_rocksdb_DBOptions_newDBOptions(JNIEnv* env,
*/
jlong Java_org_rocksdb_DBOptions_getDBOptionsFromProps(
JNIEnv* env, jclass jclazz, jstring jopt_string) {
jlong ret_value = 0;
rocksdb::DBOptions* db_options =
new rocksdb::DBOptions();
const char* opt_string = env->GetStringUTFChars(jopt_string, 0);
const char* opt_string = env->GetStringUTFChars(jopt_string, nullptr);
if(opt_string == nullptr) {
// exception thrown: OutOfMemoryError
return 0;
}
auto* db_options = new rocksdb::DBOptions();
rocksdb::Status status = rocksdb::GetDBOptionsFromString(
rocksdb::DBOptions(), opt_string, db_options);
env->ReleaseStringUTFChars(jopt_string, opt_string);
// Check if DBOptions creation was possible.
jlong ret_value = 0;
if (status.ok()) {
ret_value = reinterpret_cast<jlong>(db_options);
} else {
@ -3405,7 +3508,9 @@ jlong Java_org_rocksdb_DBOptions_getDBOptionsFromProps(
*/
void Java_org_rocksdb_DBOptions_disposeInternal(
JNIEnv* env, jobject jobj, jlong handle) {
delete reinterpret_cast<rocksdb::DBOptions*>(handle);
auto* dbo = reinterpret_cast<rocksdb::DBOptions*>(handle);
assert(dbo != nullptr);
delete dbo;
}
/*
@ -3505,17 +3610,6 @@ jboolean Java_org_rocksdb_DBOptions_paranoidChecks(
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->paranoid_checks;
}
/*
* Class: org_rocksdb_DBOptions
* Method: setOldRateLimiter
* Signature: (JJ)V
*/
void Java_org_rocksdb_DBOptions_setOldRateLimiter(
JNIEnv* env, jobject jobj, jlong jhandle, jlong jrate_limiter_handle) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->rate_limiter.reset(
reinterpret_cast<rocksdb::RateLimiter*>(jrate_limiter_handle));
}
/*
* Class: org_rocksdb_DBOptions
* Method: setRateLimiter
@ -3626,7 +3720,7 @@ void Java_org_rocksdb_DBOptions_createStatistics(
*/
jlong Java_org_rocksdb_DBOptions_statisticsPtr(
JNIEnv* env, jobject jobj, jlong jOptHandle) {
auto st = reinterpret_cast<rocksdb::DBOptions*>(jOptHandle)->
auto* st = reinterpret_cast<rocksdb::DBOptions*>(jOptHandle)->
statistics.get();
return reinterpret_cast<jlong>(st);
}
@ -3659,7 +3753,12 @@ jboolean Java_org_rocksdb_DBOptions_useFsync(
*/
void Java_org_rocksdb_DBOptions_setDbLogDir(
JNIEnv* env, jobject jobj, jlong jhandle, jstring jdb_log_dir) {
const char* log_dir = env->GetStringUTFChars(jdb_log_dir, 0);
const char* log_dir = env->GetStringUTFChars(jdb_log_dir, nullptr);
if(log_dir == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->db_log_dir.assign(log_dir);
env->ReleaseStringUTFChars(jdb_log_dir, log_dir);
}
@ -4307,19 +4406,17 @@ jlong Java_org_rocksdb_DBOptions_writeThreadSlowYieldUsec(
}
void Java_org_rocksdb_DBOptions_setDelayedWriteRate(
JNIEnv* env, jobject jobj, jlong jhandle, jlong delay_write_rate){
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->
delayed_write_rate = static_cast<int64_t>(delay_write_rate);
}
JNIEnv* env, jobject jobj, jlong jhandle, jlong delay_write_rate) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->delayed_write_rate =
static_cast<int64_t>(delay_write_rate);
}
jlong Java_org_rocksdb_DBOptions_delayedWriteRate(
JNIEnv* env, jobject jobj, jlong jhandle){
jlong Java_org_rocksdb_DBOptions_delayedWriteRate(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->
delayed_write_rate;
}
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->
delayed_write_rate;
}
//////////////////////////////////////////////////////////////////////////////
// rocksdb::WriteOptions
@ -4330,7 +4427,7 @@ void Java_org_rocksdb_DBOptions_setDelayedWriteRate(
*/
jlong Java_org_rocksdb_WriteOptions_newWriteOptions(
JNIEnv* env, jclass jcls) {
rocksdb::WriteOptions* op = new rocksdb::WriteOptions();
auto* op = new rocksdb::WriteOptions();
return reinterpret_cast<jlong>(op);
}
@ -4341,7 +4438,8 @@ jlong Java_org_rocksdb_WriteOptions_newWriteOptions(
*/
void Java_org_rocksdb_WriteOptions_disposeInternal(
JNIEnv* env, jobject jwrite_options, jlong jhandle) {
auto write_options = reinterpret_cast<rocksdb::WriteOptions*>(jhandle);
auto* write_options = reinterpret_cast<rocksdb::WriteOptions*>(jhandle);
assert(write_options != nullptr);
delete write_options;
}
@ -4395,8 +4493,8 @@ jboolean Java_org_rocksdb_WriteOptions_disableWAL(
*/
jlong Java_org_rocksdb_ReadOptions_newReadOptions(
JNIEnv* env, jclass jcls) {
auto read_opt = new rocksdb::ReadOptions();
return reinterpret_cast<jlong>(read_opt);
auto* read_options = new rocksdb::ReadOptions();
return reinterpret_cast<jlong>(read_options);
}
/*
@ -4406,7 +4504,9 @@ jlong Java_org_rocksdb_ReadOptions_newReadOptions(
*/
void Java_org_rocksdb_ReadOptions_disposeInternal(
JNIEnv* env, jobject jobj, jlong jhandle) {
delete reinterpret_cast<rocksdb::ReadOptions*>(jhandle);
auto* read_options = reinterpret_cast<rocksdb::ReadOptions*>(jhandle);
assert(read_options != nullptr);
delete read_options;
}
/*
@ -4613,7 +4713,7 @@ void Java_org_rocksdb_ReadOptions_setReadTier(
*/
jlong Java_org_rocksdb_ComparatorOptions_newComparatorOptions(
JNIEnv* env, jclass jcls) {
auto comparator_opt = new rocksdb::ComparatorJniCallbackOptions();
auto* comparator_opt = new rocksdb::ComparatorJniCallbackOptions();
return reinterpret_cast<jlong>(comparator_opt);
}
@ -4646,7 +4746,10 @@ void Java_org_rocksdb_ComparatorOptions_setUseAdaptiveMutex(
*/
void Java_org_rocksdb_ComparatorOptions_disposeInternal(
JNIEnv * env, jobject jobj, jlong jhandle) {
delete reinterpret_cast<rocksdb::ComparatorJniCallbackOptions*>(jhandle);
auto* comparator_opt =
reinterpret_cast<rocksdb::ComparatorJniCallbackOptions*>(jhandle);
assert(comparator_opt != nullptr);
delete comparator_opt;
}
/////////////////////////////////////////////////////////////////////
@ -4659,7 +4762,7 @@ void Java_org_rocksdb_ComparatorOptions_disposeInternal(
*/
jlong Java_org_rocksdb_FlushOptions_newFlushOptions(
JNIEnv* env, jclass jcls) {
auto flush_opt = new rocksdb::FlushOptions();
auto* flush_opt = new rocksdb::FlushOptions();
return reinterpret_cast<jlong>(flush_opt);
}
@ -4692,5 +4795,7 @@ jboolean Java_org_rocksdb_FlushOptions_waitForFlush(
*/
void Java_org_rocksdb_FlushOptions_disposeInternal(
JNIEnv * env, jobject jobj, jlong jhandle) {
delete reinterpret_cast<rocksdb::FlushOptions*>(jhandle);
auto* flush_opt = reinterpret_cast<rocksdb::FlushOptions*>(jhandle);
assert(flush_opt != nullptr);
delete flush_opt;
}

File diff suppressed because it is too large

@ -6,24 +6,9 @@
// This file implements the "bridge" between Java and C++ for RateLimiter.
#include "rocksjni/portal.h"
#include "include/org_rocksdb_GenericRateLimiterConfig.h"
#include "include/org_rocksdb_RateLimiter.h"
#include "rocksdb/rate_limiter.h"
/*
* Class: org_rocksdb_GenericRateLimiterConfig
* Method: newRateLimiterHandle
* Signature: (JJI)J
*/
jlong Java_org_rocksdb_GenericRateLimiterConfig_newRateLimiterHandle(
JNIEnv* env, jobject jobj, jlong jrate_bytes_per_second,
jlong jrefill_period_micros, jint jfairness) {
return reinterpret_cast<jlong>(rocksdb::NewGenericRateLimiter(
static_cast<int64_t>(jrate_bytes_per_second),
static_cast<int64_t>(jrefill_period_micros),
static_cast<int32_t>(jfairness)));
}
/*
* Class: org_rocksdb_RateLimiter
* Method: newRateLimiterHandle
@ -32,16 +17,13 @@ jlong Java_org_rocksdb_GenericRateLimiterConfig_newRateLimiterHandle(
jlong Java_org_rocksdb_RateLimiter_newRateLimiterHandle(
JNIEnv* env, jclass jclazz, jlong jrate_bytes_per_second,
jlong jrefill_period_micros, jint jfairness) {
auto* rate_limiter = rocksdb::NewGenericRateLimiter(
static_cast<int64_t>(jrate_bytes_per_second),
static_cast<int64_t>(jrefill_period_micros),
static_cast<int32_t>(jfairness));
auto* sptr_rate_limiter =
new std::shared_ptr<rocksdb::RateLimiter>(rocksdb::NewGenericRateLimiter(
static_cast<int64_t>(jrate_bytes_per_second),
static_cast<int64_t>(jrefill_period_micros),
static_cast<int32_t>(jfairness)));
std::shared_ptr<rocksdb::RateLimiter> *ptr_sptr_rate_limiter =
new std::shared_ptr<rocksdb::RateLimiter>;
*ptr_sptr_rate_limiter = std::shared_ptr<rocksdb::RateLimiter>(rate_limiter);
return reinterpret_cast<jlong>(ptr_sptr_rate_limiter);
return reinterpret_cast<jlong>(sptr_rate_limiter);
}
/*
@ -51,10 +33,9 @@ jlong Java_org_rocksdb_RateLimiter_newRateLimiterHandle(
*/
void Java_org_rocksdb_RateLimiter_disposeInternal(
JNIEnv* env, jobject jobj, jlong jhandle) {
std::shared_ptr<rocksdb::RateLimiter> *handle =
auto* handle =
reinterpret_cast<std::shared_ptr<rocksdb::RateLimiter> *>(jhandle);
handle->reset();
delete handle;
delete handle; // delete std::shared_ptr
}
/*
@ -65,8 +46,8 @@ void Java_org_rocksdb_RateLimiter_disposeInternal(
void Java_org_rocksdb_RateLimiter_setBytesPerSecond(
JNIEnv* env, jobject jobj, jlong handle,
jlong jbytes_per_second) {
reinterpret_cast<rocksdb::RateLimiter*>(
handle)->SetBytesPerSecond(jbytes_per_second);
reinterpret_cast<std::shared_ptr<rocksdb::RateLimiter> *>(handle)->get()->
SetBytesPerSecond(jbytes_per_second);
}
/*
@ -77,9 +58,8 @@ void Java_org_rocksdb_RateLimiter_setBytesPerSecond(
void Java_org_rocksdb_RateLimiter_request(
JNIEnv* env, jobject jobj, jlong handle,
jlong jbytes) {
reinterpret_cast<rocksdb::RateLimiter*>(
handle)->Request(jbytes,
rocksdb::Env::IO_TOTAL);
reinterpret_cast<std::shared_ptr<rocksdb::RateLimiter> *>(handle)->get()->
Request(jbytes, rocksdb::Env::IO_TOTAL);
}
/*
@ -88,10 +68,9 @@ void Java_org_rocksdb_RateLimiter_request(
* Signature: (J)J
*/
jlong Java_org_rocksdb_RateLimiter_getSingleBurstBytes(
JNIEnv* env, jobject jobj, jlong handle,
jlong jbytes) {
return reinterpret_cast<rocksdb::RateLimiter*>(
handle)->GetSingleBurstBytes();
JNIEnv* env, jobject jobj, jlong handle) {
return reinterpret_cast<std::shared_ptr<rocksdb::RateLimiter> *>(handle)->
get()->GetSingleBurstBytes();
}
/*
@ -100,10 +79,9 @@ jlong Java_org_rocksdb_RateLimiter_getSingleBurstBytes(
* Signature: (J)J
*/
jlong Java_org_rocksdb_RateLimiter_getTotalBytesThrough(
JNIEnv* env, jobject jobj, jlong handle,
jlong jbytes) {
return reinterpret_cast<rocksdb::RateLimiter*>(
handle)->GetTotalBytesThrough();
JNIEnv* env, jobject jobj, jlong handle) {
return reinterpret_cast<std::shared_ptr<rocksdb::RateLimiter> *>(handle)->
get()->GetTotalBytesThrough();
}
/*
@ -112,8 +90,7 @@ jlong Java_org_rocksdb_RateLimiter_getTotalBytesThrough(
* Signature: (J)J
*/
jlong Java_org_rocksdb_RateLimiter_getTotalRequests(
JNIEnv* env, jobject jobj, jlong handle,
jlong jbytes) {
return reinterpret_cast<rocksdb::RateLimiter*>(
handle)->GetTotalRequests();
JNIEnv* env, jobject jobj, jlong handle) {
return reinterpret_cast<std::shared_ptr<rocksdb::RateLimiter> *>(handle)->
get()->GetTotalRequests();
}

@ -22,7 +22,7 @@
*/
jlong Java_org_rocksdb_RestoreOptions_newRestoreOptions(JNIEnv* env,
jclass jcls, jboolean keep_log_files) {
auto ropt = new rocksdb::RestoreOptions(keep_log_files);
auto* ropt = new rocksdb::RestoreOptions(keep_log_files);
return reinterpret_cast<jlong>(ropt);
}
@ -33,7 +33,7 @@ jlong Java_org_rocksdb_RestoreOptions_newRestoreOptions(JNIEnv* env,
*/
void Java_org_rocksdb_RestoreOptions_disposeInternal(JNIEnv* env, jobject jobj,
jlong jhandle) {
auto ropt = reinterpret_cast<rocksdb::RestoreOptions*>(jhandle);
auto* ropt = reinterpret_cast<rocksdb::RestoreOptions*>(jhandle);
assert(ropt);
delete ropt;
}

File diff suppressed because it is too large

@ -26,8 +26,17 @@
*/
jlong Java_org_rocksdb_AbstractSlice_createNewSliceFromString(
JNIEnv * env, jclass jcls, jstring jstr) {
const auto* str = env->GetStringUTFChars(jstr, NULL);
const auto* str = env->GetStringUTFChars(jstr, nullptr);
if(str == nullptr) {
// exception thrown: OutOfMemoryError
return 0;
}
const size_t len = strlen(str);
// NOTE: buf will be deleted in the
// Java_org_rocksdb_Slice_disposeInternalBuf or
// Java_org_rocksdb_DirectSlice_disposeInternalBuf methods
char* buf = new char[len + 1];
memcpy(buf, str, len);
buf[len] = 0;
@ -118,13 +127,18 @@ void Java_org_rocksdb_AbstractSlice_disposeInternal(
*/
jlong Java_org_rocksdb_Slice_createNewSlice0(
JNIEnv * env, jclass jcls, jbyteArray data, jint offset) {
const jsize dataSize = env->GetArrayLength(data);
const int len = dataSize - offset;
jbyte* ptrData = new jbyte[len];
env->GetByteArrayRegion(data, offset, len, ptrData);
const auto* slice = new rocksdb::Slice((const char*)ptrData, len);
// NOTE: buf will be deleted in the Java_org_rocksdb_Slice_disposeInternalBuf method
jbyte* buf = new jbyte[len];
env->GetByteArrayRegion(data, offset, len, buf);
if(env->ExceptionCheck()) {
// exception thrown: ArrayIndexOutOfBoundsException
return 0;
}
const auto* slice = new rocksdb::Slice((const char*)buf, len);
return reinterpret_cast<jlong>(slice);
}
@ -135,16 +149,17 @@ jlong Java_org_rocksdb_Slice_createNewSlice0(
*/
jlong Java_org_rocksdb_Slice_createNewSlice1(
JNIEnv * env, jclass jcls, jbyteArray data) {
jbyte* ptrData = env->GetByteArrayElements(data, nullptr);
if(ptrData == nullptr) {
// exception thrown: OutOfMemoryError
return 0;
}
const int len = env->GetArrayLength(data) + 1;
jboolean isCopy;
jbyte* ptrData = env->GetByteArrayElements(data, &isCopy);
// NOTE: buf will be deleted in the org.rocksdb.Slice#dispose method
// NOTE: buf will be deleted in the Java_org_rocksdb_Slice_disposeInternalBuf method
char* buf = new char[len];
memcpy(buf, ptrData, len - 1);
buf[len-1]='\0';
buf[len-1] = '\0';
const auto* slice =
new rocksdb::Slice(buf, len - 1);
@ -162,22 +177,61 @@ jlong Java_org_rocksdb_Slice_createNewSlice1(
jbyteArray Java_org_rocksdb_Slice_data0(
JNIEnv* env, jobject jobj, jlong handle) {
const auto* slice = reinterpret_cast<rocksdb::Slice*>(handle);
const int len = static_cast<int>(slice->size());
const jsize len = static_cast<jsize>(slice->size());
const jbyteArray data = env->NewByteArray(len);
if(data == nullptr) {
// exception thrown: OutOfMemoryError
return nullptr;
}
env->SetByteArrayRegion(data, 0, len,
reinterpret_cast<const jbyte*>(slice->data()));
if(env->ExceptionCheck()) {
// exception thrown: ArrayIndexOutOfBoundsException
env->DeleteLocalRef(data);
return nullptr;
}
return data;
}
/*
* Class: org_rocksdb_Slice
* Method: clear0
* Signature: (JZJ)V
*/
void Java_org_rocksdb_Slice_clear0(
JNIEnv * env, jobject jobj, jlong handle, jboolean shouldRelease,
jlong internalBufferOffset) {
auto* slice = reinterpret_cast<rocksdb::Slice*>(handle);
if(shouldRelease == JNI_TRUE) {
const char* buf = slice->data_ - internalBufferOffset;
delete [] buf;
}
slice->clear();
}
/*
* Class: org_rocksdb_Slice
* Method: removePrefix0
* Signature: (JI)V
*/
void Java_org_rocksdb_Slice_removePrefix0(
JNIEnv * env, jobject jobj, jlong handle, jint length) {
auto* slice = reinterpret_cast<rocksdb::Slice*>(handle);
slice->remove_prefix(length);
}
/*
* Class: org_rocksdb_Slice
* Method: disposeInternalBuf
* Signature: (J)V
* Signature: (JJ)V
*/
void Java_org_rocksdb_Slice_disposeInternalBuf(
JNIEnv * env, jobject jobj, jlong handle) {
JNIEnv * env, jobject jobj, jlong handle, jlong internalBufferOffset) {
const auto* slice = reinterpret_cast<rocksdb::Slice*>(handle);
delete [] slice->data_;
const char* buf = slice->data_ - internalBufferOffset;
delete [] buf;
}
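
The new internalBufferOffset parameter exists because removePrefix0 advances the slice's data_ pointer past the start of the buffer that was originally new[]'d; deleting data_ directly after a prefix removal would free the wrong address. The Java side tracks how far the pointer has moved, and the native side walks back before freeing. A sketch with a simplified slice type standing in for rocksdb::Slice:

```
#include <cstddef>

// Simplified stand-in for rocksdb::Slice's pointer arithmetic.
struct MiniSlice {
  const char* data_;
  size_t size_;
  void remove_prefix(size_t n) { data_ += n; size_ -= n; }
};

void disposeInternalBuf(MiniSlice* slice, long internal_buffer_offset) {
  // data_ may have been advanced by remove_prefix; recover the
  // original allocation start before delete[].
  const char* buf = slice->data_ - internal_buffer_offset;
  delete[] buf;
}
```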
// </editor-fold>
@ -191,8 +245,19 @@ void Java_org_rocksdb_Slice_disposeInternalBuf(
*/
jlong Java_org_rocksdb_DirectSlice_createNewDirectSlice0(
JNIEnv* env, jclass jcls, jobject data, jint length) {
assert(data != nullptr);
void* data_addr = env->GetDirectBufferAddress(data);
if(data_addr == nullptr) {
// error: memory region is undefined, given object is not a direct
// java.nio.Buffer, or JNI access to direct buffers is not supported by JVM
rocksdb::IllegalArgumentExceptionJni::ThrowNew(env,
rocksdb::Status::InvalidArgument(
"Could not access DirectBuffer"));
return 0;
}
const auto* ptrData =
reinterpret_cast<char*>(env->GetDirectBufferAddress(data));
reinterpret_cast<char*>(data_addr);
const auto* slice = new rocksdb::Slice(ptrData, length);
return reinterpret_cast<jlong>(slice);
}
@ -204,8 +269,17 @@ jlong Java_org_rocksdb_DirectSlice_createNewDirectSlice0(
*/
jlong Java_org_rocksdb_DirectSlice_createNewDirectSlice1(
JNIEnv* env, jclass jcls, jobject data) {
const auto* ptrData =
reinterpret_cast<char*>(env->GetDirectBufferAddress(data));
void* data_addr = env->GetDirectBufferAddress(data);
if(data_addr == nullptr) {
// error: memory region is undefined, given object is not a direct
// java.nio.Buffer, or JNI access to direct buffers is not supported by JVM
rocksdb::IllegalArgumentExceptionJni::ThrowNew(env,
rocksdb::Status::InvalidArgument(
"Could not access DirectBuffer"));
return 0;
}
const auto* ptrData = reinterpret_cast<char*>(data_addr);
const auto* slice = new rocksdb::Slice(ptrData);
return reinterpret_cast<jlong>(slice);
}
@ -236,12 +310,16 @@ jbyte Java_org_rocksdb_DirectSlice_get0(
/*
* Class: org_rocksdb_DirectSlice
* Method: clear0
* Signature: (J)V
* Signature: (JZJ)V
*/
void Java_org_rocksdb_DirectSlice_clear0(
JNIEnv* env, jobject jobj, jlong handle) {
JNIEnv* env, jobject jobj, jlong handle,
jboolean shouldRelease, jlong internalBufferOffset) {
auto* slice = reinterpret_cast<rocksdb::Slice*>(handle);
delete [] slice->data_;
if(shouldRelease == JNI_TRUE) {
const char* buf = slice->data_ - internalBufferOffset;
delete [] buf;
}
slice->clear();
}
@ -256,4 +334,16 @@ void Java_org_rocksdb_DirectSlice_removePrefix0(
slice->remove_prefix(length);
}
/*
* Class: org_rocksdb_DirectSlice
* Method: disposeInternalBuf
* Signature: (JJ)V
*/
void Java_org_rocksdb_DirectSlice_disposeInternalBuf(
JNIEnv* env, jobject jobj, jlong handle, jlong internalBufferOffset) {
const auto* slice = reinterpret_cast<rocksdb::Slice*>(handle);
const char* buf = slice->data_ - internalBufferOffset;
delete [] buf;
}
// </editor-fold>

@ -42,7 +42,11 @@ jlong Java_org_rocksdb_SstFileWriter_newSstFileWriter(JNIEnv *env, jclass jcls,
*/
void Java_org_rocksdb_SstFileWriter_open(JNIEnv *env, jobject jobj,
jlong jhandle, jstring jfile_path) {
const char *file_path = env->GetStringUTFChars(jfile_path, NULL);
const char *file_path = env->GetStringUTFChars(jfile_path, nullptr);
if(file_path == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
rocksdb::Status s =
reinterpret_cast<rocksdb::SstFileWriter *>(jhandle)->Open(file_path);
env->ReleaseStringUTFChars(jfile_path, file_path);
@ -62,8 +66,9 @@ void Java_org_rocksdb_SstFileWriter_add(JNIEnv *env, jobject jobj,
jlong jvalue_handle) {
auto *key_slice = reinterpret_cast<rocksdb::Slice *>(jkey_handle);
auto *value_slice = reinterpret_cast<rocksdb::Slice *>(jvalue_handle);
rocksdb::Status s = reinterpret_cast<rocksdb::SstFileWriter *>(jhandle)->Add(
*key_slice, *value_slice);
rocksdb::Status s =
reinterpret_cast<rocksdb::SstFileWriter *>(jhandle)->Add(*key_slice,
*value_slice);
if (!s.ok()) {
rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
}

@ -21,9 +21,8 @@
*/
jlong Java_org_rocksdb_Statistics_getTickerCount0(
JNIEnv* env, jobject jobj, jint tickerType, jlong handle) {
auto st = reinterpret_cast<rocksdb::Statistics*>(handle);
auto* st = reinterpret_cast<rocksdb::Statistics*>(handle);
assert(st != nullptr);
return st->getTickerCount(static_cast<rocksdb::Tickers>(tickerType));
}
@ -34,17 +33,28 @@ jlong Java_org_rocksdb_Statistics_getTickerCount0(
*/
jobject Java_org_rocksdb_Statistics_getHistogramData0(
JNIEnv* env, jobject jobj, jint histogramType, jlong handle) {
auto st = reinterpret_cast<rocksdb::Statistics*>(handle);
auto* st = reinterpret_cast<rocksdb::Statistics*>(handle);
assert(st != nullptr);
rocksdb::HistogramData data;
st->histogramData(static_cast<rocksdb::Histograms>(histogramType),
&data);
// Don't reuse class pointer
jclass jclazz = env->FindClass("org/rocksdb/HistogramData");
jclass jclazz = rocksdb::HistogramDataJni::getJClass(env);
if(jclazz == nullptr) {
// exception occurred accessing class
return nullptr;
}
jmethodID mid = rocksdb::HistogramDataJni::getConstructorMethodId(
env, jclazz);
return env->NewObject(jclazz, mid, data.median, data.percentile95,
data.percentile99, data.average, data.standard_deviation);
env);
if(mid == nullptr) {
// exception occurred accessing method
return nullptr;
}
return env->NewObject(
jclazz,
mid, data.median, data.percentile95, data.percentile99, data.average,
data.standard_deviation);
}
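
Replacing the inline FindClass/GetMethodID-plus-assert code with HistogramDataJni accessors means every lookup failure now surfaces as a pending Java exception instead of a debug-only assert. The pattern those portal accessors presumably wrap looks roughly like the sketch below; the (DDDDD)V signature matches the five double fields passed above, but treat it as an assumption:

```
#include <jni.h>

// Constructs an org.rocksdb.HistogramData, checking every JNI step.
// Returns nullptr with a JVM exception pending on any failure.
jobject makeHistogramData(JNIEnv* env, double median, double p95,
                          double p99, double average, double std_dev) {
  jclass clazz = env->FindClass("org/rocksdb/HistogramData");
  if (clazz == nullptr) {
    return nullptr;  // exception occurred accessing class
  }
  // Assumed constructor signature: five doubles, as passed above.
  jmethodID ctor = env->GetMethodID(clazz, "<init>", "(DDDDD)V");
  if (ctor == nullptr) {
    return nullptr;  // exception occurred accessing method
  }
  return env->NewObject(clazz, ctor, median, p95, p99, average, std_dev);
}
```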

@ -67,12 +67,5 @@ jobject Java_org_rocksdb_TransactionLogIterator_getBatch(
JNIEnv* env, jobject jobj, jlong handle) {
rocksdb::BatchResult batch_result =
reinterpret_cast<rocksdb::TransactionLogIterator*>(handle)->GetBatch();
jclass jclazz = env->FindClass(
"org/rocksdb/TransactionLogIterator$BatchResult");
assert(jclazz != nullptr);
jmethodID mid = env->GetMethodID(
jclazz, "<init>", "(Lorg/rocksdb/TransactionLogIterator;JJ)V");
assert(mid != nullptr);
return env->NewObject(jclazz, mid, jobj,
batch_result.sequence, batch_result.writeBatchPtr.release());
return rocksdb::BatchResultJni::construct(env, batch_result);
}

@ -26,9 +26,14 @@
jlong Java_org_rocksdb_TtlDB_open(JNIEnv* env,
jclass jcls, jlong joptions_handle, jstring jdb_path,
jint jttl, jboolean jread_only) {
const char* db_path = env->GetStringUTFChars(jdb_path, nullptr);
if(db_path == nullptr) {
// exception thrown: OutOfMemoryError
return 0;
}
auto* opt = reinterpret_cast<rocksdb::Options*>(joptions_handle);
rocksdb::DBWithTTL* db = nullptr;
const char* db_path = env->GetStringUTFChars(jdb_path, 0);
rocksdb::Status s = rocksdb::DBWithTTL::Open(*opt, db_path, &db,
jttl, jread_only);
env->ReleaseStringUTFChars(jdb_path, db_path);
@ -53,49 +58,69 @@ jlongArray
JNIEnv* env, jclass jcls, jlong jopt_handle, jstring jdb_path,
jobjectArray jcolumn_names, jlongArray jcolumn_options,
jintArray jttls, jboolean jread_only) {
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jopt_handle);
const char* db_path = env->GetStringUTFChars(jdb_path, NULL);
std::vector<rocksdb::ColumnFamilyDescriptor> column_families;
jsize len_cols = env->GetArrayLength(jcolumn_names);
jlong* jco = env->GetLongArrayElements(jcolumn_options, NULL);
for(int i = 0; i < len_cols; i++) {
jobject jcn = env->GetObjectArrayElement(jcolumn_names, i);
jbyteArray jcn_ba = reinterpret_cast<jbyteArray>(jcn);
jbyte* jcf_name = env->GetByteArrayElements(jcn_ba, NULL);
const int jcf_name_len = env->GetArrayLength(jcn_ba);
const char* db_path = env->GetStringUTFChars(jdb_path, nullptr);
if(db_path == nullptr) {
// exception thrown: OutOfMemoryError
return 0;
}
//TODO(AR) do I need to make a copy of jco[i]?
const jsize len_cols = env->GetArrayLength(jcolumn_names);
jlong* jco = env->GetLongArrayElements(jcolumn_options, nullptr);
if(jco == nullptr) {
// exception thrown: OutOfMemoryError
env->ReleaseStringUTFChars(jdb_path, db_path);
return nullptr;
}
std::string cf_name (reinterpret_cast<char *>(jcf_name), jcf_name_len);
rocksdb::ColumnFamilyOptions* cf_options =
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jco[i]);
column_families.push_back(
rocksdb::ColumnFamilyDescriptor(cf_name, *cf_options));
std::vector<rocksdb::ColumnFamilyDescriptor> column_families;
jboolean has_exception = JNI_FALSE;
rocksdb::JniUtil::byteStrings<std::string>(
env,
jcolumn_names,
[](const char* str_data, const size_t str_len) {
return std::string(str_data, str_len);
},
[&jco, &column_families](size_t idx, std::string cf_name) {
rocksdb::ColumnFamilyOptions* cf_options =
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jco[idx]);
column_families.push_back(
rocksdb::ColumnFamilyDescriptor(cf_name, *cf_options));
},
&has_exception);
env->ReleaseByteArrayElements(jcn_ba, jcf_name, JNI_ABORT);
env->DeleteLocalRef(jcn);
}
env->ReleaseLongArrayElements(jcolumn_options, jco, JNI_ABORT);
std::vector<rocksdb::ColumnFamilyHandle*> handles;
rocksdb::DBWithTTL* db = nullptr;
if(has_exception == JNI_TRUE) {
// exception occurred
env->ReleaseStringUTFChars(jdb_path, db_path);
return nullptr;
}
std::vector<int32_t> ttl_values;
jint* jttlv = env->GetIntArrayElements(jttls, NULL);
jsize len_ttls = env->GetArrayLength(jttls);
for(int i = 0; i < len_ttls; i++) {
jint* jttlv = env->GetIntArrayElements(jttls, nullptr);
if(jttlv == nullptr) {
// exception thrown: OutOfMemoryError
env->ReleaseStringUTFChars(jdb_path, db_path);
return nullptr;
}
const jsize len_ttls = env->GetArrayLength(jttls);
for(jsize i = 0; i < len_ttls; i++) {
ttl_values.push_back(jttlv[i]);
}
env->ReleaseIntArrayElements(jttls, jttlv, JNI_ABORT);
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jopt_handle);
std::vector<rocksdb::ColumnFamilyHandle*> handles;
rocksdb::DBWithTTL* db = nullptr;
rocksdb::Status s = rocksdb::DBWithTTL::Open(*opt, db_path, column_families,
&handles, &db, ttl_values, jread_only);
// we have now finished with db_path
env->ReleaseStringUTFChars(jdb_path, db_path);
// check if open operation was successful
if (s.ok()) {
jsize resultsLen = 1 + len_cols; //db handle + column family handles
const jsize resultsLen = 1 + len_cols; //db handle + column family handles
std::unique_ptr<jlong[]> results =
std::unique_ptr<jlong[]>(new jlong[resultsLen]);
results[0] = reinterpret_cast<jlong>(db);
@ -104,7 +129,18 @@ jlongArray
}
jlongArray jresults = env->NewLongArray(resultsLen);
if(jresults == nullptr) {
// exception thrown: OutOfMemoryError
return nullptr;
}
env->SetLongArrayRegion(jresults, 0, resultsLen, results.get());
if(env->ExceptionCheck()) {
// exception thrown: ArrayIndexOutOfBoundsException
env->DeleteLocalRef(jresults);
return nullptr;
}
return jresults;
} else {
rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
@ -120,18 +156,23 @@ jlongArray
jlong Java_org_rocksdb_TtlDB_createColumnFamilyWithTtl(
JNIEnv* env, jobject jobj, jlong jdb_handle,
jbyteArray jcolumn_name, jlong jcolumn_options, jint jttl) {
rocksdb::ColumnFamilyHandle* handle;
auto* db_handle = reinterpret_cast<rocksdb::DBWithTTL*>(jdb_handle);
jbyte* cfname = env->GetByteArrayElements(jcolumn_name, 0);
const int len = env->GetArrayLength(jcolumn_name);
jbyte* cfname = env->GetByteArrayElements(jcolumn_name, nullptr);
if(cfname == nullptr) {
// exception thrown: OutOfMemoryError
return 0;
}
const jsize len = env->GetArrayLength(jcolumn_name);
auto* cfOptions =
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jcolumn_options);
auto* db_handle = reinterpret_cast<rocksdb::DBWithTTL*>(jdb_handle);
rocksdb::ColumnFamilyHandle* handle;
rocksdb::Status s = db_handle->CreateColumnFamilyWithTtl(
*cfOptions, std::string(reinterpret_cast<char *>(cfname),
len), &handle, jttl);
env->ReleaseByteArrayElements(jcolumn_name, cfname, 0);
if (s.ok()) {

@ -30,8 +30,7 @@
*/
jlong Java_org_rocksdb_WriteBatch_newWriteBatch(
JNIEnv* env, jclass jcls, jint jreserved_bytes) {
rocksdb::WriteBatch* wb = new rocksdb::WriteBatch(
static_cast<size_t>(jreserved_bytes));
auto* wb = new rocksdb::WriteBatch(static_cast<size_t>(jreserved_bytes));
return reinterpret_cast<jlong>(wb);
}
@ -244,7 +243,9 @@ void Java_org_rocksdb_WriteBatch_iterate(
*/
void Java_org_rocksdb_WriteBatch_disposeInternal(
JNIEnv* env, jobject jobj, jlong handle) {
delete reinterpret_cast<rocksdb::WriteBatch*>(handle);
auto* wb = reinterpret_cast<rocksdb::WriteBatch*>(handle);
assert(wb != nullptr);
delete wb;
}
/*
@ -254,9 +255,8 @@ void Java_org_rocksdb_WriteBatch_disposeInternal(
*/
jlong Java_org_rocksdb_WriteBatch_00024Handler_createNewHandler0(
JNIEnv* env, jobject jobj) {
const rocksdb::WriteBatchHandlerJniCallback* h =
new rocksdb::WriteBatchHandlerJniCallback(env, jobj);
return reinterpret_cast<jlong>(h);
auto* wbjnic = new rocksdb::WriteBatchHandlerJniCallback(env, jobj);
return reinterpret_cast<jlong>(wbjnic);
}
/*
@ -266,5 +266,8 @@ jlong Java_org_rocksdb_WriteBatch_00024Handler_createNewHandler0(
*/
void Java_org_rocksdb_WriteBatch_00024Handler_disposeInternal(
JNIEnv* env, jobject jobj, jlong handle) {
delete reinterpret_cast<rocksdb::WriteBatchHandlerJniCallback*>(handle);
auto* wbjnic =
reinterpret_cast<rocksdb::WriteBatchHandlerJniCallback*>(handle);
assert(wbjnic != nullptr);
delete wbjnic;
}

@ -101,8 +101,18 @@ jbyteArray Java_org_rocksdb_WriteBatchTest_getContents(
delete mem->Unref();
jbyteArray jstate = env->NewByteArray(static_cast<jsize>(state.size()));
if(jstate == nullptr) {
// exception thrown: OutOfMemoryError
return nullptr;
}
env->SetByteArrayRegion(jstate, 0, static_cast<jsize>(state.size()),
reinterpret_cast<const jbyte*>(state.c_str()));
if(env->ExceptionCheck()) {
// exception thrown: ArrayIndexOutOfBoundsException
env->DeleteLocalRef(jstate);
return nullptr;
}
return jstate;
}

@ -19,7 +19,7 @@
*/
jlong Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__(
JNIEnv* env, jclass jcls) {
rocksdb::WriteBatchWithIndex* wbwi = new rocksdb::WriteBatchWithIndex();
auto* wbwi = new rocksdb::WriteBatchWithIndex();
return reinterpret_cast<jlong>(wbwi);
}
@ -30,9 +30,9 @@ jlong Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__(
*/
jlong Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__Z(
JNIEnv* env, jclass jcls, jboolean joverwrite_key) {
rocksdb::WriteBatchWithIndex* wbwi =
auto* wbwi =
new rocksdb::WriteBatchWithIndex(rocksdb::BytewiseComparator(), 0,
static_cast<bool>(joverwrite_key));
static_cast<bool>(joverwrite_key));
return reinterpret_cast<jlong>(wbwi);
}
@ -44,10 +44,10 @@ jlong Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__Z(
jlong Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__JIZ(
JNIEnv* env, jclass jcls, jlong jfallback_index_comparator_handle,
jint jreserved_bytes, jboolean joverwrite_key) {
rocksdb::WriteBatchWithIndex* wbwi =
auto* wbwi =
new rocksdb::WriteBatchWithIndex(
reinterpret_cast<rocksdb::Comparator*>(jfallback_index_comparator_handle),
static_cast<size_t>(jreserved_bytes), static_cast<bool>(joverwrite_key));
reinterpret_cast<rocksdb::Comparator*>(jfallback_index_comparator_handle),
static_cast<size_t>(jreserved_bytes), static_cast<bool>(joverwrite_key));
return reinterpret_cast<jlong>(wbwi);
}
@ -241,7 +241,7 @@ void Java_org_rocksdb_WriteBatchWithIndex_rollbackToSavePoint0(
jlong Java_org_rocksdb_WriteBatchWithIndex_iterator0(
JNIEnv* env, jobject jobj, jlong jwbwi_handle) {
auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
rocksdb::WBWIIterator* wbwi_iterator = wbwi->NewIterator();
auto* wbwi_iterator = wbwi->NewIterator();
return reinterpret_cast<jlong>(wbwi_iterator);
}
@ -254,7 +254,7 @@ jlong Java_org_rocksdb_WriteBatchWithIndex_iterator1(
JNIEnv* env, jobject jobj, jlong jwbwi_handle, jlong jcf_handle) {
auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
rocksdb::WBWIIterator* wbwi_iterator = wbwi->NewIterator(cf_handle);
auto* wbwi_iterator = wbwi->NewIterator(cf_handle);
return reinterpret_cast<jlong>(wbwi_iterator);
}
@ -362,6 +362,7 @@ jbyteArray Java_org_rocksdb_WriteBatchWithIndex_getFromBatchAndDB__JJJ_3BIJ(
void Java_org_rocksdb_WriteBatchWithIndex_disposeInternal(
JNIEnv* env, jobject jobj, jlong handle) {
auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(handle);
assert(wbwi != nullptr);
delete wbwi;
}
@ -375,6 +376,7 @@ void Java_org_rocksdb_WriteBatchWithIndex_disposeInternal(
void Java_org_rocksdb_WBWIRocksIterator_disposeInternal(
JNIEnv* env, jobject jobj, jlong handle) {
auto* it = reinterpret_cast<rocksdb::WBWIIterator*>(handle);
assert(it != nullptr);
delete it;
}
@ -437,7 +439,12 @@ void Java_org_rocksdb_WBWIRocksIterator_seek0(
JNIEnv* env, jobject jobj, jlong handle, jbyteArray jtarget,
jint jtarget_len) {
auto* it = reinterpret_cast<rocksdb::WBWIIterator*>(handle);
jbyte* target = env->GetByteArrayElements(jtarget, 0);
jbyte* target = env->GetByteArrayElements(jtarget, nullptr);
if(target == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
rocksdb::Slice target_slice(
reinterpret_cast<char*>(target), jtarget_len);
@ -497,26 +504,41 @@ jlongArray Java_org_rocksdb_WBWIRocksIterator_entry1(
results[0] = 0x0;
}
//TODO(AR) do we leak buf and value_buf?
// key_slice and value_slice will be freed by org.rocksdb.DirectSlice#close
//set the pointer to the key slice
char* buf = new char[we.key.size()];
memcpy(buf, we.key.data(), we.key.size());
auto* key_slice = new rocksdb::Slice(buf, we.key.size());
auto* key_slice = new rocksdb::Slice(we.key.data(), we.key.size());
results[1] = reinterpret_cast<jlong>(key_slice);
//set the pointer to the value slice
if (we.type == rocksdb::kDeleteRecord || we.type == rocksdb::kLogDataRecord) {
if (we.type == rocksdb::kDeleteRecord
|| we.type == rocksdb::kLogDataRecord) {
// set native handle of value slice to null if no value available
results[2] = 0;
} else {
char* value_buf = new char[we.value.size()];
memcpy(value_buf, we.value.data(), we.value.size());
auto* value_slice = new rocksdb::Slice(value_buf, we.value.size());
auto* value_slice = new rocksdb::Slice(we.value.data(), we.value.size());
results[2] = reinterpret_cast<jlong>(value_slice);
}
jlongArray jresults = env->NewLongArray(3);
if(jresults == nullptr) {
// exception thrown: OutOfMemoryError
if(results[2] != 0) {
auto* value_slice = reinterpret_cast<rocksdb::Slice*>(results[2]);
delete value_slice;
}
delete key_slice;
return nullptr;
}
env->SetLongArrayRegion(jresults, 0, 3, results);
if(env->ExceptionCheck()) {
// exception thrown: ArrayIndexOutOfBoundsException
env->DeleteLocalRef(jresults);
if(results[2] != 0) {
auto* value_slice = reinterpret_cast<rocksdb::Slice*>(results[2]);
delete value_slice;
}
delete key_slice;
return nullptr;
}
return jresults;
}

@ -16,69 +16,202 @@ WriteBatchHandlerJniCallback::WriteBatchHandlerJniCallback(
// Note: we want to access the Java WriteBatchHandler instance
// across multiple method calls, so we create a global ref
assert(jWriteBatchHandler != nullptr);
m_jWriteBatchHandler = env->NewGlobalRef(jWriteBatchHandler);
if(m_jWriteBatchHandler == nullptr) {
// exception thrown: OutOfMemoryError
return;
}
m_jPutMethodId = WriteBatchHandlerJni::getPutMethodId(env);
if(m_jPutMethodId == nullptr) {
// exception thrown
return;
}
m_jMergeMethodId = WriteBatchHandlerJni::getMergeMethodId(env);
if(m_jMergeMethodId == nullptr) {
// exception thrown
return;
}
m_jDeleteMethodId = WriteBatchHandlerJni::getDeleteMethodId(env);
if(m_jDeleteMethodId == nullptr) {
// exception thrown
return;
}
m_jLogDataMethodId = WriteBatchHandlerJni::getLogDataMethodId(env);
if(m_jLogDataMethodId == nullptr) {
// exception thrown
return;
}
m_jContinueMethodId = WriteBatchHandlerJni::getContinueMethodId(env);
if(m_jContinueMethodId == nullptr) {
// exception thrown
return;
}
}
void WriteBatchHandlerJniCallback::Put(const Slice& key, const Slice& value) {
const jbyteArray j_key = sliceToJArray(key);
if(j_key == nullptr) {
// exception thrown
if(m_env->ExceptionCheck()) {
m_env->ExceptionDescribe();
}
return;
}
const jbyteArray j_value = sliceToJArray(value);
if(j_value == nullptr) {
// exception thrown
if(m_env->ExceptionCheck()) {
m_env->ExceptionDescribe();
}
if(j_key != nullptr) {
m_env->DeleteLocalRef(j_key);
}
return;
}
m_env->CallVoidMethod(
m_jWriteBatchHandler,
m_jPutMethodId,
j_key,
j_value);
if(m_env->ExceptionCheck()) {
// exception thrown
m_env->ExceptionDescribe();
if(j_value != nullptr) {
m_env->DeleteLocalRef(j_value);
}
if(j_key != nullptr) {
m_env->DeleteLocalRef(j_key);
}
return;
}
m_env->DeleteLocalRef(j_value);
m_env->DeleteLocalRef(j_key);
if(j_value != nullptr) {
m_env->DeleteLocalRef(j_value);
}
if(j_key != nullptr) {
m_env->DeleteLocalRef(j_key);
}
}
void WriteBatchHandlerJniCallback::Merge(const Slice& key, const Slice& value) {
const jbyteArray j_key = sliceToJArray(key);
if(j_key == nullptr) {
// exception thrown
if(m_env->ExceptionCheck()) {
m_env->ExceptionDescribe();
}
return;
}
const jbyteArray j_value = sliceToJArray(value);
if(j_value == nullptr) {
// exception thrown
if(m_env->ExceptionCheck()) {
m_env->ExceptionDescribe();
}
if(j_key != nullptr) {
m_env->DeleteLocalRef(j_key);
}
return;
}
m_env->CallVoidMethod(
m_jWriteBatchHandler,
m_jMergeMethodId,
j_key,
j_value);
if(m_env->ExceptionCheck()) {
// exception thrown
m_env->ExceptionDescribe();
if(j_value != nullptr) {
m_env->DeleteLocalRef(j_value);
}
if(j_key != nullptr) {
m_env->DeleteLocalRef(j_key);
}
return;
}
m_env->DeleteLocalRef(j_value);
m_env->DeleteLocalRef(j_key);
if(j_value != nullptr) {
m_env->DeleteLocalRef(j_value);
}
if(j_key != nullptr) {
m_env->DeleteLocalRef(j_key);
}
}
void WriteBatchHandlerJniCallback::Delete(const Slice& key) {
const jbyteArray j_key = sliceToJArray(key);
if(j_key == nullptr) {
// exception thrown
if(m_env->ExceptionCheck()) {
m_env->ExceptionDescribe();
}
return;
}
m_env->CallVoidMethod(
m_jWriteBatchHandler,
m_jDeleteMethodId,
j_key);
if(m_env->ExceptionCheck()) {
// exception thrown
m_env->ExceptionDescribe();
if(j_key != nullptr) {
m_env->DeleteLocalRef(j_key);
}
return;
}
m_env->DeleteLocalRef(j_key);
if(j_key != nullptr) {
m_env->DeleteLocalRef(j_key);
}
}
void WriteBatchHandlerJniCallback::LogData(const Slice& blob) {
const jbyteArray j_blob = sliceToJArray(blob);
if(j_blob == nullptr) {
// exception thrown
if(m_env->ExceptionCheck()) {
m_env->ExceptionDescribe();
}
return;
}
m_env->CallVoidMethod(
m_jWriteBatchHandler,
m_jLogDataMethodId,
j_blob);
if(m_env->ExceptionCheck()) {
// exception thrown
m_env->ExceptionDescribe();
if(j_blob != nullptr) {
m_env->DeleteLocalRef(j_blob);
}
return;
}
m_env->DeleteLocalRef(j_blob);
if(j_blob != nullptr) {
m_env->DeleteLocalRef(j_blob);
}
}
bool WriteBatchHandlerJniCallback::Continue() {
jboolean jContinue = m_env->CallBooleanMethod(
m_jWriteBatchHandler,
m_jContinueMethodId);
if(m_env->ExceptionCheck()) {
// exception thrown
m_env->ExceptionDescribe();
}
return static_cast<bool>(jContinue == JNI_TRUE);
}
@ -89,16 +222,36 @@ bool WriteBatchHandlerJniCallback::Continue() {
* When calling this function
* you must remember to call env->DeleteLocalRef
* on the result after you have finished with it
*
 * @param s A Slice to convert to a Java byte array
*
* @return A reference to a Java byte array, or a nullptr if an
* exception occurs
*/
jbyteArray WriteBatchHandlerJniCallback::sliceToJArray(const Slice& s) {
jbyteArray ja = m_env->NewByteArray(static_cast<jsize>(s.size()));
if(ja == nullptr) {
// exception thrown: OutOfMemoryError
return nullptr;
}
m_env->SetByteArrayRegion(
ja, 0, static_cast<jsize>(s.size()),
reinterpret_cast<const jbyte*>(s.data()));
if(m_env->ExceptionCheck()) {
if(ja != nullptr) {
m_env->DeleteLocalRef(ja);
}
// exception thrown: ArrayIndexOutOfBoundsException
return nullptr;
}
return ja;
}
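
As the doc comment above says, sliceToJArray hands back a local reference that the caller owns; the Put/Merge/Delete/LogData callbacks therefore DeleteLocalRef each array on every path, including the error paths. A minimal caller sketch under those assumptions, with illustrative names:

```
#include <jni.h>

// Illustrative shape of each callback: take the converted array, call
// into Java, then release the local ref whether or not the call threw,
// so the local-ref table is not exhausted over a large write batch.
void deleteCallback(JNIEnv* env, jobject handler, jmethodID delete_mid,
                    jbyteArray j_key /* from a sliceToJArray-like helper */) {
  if (j_key == nullptr) {
    return;  // helper failed; a JVM exception is already pending
  }
  env->CallVoidMethod(handler, delete_mid, j_key);
  if (env->ExceptionCheck()) {
    env->ExceptionDescribe();  // log the Java-side exception
  }
  env->DeleteLocalRef(j_key);  // release on success and failure alike
}
```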
WriteBatchHandlerJniCallback::~WriteBatchHandlerJniCallback() {
m_env->DeleteGlobalRef(m_jWriteBatchHandler);
if(m_jWriteBatchHandler != nullptr) {
m_env->DeleteGlobalRef(m_jWriteBatchHandler);
}
}
} // namespace rocksdb

@ -13,13 +13,14 @@ public class RocksDBColumnFamilySample {
RocksDB.loadLibrary();
}
public static void main(String[] args) throws RocksDBException {
public static void main(final String[] args) throws RocksDBException {
if (args.length < 1) {
System.out.println(
"usage: RocksDBColumnFamilySample db_path");
return;
System.exit(-1);
}
String db_path = args[0];
final String db_path = args[0];
System.out.println("RocksDBColumnFamilySample");
try(final Options options = new Options().setCreateIfMissing(true);
@ -54,8 +55,6 @@ public class RocksDBColumnFamilySample {
// put and get from non-default column family
db.put(columnFamilyHandles.get(0), new WriteOptions(),
"key".getBytes(), "value".getBytes());
String value = new String(db.get(columnFamilyHandles.get(0),
"key".getBytes()));
// atomic write
try (final WriteBatch wb = new WriteBatch()) {

@ -12,31 +12,31 @@ import java.util.ArrayList;
import org.rocksdb.*;
import org.rocksdb.util.SizeUnit;
import java.io.IOException;
public class RocksDBSample {
static {
RocksDB.loadLibrary();
}
public static void main(String[] args) {
public static void main(final String[] args) {
if (args.length < 1) {
System.out.println("usage: RocksDBSample db_path");
return;
System.exit(-1);
}
final String db_path = args[0];
final String db_path_not_found = db_path + "_not_found";
System.out.println("RocksDBSample");
try (final Options options = new Options();
final Filter bloomFilter = new BloomFilter(10);
final ReadOptions readOptions = new ReadOptions()
.setFillCache(false);
final RateLimiter rateLimiter = new RateLimiter(10000000, 10000, 10)) {
try (final RocksDB db = RocksDB.open(options, db_path_not_found)) {
assert (false);
} catch (final RocksDBException e) {
System.out.format("Caught the expected exception -- %s\n", e);
}
try {
@ -47,11 +47,11 @@ public class RocksDBSample {
.setMaxBackgroundCompactions(10)
.setCompressionType(CompressionType.SNAPPY_COMPRESSION)
.setCompactionStyle(CompactionStyle.UNIVERSAL);
} catch (final IllegalArgumentException e) {
assert (false);
}
final Statistics stats = options.statisticsPtr();
assert (options.createIfMissing() == true);
assert (options.writeBufferSize() == 8 * SizeUnit.KB);
@ -85,9 +85,7 @@ public class RocksDBSample {
options.setAllowMmapReads(true);
assert (options.tableFactoryName().equals("PlainTable"));
options.setRateLimiter(rateLimiter);
final BlockBasedTableConfig table_options = new BlockBasedTableConfig();
table_options.setBlockCacheSize(64 * SizeUnit.KB)
@ -114,12 +112,14 @@ public class RocksDBSample {
try (final RocksDB db = RocksDB.open(options, db_path)) {
db.put("hello".getBytes(), "world".getBytes());
final byte[] value = db.get("hello".getBytes());
assert ("world".equals(new String(value)));
final String str = db.getProperty("rocksdb.stats");
assert (str != null && !str.equals(""));
} catch (final RocksDBException e) {
System.out.format("[ERROR] caught the unexpected exception -- %s\n", e);
assert (false);
}
@ -174,8 +174,8 @@ public class RocksDBSample {
value = db.get(readOptions, "world".getBytes());
assert (value == null);
final byte[] testKey = "asdf".getBytes();
final byte[] testValue =
"asdfghjkl;'?><MNBVCXZQWERTYUIOP{+_)(*&^%$#@".getBytes();
db.put(testKey, testValue);
byte[] testResult = db.get(testKey);
@ -187,8 +187,8 @@ public class RocksDBSample {
assert (Arrays.equals(testValue, testResult));
assert (new String(testValue).equals(new String(testResult)));
final byte[] insufficientArray = new byte[10];
final byte[] enoughArray = new byte[50];
int len;
len = db.get(testKey, insufficientArray);
assert (len > insufficientArray.length);
@ -220,21 +220,21 @@ public class RocksDBSample {
}
try {
for (final TickerType statsType : TickerType.values()) {
stats.getTickerCount(statsType);
}
System.out.println("getTickerCount() passed.");
} catch (final Exception e) {
System.out.println("Failed in call to getTickerCount()");
assert (false); //Should never reach here.
}
try {
for (final HistogramType histogramType : HistogramType.values()) {
HistogramData data = stats.getHistogramData(histogramType);
}
System.out.println("getHistogramData() passed.");
} catch (final Exception e) {
System.out.println("Failed in call to getHistogramData()");
assert (false); //Should never reach here.
}
@ -283,16 +283,16 @@ public class RocksDBSample {
Map<byte[], byte[]> values = db.multiGet(keys);
assert (values.size() == keys.size());
for (final byte[] value1 : values.values()) {
assert (value1 != null);
}
values = db.multiGet(new ReadOptions(), keys);
assert (values.size() == keys.size());
for (final byte[] value1 : values.values()) {
assert (value1 != null);
}
} catch (final RocksDBException e) {
System.err.println(e);
}
}

@ -57,6 +57,20 @@ public abstract class AbstractSlice<T> extends RocksMutableObject {
*/
protected abstract T data0(long handle);
/**
* Drops the specified {@code n}
* number of bytes from the start
* of the backing slice
*
* @param n The number of bytes to drop
*/
public abstract void removePrefix(final int n);
/**
* Clears the backing slice
*/
public abstract void clear();
/**
* Return the length (in bytes) of the data.
*

@ -143,7 +143,7 @@ public class ColumnFamilyOptions extends RocksObject
@Override
public ColumnFamilyOptions setMergeOperator(
final MergeOperator mergeOperator) {
setMergeOperator(nativeHandle_, mergeOperator.nativeHandle_);
return this;
}

@ -134,15 +134,6 @@ public class DBOptions extends RocksObject implements DBOptionsInterface {
return paranoidChecks(nativeHandle_);
}
@Override
public DBOptions setRateLimiterConfig(
final RateLimiterConfig config) {
assert(isOwningHandle());
rateLimiterConfig_ = config;
setOldRateLimiter(nativeHandle_, config.newRateLimiterHandle());
return this;
}
@Override
public DBOptions setRateLimiter(final RateLimiter rateLimiter) {
assert(isOwningHandle());
@ -650,9 +641,6 @@ public long delayedWriteRate(){
private native void setParanoidChecks(
long handle, boolean paranoidChecks);
private native boolean paranoidChecks(long handle);
@Deprecated
private native void setOldRateLimiter(long handle,
long rateLimiterHandle);
private native void setRateLimiter(long handle,
long rateLimiterHandle);
private native void setLogger(long handle,
@ -750,6 +738,5 @@ public long delayedWriteRate(){
private native long delayedWriteRate(long handle);
int numShardBits_;
RateLimiterConfig rateLimiterConfig_;
RateLimiter rateLimiter_;
}

@ -118,18 +118,6 @@ public interface DBOptionsInterface {
*/
boolean paranoidChecks();
/**
* Use to control write rate of flush and compaction. Flush has higher
* priority than compaction. Rate limiting is disabled if nullptr.
* Default: nullptr
*
* @param config rate limiter config.
* @return the instance of the current Object.
* @deprecated See: {@link #setRateLimiter(RateLimiter)}.
*/
@Deprecated
Object setRateLimiterConfig(RateLimiterConfig config);
/**
* Use to control write rate of flush and compaction. Flush has higher
* priority than compaction. Rate limiting is disabled if nullptr.

@ -18,6 +18,13 @@ import java.nio.ByteBuffer;
public class DirectSlice extends AbstractSlice<ByteBuffer> {
public final static DirectSlice NONE = new DirectSlice();
/**
* Indicates whether we have to free the memory pointed to by the Slice
*/
private final boolean internalBuffer;
private volatile boolean cleared = false;
private volatile long internalBufferOffset = 0;
/**
* Called from JNI to construct a new Java DirectSlice
* without an underlying C++ object set
@ -32,6 +39,7 @@ public class DirectSlice extends AbstractSlice<ByteBuffer> {
*/
DirectSlice() {
super();
this.internalBuffer = false;
}
/**
@ -43,6 +51,7 @@ public class DirectSlice extends AbstractSlice<ByteBuffer> {
*/
public DirectSlice(final String str) {
super(createNewSliceFromString(str));
this.internalBuffer = true;
}
/**
@ -55,6 +64,7 @@ public class DirectSlice extends AbstractSlice<ByteBuffer> {
*/
public DirectSlice(final ByteBuffer data, final int length) {
super(createNewDirectSlice0(ensureDirect(data), length));
this.internalBuffer = false;
}
/**
@ -66,12 +76,13 @@ public class DirectSlice extends AbstractSlice<ByteBuffer> {
*/
public DirectSlice(final ByteBuffer data) {
super(createNewDirectSlice1(ensureDirect(data)));
this.internalBuffer = false;
}
private static ByteBuffer ensureDirect(final ByteBuffer data) {
// TODO(AR) consider throwing a checked exception, as if it's not direct
// this can SIGSEGV
if(!data.isDirect()) {
throw new IllegalArgumentException("The ByteBuffer must be direct");
}
return data;
}
@ -83,26 +94,29 @@ public class DirectSlice extends AbstractSlice<ByteBuffer> {
*
* @return the requested byte
*/
public byte get(final int offset) {
return get0(getNativeHandle(), offset);
}
/**
* Clears the backing slice
*/
@Override
public void clear() {
clear0(getNativeHandle(), !cleared && internalBuffer, internalBufferOffset);
cleared = true;
}
/**
* Drops the specified {@code n}
* number of bytes from the start
* of the backing slice
*
* @param n The number of bytes to drop
*/
@Override
public void removePrefix(final int n) {
removePrefix0(getNativeHandle(), n);
this.internalBufferOffset += n;
}
@Override
protected void disposeInternal() {
final long nativeHandle = getNativeHandle();
if(!cleared && internalBuffer) {
disposeInternalBuf(nativeHandle, internalBufferOffset);
}
disposeInternal(nativeHandle);
}
private native static long createNewDirectSlice0(final ByteBuffer data,
@ -110,6 +124,9 @@ public class DirectSlice extends AbstractSlice<ByteBuffer> {
private native static long createNewDirectSlice1(final ByteBuffer data);
@Override protected final native ByteBuffer data0(long handle);
private native byte get0(long handle, int offset);
private native void clear0(long handle, boolean internalBuffer,
long internalBufferOffset);
private native void removePrefix0(long handle, int length);
private native void disposeInternalBuf(final long handle,
long internalBufferOffset);
}
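To make the new `ensureDirect` behaviour concrete, a small hedged sketch: a direct `ByteBuffer` is accepted, while a heap-backed buffer now fails fast with `IllegalArgumentException` instead of risking a SIGSEGV later:

```java
import java.nio.ByteBuffer;
import org.rocksdb.DirectSlice;
import org.rocksdb.RocksDB;

public class DirectSliceSketch {
  public static void main(final String[] args) {
    RocksDB.loadLibrary();

    // a direct buffer is accepted
    final ByteBuffer direct = ByteBuffer.allocateDirect(3);
    direct.put("abc".getBytes());
    try (final DirectSlice slice = new DirectSlice(direct, 3)) {
      System.out.println(slice.toString()); // prints "abc"
    }

    // a heap-backed buffer is rejected up front
    final ByteBuffer heap = ByteBuffer.wrap("abc".getBytes());
    try (final DirectSlice slice = new DirectSlice(heap, 3)) {
      throw new AssertionError("should not be reached");
    } catch (final IllegalArgumentException e) {
      System.out.println("rejected: " + e.getMessage());
    }
  }
}
```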

@ -134,15 +134,15 @@ public class EnvOptions extends RocksObject {
return writableFileMaxBufferSize(nativeHandle_);
}
public EnvOptions setRateLimiter(final RateLimiter rateLimiter) {
this.rateLimiter = rateLimiter;
setRateLimiter(nativeHandle_, rateLimiter.nativeHandle_);
return this;
}
public RateLimiter rateLimiter() {
assert(isOwningHandle());
return rateLimiter;
}
private native static long newEnvOptions();
@ -203,5 +203,5 @@ public class EnvOptions extends RocksObject {
private native void setRateLimiter(final long handle, final long rateLimiterHandle);
private RateLimiter rateLimiter;
}

@ -1,68 +0,0 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
/**
* Config for rate limiter, which is used to control write rate of flush and
* compaction.
*
* @see RateLimiterConfig
* @deprecated obsolete. See: {@link org.rocksdb.RateLimiter}.
*/
@Deprecated
public class GenericRateLimiterConfig extends RateLimiterConfig {
private static final long DEFAULT_REFILL_PERIOD_MICROS = (100 * 1000);
private static final int DEFAULT_FAIRNESS = 10;
/**
* GenericRateLimiterConfig constructor
*
* @param rateBytesPerSecond this is the only parameter you want to set
* most of the time. It controls the total write rate of compaction
* and flush in bytes per second. Currently, RocksDB does not enforce
* rate limit for anything other than flush and compaction, e.g. write to WAL.
* @param refillPeriodMicros this controls how often tokens are refilled. For example,
* when rate_bytes_per_sec is set to 10MB/s and refill_period_us is set to
* 100ms, then 1MB is refilled every 100ms internally. Larger value can lead to
* burstier writes while smaller value introduces more CPU overhead.
* The default should work for most cases.
* @param fairness RateLimiter accepts high-pri requests and low-pri requests.
* A low-pri request is usually blocked in favor of hi-pri request. Currently,
* RocksDB assigns low-pri to request from compaction and high-pri to request
* from flush. Low-pri requests can get blocked if flush requests come in
* continuously. This fairness parameter grants low-pri requests permission by
* fairness chance even though high-pri requests exist to avoid starvation.
* You should be good by leaving it at default 10.
*/
public GenericRateLimiterConfig(final long rateBytesPerSecond,
final long refillPeriodMicros, final int fairness) {
rateBytesPerSecond_ = rateBytesPerSecond;
refillPeriodMicros_ = refillPeriodMicros;
fairness_ = fairness;
}
/**
* GenericRateLimiterConfig constructor
*
* @param rateBytesPerSecond this is the only parameter you want to set
* most of the time. It controls the total write rate of compaction
* and flush in bytes per second. Currently, RocksDB does not enforce
* rate limit for anything other than flush and compaction, e.g. write to WAL.
*/
public GenericRateLimiterConfig(final long rateBytesPerSecond) {
this(rateBytesPerSecond, DEFAULT_REFILL_PERIOD_MICROS, DEFAULT_FAIRNESS);
}
@Override protected long newRateLimiterHandle() {
return newRateLimiterHandle(rateBytesPerSecond_, refillPeriodMicros_,
fairness_);
}
private native long newRateLimiterHandle(long rateBytesPerSecond,
long refillPeriodMicros, int fairness);
private final long rateBytesPerSecond_;
private final long refillPeriodMicros_;
private final int fairness_;
}
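With `GenericRateLimiterConfig` removed, the replacement is the `RateLimiter` object used throughout this change. A hedged sketch of the migration, using the same three parameters the deleted javadoc describes:

```java
import org.rocksdb.Options;
import org.rocksdb.RateLimiter;
import org.rocksdb.RocksDB;

public class RateLimiterSketch {
  public static void main(final String[] args) {
    RocksDB.loadLibrary();
    try (final Options options = new Options().setCreateIfMissing(true);
         final RateLimiter rateLimiter = new RateLimiter(
             10 * 1000 * 1000, // rateBytesPerSecond: 10MB/s across flush+compaction
             100 * 1000,       // refillPeriodMicros: refill tokens every 100ms
             10)) {            // fairness: chance granted to low-pri requests
      // unlike the old config class, the Java RateLimiter owns its
      // native handle and must stay open while the options use it
      options.setRateLimiter(rateLimiter);
    }
  }
}
```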

@ -10,6 +10,8 @@ package org.rocksdb;
* two merge operands held under the same key in order to obtain a single
* value.
*/
public abstract class MergeOperator extends RocksObject {
protected MergeOperator(final long nativeHandle) {
super(nativeHandle);
}
}
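Since `MergeOperator` is now a `RocksObject` rather than an interface, the Java side owns the native handle and the operator must remain open for as long as any options reference it. A minimal sketch (the database path is illustrative):

```java
import org.rocksdb.*;

public class MergeOperatorSketch {
  public static void main(final String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final StringAppendOperator stringAppendOperator = new StringAppendOperator();
         final Options options = new Options()
             .setCreateIfMissing(true)
             .setMergeOperator(stringAppendOperator);
         final RocksDB db = RocksDB.open(options, "/tmp/merge-sketch")) {
      db.merge("key".getBytes(), "a".getBytes());
      db.merge("key".getBytes(), "b".getBytes());
      // StringAppendOperator joins the operands with ','
      assert "a,b".equals(new String(db.get("key".getBytes())));
    }
  }
}
```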

@ -49,6 +49,10 @@ public class MutableColumnFamilyOptions {
* For int[] values, each int should be separated by a comma, e.g.
*
* key1=value1;intArrayKey1=1,2,3
*
* @param str The string representation of the mutable column family options
*
* @return A builder for the mutable column family options
*/
public static MutableColumnFamilyOptionsBuilder parse(final String str) {
Objects.requireNonNull(str);
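For illustration, a small sketch of the accepted string form; the option keys are examples of mutable column-family options, and the `setOptions` call in the comment is the assumed consumer of the result:

```java
import org.rocksdb.MutableColumnFamilyOptions;

public class ParseSketch {
  public static void main(final String[] args) {
    // pairs are separated by ';', key and value by '='
    final MutableColumnFamilyOptions mutableOptions =
        MutableColumnFamilyOptions.parse(
            "write_buffer_size=4194304;max_write_buffer_number=3")
            .build();
    // e.g. db.setOptions(columnFamilyHandle, mutableOptions)
  }
}
```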

@ -188,7 +188,7 @@ public class Options extends RocksObject
@Override
public Options setMergeOperator(final MergeOperator mergeOperator) {
setMergeOperator(nativeHandle_, mergeOperator.nativeHandle_);
return this;
}
@ -683,13 +683,6 @@ public class Options extends RocksObject
return this;
}
@Override
public Options setRateLimiterConfig(final RateLimiterConfig config) {
rateLimiterConfig_ = config;
setOldRateLimiter(nativeHandle_, config.newRateLimiterHandle());
return this;
}
@Override
public Options setRateLimiter(final RateLimiter rateLimiter) {
assert(isOwningHandle());
@ -1202,9 +1195,6 @@ public class Options extends RocksObject
private native void setParanoidChecks(
long handle, boolean paranoidChecks);
private native boolean paranoidChecks(long handle);
@Deprecated
private native void setOldRateLimiter(long handle,
long rateLimiterHandle);
private native void setRateLimiter(long handle,
long rateLimiterHandle);
private native void setLogger(long handle,
@ -1436,7 +1426,6 @@ public class Options extends RocksObject
Env env_;
MemTableConfig memTableConfig_;
TableFormatConfig tableFormatConfig_;
RateLimiterConfig rateLimiterConfig_;
RateLimiter rateLimiter_;
AbstractComparator<? extends AbstractSlice<?>> comparator_;
}

@ -1,26 +0,0 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
/**
* Config for rate limiter, which is used to control write rate of flush and
* compaction.
*
* @deprecated obsolete. See: {@link org.rocksdb.RateLimiter}.
*/
@Deprecated
public abstract class RateLimiterConfig {
/**
* This function should only be called by
* {@link org.rocksdb.DBOptions#setRateLimiter(long, long)}, which will
* create a c++ shared-pointer to the c++ {@code RateLimiter} that is associated
* with a Java RateLimiterConfig.
*
* @see org.rocksdb.DBOptions#setRateLimiter(long, long)
*
* @return native handle address to rate limiter instance.
*/
abstract protected long newRateLimiterHandle();
}

@ -520,11 +520,11 @@ public class RocksDB extends RocksObject {
* to make this lighter weight is to avoid doing any IOs.
*
* @param key byte array of a key to search for
* @param value StringBuilder instance which is an out parameter if a value is
* found in block-cache.
* @return boolean value indicating if key does not exist or might exist.
*/
public boolean keyMayExist(final byte[] key, final StringBuilder value) {
return keyMayExist(nativeHandle_, key, 0, key.length, value);
}
@ -537,12 +537,12 @@ public class RocksDB extends RocksObject {
*
* @param columnFamilyHandle {@link ColumnFamilyHandle} instance
* @param key byte array of a key to search for
* @param value StringBuilder instance which is an out parameter if a value is
* found in block-cache.
* @return boolean value indicating if key does not exist or might exist.
*/
public boolean keyMayExist(final ColumnFamilyHandle columnFamilyHandle,
final byte[] key, final StringBuilder value) {
return keyMayExist(nativeHandle_, key, 0, key.length,
columnFamilyHandle.nativeHandle_, value);
}
@ -556,12 +556,12 @@ public class RocksDB extends RocksObject {
*
* @param readOptions {@link ReadOptions} instance
* @param key byte array of a key to search for
* @param value StringBuilder instance which is an out parameter if a value is
* found in block-cache.
* @return boolean value indicating if key does not exist or might exist.
*/
public boolean keyMayExist(final ReadOptions readOptions,
final byte[] key, final StringBuilder value) {
return keyMayExist(nativeHandle_, readOptions.nativeHandle_,
key, 0, key.length, value);
}
@ -576,13 +576,13 @@ public class RocksDB extends RocksObject {
* @param readOptions {@link ReadOptions} instance
* @param columnFamilyHandle {@link ColumnFamilyHandle} instance
* @param key byte array of a key to search for
* @param value StringBuilder instance which is an out parameter if a value is
* found in block-cache.
* @return boolean value indicating if key does not exist or might exist.
*/
public boolean keyMayExist(final ReadOptions readOptions,
final ColumnFamilyHandle columnFamilyHandle, final byte[] key,
final StringBuilder value) {
return keyMayExist(nativeHandle_, readOptions.nativeHandle_,
key, 0, key.length, columnFamilyHandle.nativeHandle_,
value);
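A brief usage sketch of the reworked `keyMayExist` signatures (mirroring the updated KeyMayExistTest further below); the path is illustrative:

```java
try (final Options options = new Options().setCreateIfMissing(true);
     final RocksDB db = RocksDB.open(options, "/tmp/kme-sketch")) {
  db.put("key".getBytes(), "value".getBytes());

  final StringBuilder retValue = new StringBuilder();
  if (db.keyMayExist("key".getBytes(), retValue)) {
    // the key may exist; a value found in the block cache was appended
    System.out.println("possible value: " + retValue);
  } else {
    System.out.println("key definitely does not exist");
  }
} catch (final RocksDBException e) {
  e.printStackTrace();
}
```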
@ -685,6 +685,9 @@ public class RocksDB extends RocksObject {
columnFamilyHandle.nativeHandle_);
}
// TODO(AR) we should improve the #get() API, returning -1 (RocksDB.NOT_FOUND) is not very nice
// when we could communicate a better status; the C++ code also shows that -2 could be returned
/**
* Get the value associated with the specified key within a column family.
* @param key the key to retrieve the value.
@ -1917,6 +1920,8 @@ public class RocksDB extends RocksObject {
* This function will wait until all currently running background processes
* finish. After it returns, no background process will be run until
* {@link #continueBackgroundWork()} is called
*
* @throws RocksDBException If an error occurs when pausing background work
*/
public void pauseBackgroundWork() throws RocksDBException {
pauseBackgroundWork(nativeHandle_);
@ -1925,6 +1930,8 @@ public class RocksDB extends RocksObject {
/**
* Resumes background work which was suspended by
* previously calling {@link #pauseBackgroundWork()}
*
* @throws RocksDBException If an error occurs when resuming background work
*/
public void continueBackgroundWork() throws RocksDBException {
continueBackgroundWork(nativeHandle_);
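The two calls pair naturally; a short sketch of quiescing background work around a critical section, assuming an open `RocksDB db`:

```java
db.pauseBackgroundWork();
try {
  // e.g. snapshot or copy SST files while flushes/compactions are quiesced
} finally {
  // always resume, even if the critical section throws
  db.continueBackgroundWork();
}
```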
@ -2182,17 +2189,17 @@ public class RocksDB extends RocksObject {
long wbwiHandle) throws RocksDBException;
protected native boolean keyMayExist(final long handle, final byte[] key,
final int keyOffset, final int keyLength,
final StringBuilder stringBuilder);
protected native boolean keyMayExist(final long handle, final byte[] key,
final int keyOffset, final int keyLength, final long cfHandle,
final StringBuilder stringBuilder);
protected native boolean keyMayExist(final long handle,
final long optionsHandle, final byte[] key, final int keyOffset,
final int keyLength, final StringBuilder stringBuilder);
protected native boolean keyMayExist(final long handle,
final long optionsHandle, final byte[] key, final int keyOffset,
final int keyLength, final long cfHandle,
final StringBuilder stringBuilder);
protected native void merge(long handle, byte[] key, int keyOffset,
int keyLength, byte[] value, int valueOffset, int valueLength)
throws RocksDBException;

@ -30,6 +30,24 @@ public abstract class RocksMutableObject extends AbstractNativeReference {
this.owningHandle_ = true;
}
/**
* Closes the existing handle, and changes the handle to the new handle
*
* @param newNativeHandle The C++ pointer to the new native object
* @param owningNativeHandle true if we own the new native object
*/
public synchronized void resetNativeHandle(final long newNativeHandle,
final boolean owningNativeHandle) {
close();
setNativeHandle(newNativeHandle, owningNativeHandle);
}
/**
* Sets the handle (C++ pointer) of the underlying C++ native object
*
* @param nativeHandle The C++ pointer to the native object
* @param owningNativeHandle true if we own the native object
*/
public synchronized void setNativeHandle(final long nativeHandle,
final boolean owningNativeHandle) {
this.nativeHandle_ = nativeHandle;

@ -14,6 +14,13 @@ package org.rocksdb;
* values consider using {@link org.rocksdb.DirectSlice}</p>
*/
public class Slice extends AbstractSlice<byte[]> {
/**
* Indicates whether we have to free the memory pointed to by the Slice
*/
private volatile boolean cleared;
private volatile long internalBufferOffset = 0;
/**
* <p>Called from JNI to construct a new Java Slice
* without an underlying C++ object set
@ -27,6 +34,7 @@ public class Slice extends AbstractSlice<byte[]> {
* Slice objects through this, they are not creating underlying C++ Slice
* objects, and so there is nothing to free (dispose) from Java.</p>
*/
@SuppressWarnings("unused")
private Slice() {
super();
}
@ -62,6 +70,18 @@ public class Slice extends AbstractSlice<byte[]> {
super(createNewSlice1(data));
}
@Override
public void clear() {
clear0(getNativeHandle(), !cleared, internalBufferOffset);
cleared = true;
}
@Override
public void removePrefix(final int n) {
removePrefix0(getNativeHandle(), n);
this.internalBufferOffset += n;
}
/**
* <p>Deletes underlying C++ slice pointer
* and any buffered data.</p>
@ -74,7 +94,9 @@ public class Slice extends AbstractSlice<byte[]> {
@Override
protected void disposeInternal() {
final long nativeHandle = getNativeHandle();
if(!cleared) {
disposeInternalBuf(nativeHandle, internalBufferOffset);
}
super.disposeInternal(nativeHandle);
}
@ -82,5 +104,9 @@ public class Slice extends AbstractSlice<byte[]> {
private native static long createNewSlice0(final byte[] data,
final int length);
private native static long createNewSlice1(final byte[] data);
private native void clear0(long handle, boolean internalBuffer,
long internalBufferOffset);
private native void removePrefix0(long handle, int length);
private native void disposeInternalBuf(final long handle,
long internalBufferOffset);
}

@ -9,9 +9,11 @@ package org.rocksdb;
* StringAppendOperator is a merge operator that concatenates
* two strings.
*/
public class StringAppendOperator extends MergeOperator {
public StringAppendOperator() {
super(newSharedStringAppendOperator());
}
private native static long newSharedStringAppendOperator();
@Override protected final native void disposeInternal(final long handle);
}

@ -65,7 +65,7 @@ public class TransactionLogIterator extends RocksObject {
* by a TransactionLogIterator containing a sequence
* number and a {@link WriteBatch} instance.</p>
*/
public static final class BatchResult {
/**
* <p>Constructor of BatchResult class.</p>
*

@ -29,12 +29,11 @@ public class WBWIRocksIterator
*/
public WriteEntry entry() {
assert(isOwningHandle());
assert(entry != null);
final long ptrs[] = entry1(nativeHandle_);
entry.type = WriteType.fromId((byte)ptrs[0]);
entry.key.resetNativeHandle(ptrs[1], ptrs[1] != 0);
entry.value.resetNativeHandle(ptrs[2], ptrs[2] != 0);
return entry;
}
@ -75,6 +74,12 @@ public class WBWIRocksIterator
}
}
@Override
public void close() {
entry.close();
super.close();
}
/**
* Represents an entry returned by
* {@link org.rocksdb.WBWIRocksIterator#entry()}
@ -84,7 +89,7 @@ public class WBWIRocksIterator
* or {@link org.rocksdb.WBWIRocksIterator.WriteType#LOG}
* will not have a value.
*/
public static class WriteEntry implements AutoCloseable {
WriteType type = null;
final DirectSlice key;
final DirectSlice value;
@ -101,7 +106,8 @@ public class WBWIRocksIterator
value = new DirectSlice();
}
public WriteEntry(final WriteType type, final DirectSlice key,
final DirectSlice value) {
this.type = type;
this.key = key;
this.value = value;
@ -154,7 +160,7 @@ public class WBWIRocksIterator
}
@Override
public boolean equals(final Object other) {
if(other == null) {
return false;
} else if (this == other) {
@ -168,5 +174,11 @@ public class WBWIRocksIterator
return false;
}
}
@Override
public void close() {
value.close();
key.close();
}
}
}
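With `WriteEntry` now `AutoCloseable` and reused across `entry()` calls, iteration looks roughly like this sketch (accessor names such as `getType()` are assumed from the existing `WriteEntry` API):

```java
try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex()) {
  wbwi.put("k1".getBytes(), "v1".getBytes());

  try (final WBWIRocksIterator it = wbwi.newIterator()) {
    for (it.seekToFirst(); it.isValid(); it.next()) {
      // entry() re-points the iterator's single, reused WriteEntry
      final WBWIRocksIterator.WriteEntry entry = it.entry();
      System.out.println(entry.getType());
    }
  } // closing the iterator also closes the WriteEntry's key/value slices
}
```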

@ -144,6 +144,9 @@ public class WriteBatchWithIndex extends AbstractWriteBatch {
* @param options The database options to use
* @param key The key to read the value for
*
* @return a byte array storing the value associated with the input key if
* any, or null if the specified key is not found.
*
* @throws RocksDBException if the batch does not have enough data to resolve
* Merge operations, MergeInProgress status may be returned.
*/
@ -160,6 +163,9 @@ public class WriteBatchWithIndex extends AbstractWriteBatch {
* @param options The database options to use
* @param key The key to read the value for
*
* @return a byte array storing the value associated with the input key if
* any, or null if the specified key is not found.
*
* @throws RocksDBException if the batch does not have enough data to resolve
* Merge operations, MergeInProgress status may be returned.
*/
@ -181,10 +187,14 @@ public class WriteBatchWithIndex extends AbstractWriteBatch {
* (the keys in this batch do not yet belong to any snapshot and will be
* fetched regardless).
*
* @param db The Rocks database
* @param columnFamilyHandle The column family to retrieve the value from
* @param options The read options to use
* @param key The key to read the value for
*
* @return a byte array storing the value associated with the input key if
* any, or null if the specified key is not found.
*
* @throws RocksDBException if the value for the key cannot be read
*/
public byte[] getFromBatchAndDB(final RocksDB db, final ColumnFamilyHandle columnFamilyHandle,
@ -207,9 +217,13 @@ public class WriteBatchWithIndex extends AbstractWriteBatch {
* (the keys in this batch do not yet belong to any snapshot and will be
* fetched regardless).
*
* @param db The Rocks database
* @param options The read options to use
* @param key The key to read the value for
*
* @return a byte array storing the value associated with the input key if
* any, or null if the specified key is not found.
*
* @throws RocksDBException if the value for the key cannot be read
*/
public byte[] getFromBatchAndDB(final RocksDB db, final ReadOptions options,
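A minimal read-your-own-writes sketch for `getFromBatchAndDB`, assuming an open `RocksDB db`:

```java
try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex();
     final ReadOptions readOptions = new ReadOptions()) {
  wbwi.put("key".getBytes(), "batch-value".getBytes());

  // resolved from the as-yet-unwritten batch first, falling back to the DB
  final byte[] value = wbwi.getFromBatchAndDB(db, readOptions, "key".getBytes());
  assert "batch-value".equals(new String(value));
} catch (final RocksDBException e) {
  e.printStackTrace();
}
```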

@ -203,8 +203,9 @@ public class ColumnFamilyTest {
@Test
public void writeBatch() throws RocksDBException {
try (final StringAppendOperator stringAppendOperator = new StringAppendOperator();
final ColumnFamilyOptions defaultCfOptions = new ColumnFamilyOptions()
.setMergeOperator(stringAppendOperator)) {
final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY,
defaultCfOptions),

@ -388,25 +388,11 @@ public class DBOptionsTest {
}
}
@Test
public void rateLimiterConfig() {
try(final DBOptions options = new DBOptions();
final DBOptions anotherOptions = new DBOptions()) {
final RateLimiterConfig rateLimiterConfig =
new GenericRateLimiterConfig(1000, 100 * 1000, 1);
options.setRateLimiterConfig(rateLimiterConfig);
// Test with parameter initialization
anotherOptions.setRateLimiterConfig(
new GenericRateLimiterConfig(1000));
}
}
@Test
public void rateLimiter() {
try(final DBOptions options = new DBOptions();
final DBOptions anotherOptions = new DBOptions();
final RateLimiter rateLimiter = new RateLimiter(1000, 100 * 1000, 1)) {
options.setRateLimiter(rateLimiter);
// Test with parameter initialization
anotherOptions.setRateLimiter(

@ -54,7 +54,7 @@ public class DirectSliceTest {
}
}
@Test(expected = IllegalArgumentException.class)
public void directSliceInitWithoutDirectAllocation() {
final byte[] data = "Some text".getBytes();
final ByteBuffer buffer = ByteBuffer.wrap(data);
@ -63,7 +63,7 @@ public class DirectSliceTest {
}
}
@Test(expected = IllegalArgumentException.class)
public void directSlicePrefixInitWithoutDirectAllocation() {
final byte[] data = "Some text".getBytes();
final ByteBuffer buffer = ByteBuffer.wrap(data);
@ -71,4 +71,23 @@ public class DirectSliceTest {
//no-op
}
}
@Test
public void directSliceClear() {
try(final DirectSlice directSlice = new DirectSlice("abc")) {
assertThat(directSlice.toString()).isEqualTo("abc");
directSlice.clear();
assertThat(directSlice.toString()).isEmpty();
directSlice.clear(); // make sure we don't double-free
}
}
@Test
public void directSliceRemovePrefix() {
try(final DirectSlice directSlice = new DirectSlice("abc")) {
assertThat(directSlice.toString()).isEqualTo("abc");
directSlice.removePrefix(1);
assertThat(directSlice.toString()).isEqualTo("bc");
}
}
}

@ -118,16 +118,16 @@ public class EnvOptionsTest {
}
@Test
public void rateLimiter() {
try (final EnvOptions envOptions = new EnvOptions();
final RateLimiter rateLimiter1 = new RateLimiter(1000, 100 * 1000, 1)) {
envOptions.setRateLimiter(rateLimiter1);
assertThat(envOptions.rateLimiter()).isEqualTo(rateLimiter1);
try(final RateLimiter rateLimiter2 = new RateLimiter(1000)) {
envOptions.setRateLimiter(rateLimiter2);
assertThat(envOptions.rateLimiter()).isEqualTo(rateLimiter2);
}
}
}
}

@ -43,21 +43,21 @@ public class KeyMayExistTest {
isEqualTo(2);
db.put("key".getBytes(), "value".getBytes());
// Test without column family
StringBuilder retValue = new StringBuilder();
boolean exists = db.keyMayExist("key".getBytes(), retValue);
assertThat(exists).isTrue();
assertThat(retValue.toString()).isEqualTo("value");
// Test without column family but with readOptions
try (final ReadOptions readOptions = new ReadOptions()) {
retValue = new StringBuilder();
exists = db.keyMayExist(readOptions, "key".getBytes(), retValue);
assertThat(exists).isTrue();
assertThat(retValue.toString()).isEqualTo("value");
}
// Test with column family
retValue = new StringBuilder();
exists = db.keyMayExist(columnFamilyHandleList.get(0), "key".getBytes(),
retValue);
assertThat(exists).isTrue();
@ -65,7 +65,7 @@ public class KeyMayExistTest {
// Test with column family and readOptions
try (final ReadOptions readOptions = new ReadOptions()) {
retValue = new StringBuilder();
exists = db.keyMayExist(readOptions,
columnFamilyHandleList.get(0), "key".getBytes(),
retValue);

@ -89,11 +89,10 @@ public class MergeTest {
@Test
public void operatorOption()
throws InterruptedException, RocksDBException {
try (final StringAppendOperator stringAppendOperator = new StringAppendOperator();
final Options opt = new Options()
.setCreateIfMissing(true)
.setMergeOperator(stringAppendOperator);
final RocksDB db = RocksDB.open(opt,
dbFolder.getRoot().getAbsolutePath())) {
// Writing aa under key
@ -112,10 +111,9 @@ public class MergeTest {
@Test
public void cFOperatorOption()
throws InterruptedException, RocksDBException {
try (final StringAppendOperator stringAppendOperator = new StringAppendOperator();
final ColumnFamilyOptions cfOpt1 = new ColumnFamilyOptions()
.setMergeOperator(stringAppendOperator);
final ColumnFamilyOptions cfOpt2 = new ColumnFamilyOptions()
.setMergeOperator(stringAppendOperator)
) {
@ -175,42 +173,43 @@ public class MergeTest {
@Test
public void operatorGcBehaviour()
throws RocksDBException {
try (final StringAppendOperator stringAppendOperator = new StringAppendOperator()) {
try (final Options opt = new Options()
.setCreateIfMissing(true)
.setMergeOperator(stringAppendOperator);
final RocksDB db = RocksDB.open(opt,
dbFolder.getRoot().getAbsolutePath())) {
//no-op
}
// test reuse
try (final Options opt = new Options()
.setMergeOperator(stringAppendOperator);
final RocksDB db = RocksDB.open(opt,
dbFolder.getRoot().getAbsolutePath())) {
//no-op
}
// test param init
try (final StringAppendOperator stringAppendOperator2 = new StringAppendOperator();
final Options opt = new Options()
.setMergeOperator(stringAppendOperator2);
final RocksDB db = RocksDB.open(opt,
dbFolder.getRoot().getAbsolutePath())) {
//no-op
}
// test replace one with another merge operator instance
try (final Options opt = new Options()
.setMergeOperator(stringAppendOperator);
final StringAppendOperator newStringAppendOperator = new StringAppendOperator()) {
opt.setMergeOperator(newStringAppendOperator);
try (final RocksDB db = RocksDB.open(opt,
dbFolder.getRoot().getAbsolutePath())) {
//no-op
}
}
}
}

@ -697,16 +697,15 @@ public class OptionsTest {
@Test
public void compressionPerLevel() {
try (final Options options = new Options()) {
assertThat(options.compressionPerLevel()).isEmpty();
List<CompressionType> compressionTypeList =
new ArrayList<>();
for (int i = 0; i < options.numLevels(); i++) {
compressionTypeList.add(CompressionType.NO_COMPRESSION);
}
options.setCompressionPerLevel(compressionTypeList);
compressionTypeList = options.compressionPerLevel();
for (final CompressionType compressionType : compressionTypeList) {
assertThat(compressionType).isEqualTo(
CompressionType.NO_COMPRESSION);
@ -716,19 +715,18 @@ public class OptionsTest {
@Test
public void differentCompressionsPerLevel() {
try (final Options options = new Options()) {
options.setNumLevels(3);
assertThat(options.compressionPerLevel()).isEmpty();
List<CompressionType> compressionTypeList = new ArrayList<>();
compressionTypeList.add(CompressionType.BZLIB2_COMPRESSION);
compressionTypeList.add(CompressionType.SNAPPY_COMPRESSION);
compressionTypeList.add(CompressionType.LZ4_COMPRESSION);
options.setCompressionPerLevel(compressionTypeList);
compressionTypeList = options.compressionPerLevel();
assertThat(compressionTypeList.size()).isEqualTo(3);
assertThat(compressionTypeList).
@ -767,26 +765,12 @@ public class OptionsTest {
}
}
@Test
public void rateLimiterConfig() {
try (final Options options = new Options();
final Options anotherOptions = new Options()) {
final RateLimiterConfig rateLimiterConfig =
new GenericRateLimiterConfig(1000, 100 * 1000, 1);
options.setRateLimiterConfig(rateLimiterConfig);
// Test with parameter initialization
anotherOptions.setRateLimiterConfig(
new GenericRateLimiterConfig(1000));
}
}
@Test
public void rateLimiter() {
try (final Options options = new Options();
final Options anotherOptions = new Options();
final RateLimiter rateLimiter =
new RateLimiter(1000, 100 * 1000, 1)) {
options.setRateLimiter(rateLimiter);
// Test with parameter initialization
anotherOptions.setRateLimiter(
@ -810,7 +794,6 @@ public class OptionsTest {
}
}
@Test
public void shouldTestMemTableFactoryName()
throws RocksDBException {

@ -0,0 +1,49 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
import org.junit.ClassRule;
import org.junit.Test;
import static org.assertj.core.api.Assertions.assertThat;
public class RateLimiterTest {
@ClassRule
public static final RocksMemoryResource rocksMemoryResource =
new RocksMemoryResource();
@Test
public void setBytesPerSecond() {
try(final RateLimiter rateLimiter =
new RateLimiter(1000, 100 * 1000, 1)) {
rateLimiter.setBytesPerSecond(2000);
}
}
@Test
public void getSingleBurstBytes() {
try(final RateLimiter rateLimiter =
new RateLimiter(1000, 100 * 1000, 1)) {
assertThat(rateLimiter.getSingleBurstBytes()).isEqualTo(100);
}
}
@Test
public void getTotalBytesThrough() {
try(final RateLimiter rateLimiter =
new RateLimiter(1000, 100 * 1000, 1)) {
assertThat(rateLimiter.getTotalBytesThrough()).isEqualTo(0);
}
}
@Test
public void getTotalRequests() {
try(final RateLimiter rateLimiter =
new RateLimiter(1000, 100 * 1000, 1)) {
assertThat(rateLimiter.getTotalRequests()).isEqualTo(0);
}
}
}

@ -73,8 +73,10 @@ public class RocksDBTest {
@Test
public void write() throws RocksDBException {
try (final StringAppendOperator stringAppendOperator = new StringAppendOperator();
final Options options = new Options()
.setMergeOperator(stringAppendOperator)
.setCreateIfMissing(true);
final RocksDB db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath());
final WriteOptions opts = new WriteOptions()) {
@ -182,9 +184,10 @@ public class RocksDBTest {
@Test
public void merge() throws RocksDBException {
try (final StringAppendOperator stringAppendOperator = new StringAppendOperator();
final Options opt = new Options()
.setCreateIfMissing(true)
.setMergeOperator(stringAppendOperator);
final WriteOptions wOpt = new WriteOptions();
final RocksDB db = RocksDB.open(opt,
dbFolder.getRoot().getAbsolutePath())

@ -32,6 +32,25 @@ public class SliceTest {
}
}
@Test
public void sliceClear() {
try (final Slice slice = new Slice("abc")) {
assertThat(slice.toString()).isEqualTo("abc");
slice.clear();
assertThat(slice.toString()).isEmpty();
slice.clear(); // make sure we don't double-free
}
}
@Test
public void sliceRemovePrefix() {
try (final Slice slice = new Slice("abc")) {
assertThat(slice.toString()).isEqualTo("abc");
slice.removePrefix(1);
assertThat(slice.toString()).isEqualTo("bc");
}
}
@Test
public void sliceEquals() {
try (final Slice slice = new Slice("abc");

@ -192,16 +192,16 @@ public class WriteBatchWithIndexTest {
final ByteBuffer buffer = ByteBuffer.allocateDirect(zeroByteValue.length);
buffer.put(zeroByteValue);
final WBWIRocksIterator.WriteEntry expected =
new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.PUT,
new DirectSlice(buffer, zeroByteValue.length),
new DirectSlice(buffer, zeroByteValue.length));
try (final WBWIRocksIterator it = wbwi.newIterator()) {
it.seekToFirst();
final WBWIRocksIterator.WriteEntry actual = it.entry();
assertThat(actual.equals(expected)).isTrue();
assertThat(it.entry().hashCode() == expected.hashCode()).isTrue();
}
}
}

@ -31,12 +31,12 @@ public class RocksJunitRunner {
*
* @param system JUnitSystem
*/
public RocksJunitListener(final JUnitSystem system) {
super(system);
}
@Override
public void testStarted(final Description description) {
System.out.format("Run: %s testing now -> %s \n",
description.getClassName(),
description.getMethodName());
@ -48,21 +48,23 @@ public class RocksJunitRunner {
*
* @param args Test classes as String names
*/
public static void main(final String[] args) {
final JUnitCore runner = new JUnitCore();
final JUnitSystem system = new RealSystem();
runner.addListener(new RocksJunitListener(system));
try {
final List<Class<?>> classes = new ArrayList<>();
for (final String arg : args) {
classes.add(Class.forName(arg));
}
final Class[] clazzes = classes.toArray(new Class[classes.size()]);
final Result result = runner.run(clazzes);
if(!result.wasSuccessful()) {
System.exit(-1);
}
} catch (final ClassNotFoundException e) {
e.printStackTrace();
System.exit(-2);
}
}
}
