Merge pull request #980 from adamretter/java-arm

ARM for the Java API
main
Yueh-Hsuan Chiang 9 years ago
commit 51c9464dfc
  1. 23
      java/rocksjni/backupablejni.cc
  2. 14
      java/rocksjni/backupenginejni.cc
  3. 12
      java/rocksjni/comparator.cc
  4. 6
      java/rocksjni/comparatorjnicallback.cc
  5. 12
      java/rocksjni/filter.cc
  6. 12
      java/rocksjni/loggerjnicallback.cc
  7. 131
      java/rocksjni/options.cc
  8. 93
      java/rocksjni/portal.h
  9. 13
      java/rocksjni/remove_emptyvalue_compactionfilterjni.cc
  10. 16
      java/rocksjni/restorejni.cc
  11. 574
      java/rocksjni/rocksjni.cc
  12. 48
      java/rocksjni/slice.cc
  13. 171
      java/rocksjni/ttl.cc
  14. 90
      java/rocksjni/write_batch.cc
  15. 26
      java/rocksjni/write_batch_test.cc
  16. 183
      java/rocksjni/write_batch_with_index.cc
  17. 82
      java/samples/src/main/java/RocksDBColumnFamilySample.java
  18. 510
      java/samples/src/main/java/RocksDBSample.java
  19. 19
      java/src/main/java/org/rocksdb/AbstractCompactionFilter.java
  20. 16
      java/src/main/java/org/rocksdb/AbstractComparator.java
  21. 66
      java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java
  22. 76
      java/src/main/java/org/rocksdb/AbstractNativeReference.java
  23. 23
      java/src/main/java/org/rocksdb/AbstractRocksIterator.java
  24. 56
      java/src/main/java/org/rocksdb/AbstractSlice.java
  25. 87
      java/src/main/java/org/rocksdb/AbstractWriteBatch.java
  26. 63
      java/src/main/java/org/rocksdb/BackupEngine.java
  27. 35
      java/src/main/java/org/rocksdb/BackupableDB.java
  28. 119
      java/src/main/java/org/rocksdb/BackupableDBOptions.java
  29. 16
      java/src/main/java/org/rocksdb/BloomFilter.java
  30. 18
      java/src/main/java/org/rocksdb/Checkpoint.java
  31. 23
      java/src/main/java/org/rocksdb/ColumnFamilyHandle.java
  32. 90
      java/src/main/java/org/rocksdb/ColumnFamilyOptions.java
  33. 12
      java/src/main/java/org/rocksdb/Comparator.java
  34. 16
      java/src/main/java/org/rocksdb/ComparatorOptions.java
  35. 153
      java/src/main/java/org/rocksdb/DBOptions.java
  36. 13
      java/src/main/java/org/rocksdb/DirectComparator.java
  37. 45
      java/src/main/java/org/rocksdb/DirectSlice.java
  38. 4
      java/src/main/java/org/rocksdb/Env.java
  39. 12
      java/src/main/java/org/rocksdb/Filter.java
  40. 15
      java/src/main/java/org/rocksdb/FlushOptions.java
  41. 17
      java/src/main/java/org/rocksdb/Logger.java
  42. 201
      java/src/main/java/org/rocksdb/Options.java
  43. 27
      java/src/main/java/org/rocksdb/ReadOptions.java
  44. 8
      java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java
  45. 30
      java/src/main/java/org/rocksdb/RestoreBackupableDB.java
  46. 27
      java/src/main/java/org/rocksdb/RestoreOptions.java
  47. 367
      java/src/main/java/org/rocksdb/RocksDB.java
  48. 6
      java/src/main/java/org/rocksdb/RocksEnv.java
  49. 6
      java/src/main/java/org/rocksdb/RocksIterator.java
  50. 10
      java/src/main/java/org/rocksdb/RocksMemEnv.java
  51. 69
      java/src/main/java/org/rocksdb/RocksMutableObject.java
  52. 122
      java/src/main/java/org/rocksdb/RocksObject.java
  53. 22
      java/src/main/java/org/rocksdb/Slice.java
  54. 8
      java/src/main/java/org/rocksdb/Snapshot.java
  55. 9
      java/src/main/java/org/rocksdb/TransactionLogIterator.java
  56. 68
      java/src/main/java/org/rocksdb/TtlDB.java
  57. 49
      java/src/main/java/org/rocksdb/WBWIRocksIterator.java
  58. 60
      java/src/main/java/org/rocksdb/WriteBatch.java
  59. 109
      java/src/main/java/org/rocksdb/WriteBatchWithIndex.java
  60. 11
      java/src/main/java/org/rocksdb/WriteOptions.java
  61. 206
      java/src/test/java/org/rocksdb/AbstractComparatorTest.java
  62. 231
      java/src/test/java/org/rocksdb/BackupEngineTest.java
  63. 230
      java/src/test/java/org/rocksdb/BackupableDBOptionsTest.java
  64. 457
      java/src/test/java/org/rocksdb/BackupableDBTest.java
  65. 24
      java/src/test/java/org/rocksdb/BlockBasedTableConfigTest.java
  66. 91
      java/src/test/java/org/rocksdb/CheckPointTest.java
  67. 459
      java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java
  68. 1059
      java/src/test/java/org/rocksdb/ColumnFamilyTest.java
  69. 9
      java/src/test/java/org/rocksdb/ComparatorOptionsTest.java
  70. 219
      java/src/test/java/org/rocksdb/ComparatorTest.java
  71. 5
      java/src/test/java/org/rocksdb/CompressionOptionsTest.java
  72. 412
      java/src/test/java/org/rocksdb/DBOptionsTest.java
  73. 70
      java/src/test/java/org/rocksdb/DirectSliceTest.java
  74. 42
      java/src/test/java/org/rocksdb/FilterTest.java
  75. 56
      java/src/test/java/org/rocksdb/FlushTest.java
  76. 66
      java/src/test/java/org/rocksdb/InfoLogLevelTest.java
  77. 110
      java/src/test/java/org/rocksdb/KeyMayExistTest.java
  78. 260
      java/src/test/java/org/rocksdb/LoggerTest.java
  79. 34
      java/src/test/java/org/rocksdb/MemTableTest.java
  80. 359
      java/src/test/java/org/rocksdb/MergeTest.java
  81. 57
      java/src/test/java/org/rocksdb/MixedOptionsTest.java
  82. 2
      java/src/test/java/org/rocksdb/NativeLibraryLoaderTest.java
  83. 743
      java/src/test/java/org/rocksdb/OptionsTest.java
  84. 10
      java/src/test/java/org/rocksdb/PlainTableConfigTest.java
  85. 2
      java/src/test/java/org/rocksdb/PlatformRandomHelper.java
  86. 492
      java/src/test/java/org/rocksdb/ReadOnlyTest.java
  87. 128
      java/src/test/java/org/rocksdb/ReadOptionsTest.java
  88. 849
      java/src/test/java/org/rocksdb/RocksDBTest.java
  89. 35
      java/src/test/java/org/rocksdb/RocksEnvTest.java
  90. 68
      java/src/test/java/org/rocksdb/RocksIteratorTest.java
  91. 162
      java/src/test/java/org/rocksdb/RocksMemEnvTest.java
  92. 4
      java/src/test/java/org/rocksdb/RocksMemoryResource.java
  93. 68
      java/src/test/java/org/rocksdb/SliceTest.java
  94. 288
      java/src/test/java/org/rocksdb/SnapshotTest.java
  95. 32
      java/src/test/java/org/rocksdb/StatisticsCollectorTest.java
  96. 210
      java/src/test/java/org/rocksdb/TransactionLogIteratorTest.java
  97. 160
      java/src/test/java/org/rocksdb/TtlDBTest.java
  98. 83
      java/src/test/java/org/rocksdb/WriteBatchHandlerTest.java
  99. 150
      java/src/test/java/org/rocksdb/WriteBatchTest.java
  100. 230
      java/src/test/java/org/rocksdb/WriteBatchWithIndexTest.java
  101. Some files were not shown because too many files have changed in this diff. (Show more)

@@ -21,17 +21,17 @@
/*
* Class: org_rocksdb_BackupableDB
* Method: open
* Signature: (JJ)V
* Signature: (JJ)J
*/
void Java_org_rocksdb_BackupableDB_open(
JNIEnv* env, jobject jbdb, jlong jdb_handle, jlong jopt_handle) {
auto db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
auto opt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jopt_handle);
jlong Java_org_rocksdb_BackupableDB_open(
JNIEnv* env, jclass jcls, jlong jdb_handle, jlong jopt_handle) {
auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
auto* opt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jopt_handle);
auto bdb = new rocksdb::BackupableDB(db, *opt);
// as BackupableDB extends RocksDB on the java side, we can reuse
// the RocksDB portal here.
rocksdb::RocksDBJni::setHandle(env, jbdb, bdb);
return reinterpret_cast<jlong>(bdb);
}
/*
@@ -135,14 +135,14 @@ void Java_org_rocksdb_BackupableDB_garbageCollect(JNIEnv* env,
/*
* Class: org_rocksdb_BackupableDBOptions
* Method: newBackupableDBOptions
* Signature: (Ljava/lang/String;)V
* Signature: (Ljava/lang/String;)J
*/
void Java_org_rocksdb_BackupableDBOptions_newBackupableDBOptions(
JNIEnv* env, jobject jobj, jstring jpath) {
const char* cpath = env->GetStringUTFChars(jpath, 0);
jlong Java_org_rocksdb_BackupableDBOptions_newBackupableDBOptions(
JNIEnv* env, jclass jcls, jstring jpath) {
const char* cpath = env->GetStringUTFChars(jpath, NULL);
auto bopt = new rocksdb::BackupableDBOptions(cpath);
env->ReleaseStringUTFChars(jpath, cpath);
rocksdb::BackupableDBOptionsJni::setHandle(env, jobj, bopt);
return reinterpret_cast<jlong>(bopt);
}
/*
@@ -320,5 +320,4 @@ void Java_org_rocksdb_BackupableDBOptions_disposeInternal(
auto bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
assert(bopt);
delete bopt;
rocksdb::BackupableDBOptionsJni::setHandle(env, jopt, nullptr);
}

@@ -16,10 +16,10 @@
/*
* Class: org_rocksdb_BackupEngine
* Method: open
* Signature: (JJ)V
* Signature: (JJ)J
*/
void Java_org_rocksdb_BackupEngine_open(
JNIEnv* env, jobject jbe, jlong env_handle,
jlong Java_org_rocksdb_BackupEngine_open(
JNIEnv* env, jclass jcls, jlong env_handle,
jlong backupable_db_options_handle) {
auto* rocks_env = reinterpret_cast<rocksdb::Env*>(env_handle);
auto* backupable_db_options =
@@ -30,11 +30,11 @@ void Java_org_rocksdb_BackupEngine_open(
*backupable_db_options, &backup_engine);
if (status.ok()) {
rocksdb::BackupEngineJni::setHandle(env, jbe, backup_engine);
return;
return reinterpret_cast<jlong>(backup_engine);
} else {
rocksdb::RocksDBExceptionJni::ThrowNew(env, status);
return 0;
}
rocksdb::RocksDBExceptionJni::ThrowNew(env, status);
}
/*

@@ -36,15 +36,15 @@ void Java_org_rocksdb_AbstractComparator_disposeInternal(
/*
* Class: org_rocksdb_Comparator
* Method: createNewComparator0
* Signature: ()V
* Signature: ()J
*/
void Java_org_rocksdb_Comparator_createNewComparator0(
jlong Java_org_rocksdb_Comparator_createNewComparator0(
JNIEnv* env, jobject jobj, jlong copt_handle) {
const rocksdb::ComparatorJniCallbackOptions* copt =
reinterpret_cast<rocksdb::ComparatorJniCallbackOptions*>(copt_handle);
const rocksdb::ComparatorJniCallback* c =
new rocksdb::ComparatorJniCallback(env, jobj, copt);
rocksdb::AbstractComparatorJni::setHandle(env, jobj, c);
return reinterpret_cast<jlong>(c);
}
// </editor-fold>
@@ -53,14 +53,14 @@ void Java_org_rocksdb_Comparator_createNewComparator0(
/*
* Class: org_rocksdb_DirectComparator
* Method: createNewDirectComparator0
* Signature: ()V
* Signature: ()J
*/
void Java_org_rocksdb_DirectComparator_createNewDirectComparator0(
jlong Java_org_rocksdb_DirectComparator_createNewDirectComparator0(
JNIEnv* env, jobject jobj, jlong copt_handle) {
const rocksdb::ComparatorJniCallbackOptions* copt =
reinterpret_cast<rocksdb::ComparatorJniCallbackOptions*>(copt_handle);
const rocksdb::DirectComparatorJniCallback* c =
new rocksdb::DirectComparatorJniCallback(env, jobj, copt);
rocksdb::AbstractComparatorJni::setHandle(env, jobj, c);
return reinterpret_cast<jlong>(c);
}
// </editor-fold>

@@ -60,8 +60,8 @@ int BaseComparatorJniCallback::Compare(const Slice& a, const Slice& b) const {
// performance.
mtx_compare->Lock();
AbstractSliceJni::setHandle(m_env, m_jSliceA, &a);
AbstractSliceJni::setHandle(m_env, m_jSliceB, &b);
AbstractSliceJni::setHandle(m_env, m_jSliceA, &a, JNI_FALSE);
AbstractSliceJni::setHandle(m_env, m_jSliceB, &b, JNI_FALSE);
jint result =
m_env->CallIntMethod(m_jComparator, m_jCompareMethodId, m_jSliceA,
m_jSliceB);
@@ -89,7 +89,7 @@ void BaseComparatorJniCallback::FindShortestSeparator(
// performance.
mtx_findShortestSeparator->Lock();
AbstractSliceJni::setHandle(m_env, m_jSliceLimit, &limit);
AbstractSliceJni::setHandle(m_env, m_jSliceLimit, &limit, JNI_FALSE);
jstring jsResultStart =
(jstring)m_env->CallObjectMethod(m_jComparator,
m_jFindShortestSeparatorMethodId, jsStart, m_jSliceLimit);

@@ -19,17 +19,17 @@
/*
* Class: org_rocksdb_BloomFilter
* Method: createBloomFilter
* Signature: (IZ)V
* Signature: (IZ)J
*/
void Java_org_rocksdb_BloomFilter_createNewBloomFilter(
JNIEnv* env, jobject jobj, jint bits_per_key,
jlong Java_org_rocksdb_BloomFilter_createNewBloomFilter(
JNIEnv* env, jclass jcls, jint bits_per_key,
jboolean use_block_base_builder) {
rocksdb::FilterPolicy* fp = const_cast<rocksdb::FilterPolicy *>(
auto* fp = const_cast<rocksdb::FilterPolicy *>(
rocksdb::NewBloomFilterPolicy(bits_per_key, use_block_base_builder));
std::shared_ptr<rocksdb::FilterPolicy> *pFilterPolicy =
auto* pFilterPolicy =
new std::shared_ptr<rocksdb::FilterPolicy>;
*pFilterPolicy = std::shared_ptr<rocksdb::FilterPolicy>(fp);
rocksdb::FilterJni::setHandle(env, jobj, pFilterPolicy);
return reinterpret_cast<jlong>(pFilterPolicy);
}
/*

@@ -125,9 +125,9 @@ LoggerJniCallback::~LoggerJniCallback() {
/*
* Class: org_rocksdb_Logger
* Method: createNewLoggerOptions
* Signature: (J)V
* Signature: (J)J
*/
void Java_org_rocksdb_Logger_createNewLoggerOptions(
jlong Java_org_rocksdb_Logger_createNewLoggerOptions(
JNIEnv* env, jobject jobj, jlong joptions) {
rocksdb::LoggerJniCallback* c =
new rocksdb::LoggerJniCallback(env, jobj);
@@ -137,15 +137,15 @@ void Java_org_rocksdb_Logger_createNewLoggerOptions(
std::shared_ptr<rocksdb::LoggerJniCallback> *pLoggerJniCallback =
new std::shared_ptr<rocksdb::LoggerJniCallback>;
*pLoggerJniCallback = std::shared_ptr<rocksdb::LoggerJniCallback>(c);
rocksdb::LoggerJni::setHandle(env, jobj, pLoggerJniCallback);
return reinterpret_cast<jlong>(pLoggerJniCallback);
}
/*
* Class: org_rocksdb_Logger
* Method: createNewLoggerDbOptions
* Signature: (J)V
* Signature: (J)J
*/
void Java_org_rocksdb_Logger_createNewLoggerDbOptions(
jlong Java_org_rocksdb_Logger_createNewLoggerDbOptions(
JNIEnv* env, jobject jobj, jlong jdb_options) {
rocksdb::LoggerJniCallback* c =
new rocksdb::LoggerJniCallback(env, jobj);
@@ -155,7 +155,7 @@ void Java_org_rocksdb_Logger_createNewLoggerDbOptions(
std::shared_ptr<rocksdb::LoggerJniCallback> *pLoggerJniCallback =
new std::shared_ptr<rocksdb::LoggerJniCallback>;
*pLoggerJniCallback = std::shared_ptr<rocksdb::LoggerJniCallback>(c);
rocksdb::LoggerJni::setHandle(env, jobj, pLoggerJniCallback);
return reinterpret_cast<jlong>(pLoggerJniCallback);
}
/*

@@ -36,25 +36,25 @@
/*
* Class: org_rocksdb_Options
* Method: newOptions
* Signature: ()V
* Signature: ()J
*/
void Java_org_rocksdb_Options_newOptions__(JNIEnv* env, jobject jobj) {
jlong Java_org_rocksdb_Options_newOptions__(JNIEnv* env, jclass jcls) {
rocksdb::Options* op = new rocksdb::Options();
rocksdb::OptionsJni::setHandle(env, jobj, op);
return reinterpret_cast<jlong>(op);
}
/*
* Class: org_rocksdb_Options
* Method: newOptions
* Signature: (JJ)V
* Signature: (JJ)J
*/
void Java_org_rocksdb_Options_newOptions__JJ(JNIEnv* env, jobject jobj,
jlong Java_org_rocksdb_Options_newOptions__JJ(JNIEnv* env, jclass jcls,
jlong jdboptions, jlong jcfoptions) {
auto dbOpt = reinterpret_cast<const rocksdb::DBOptions*>(jdboptions);
auto cfOpt = reinterpret_cast<const rocksdb::ColumnFamilyOptions*>(
auto* dbOpt = reinterpret_cast<const rocksdb::DBOptions*>(jdboptions);
auto* cfOpt = reinterpret_cast<const rocksdb::ColumnFamilyOptions*>(
jcfoptions);
rocksdb::Options* op = new rocksdb::Options(*dbOpt, *cfOpt);
rocksdb::OptionsJni::setHandle(env, jobj, op);
return reinterpret_cast<jlong>(op);
}
/*
@@ -1081,21 +1081,20 @@ jbyte Java_org_rocksdb_Options_compressionType(
* vector.
*/
std::vector<rocksdb::CompressionType> rocksdb_compression_vector_helper(
JNIEnv* env, jobject jcompressionLevels) {
JNIEnv* env, jbyteArray jcompressionLevels) {
std::vector<rocksdb::CompressionType> compressionLevels;
// iterate over compressionLevels
jobject iteratorObj = env->CallObjectMethod(
jcompressionLevels, rocksdb::ListJni::getIteratorMethod(env));
while (env->CallBooleanMethod(
iteratorObj, rocksdb::ListJni::getHasNextMethod(env)) == JNI_TRUE) {
// get compression
jobject jcompression_obj = env->CallObjectMethod(iteratorObj,
rocksdb::ListJni::getNextMethod(env));
jbyte jcompression = env->CallByteMethod(jcompression_obj,
rocksdb::ByteJni::getByteValueMethod(env));
compressionLevels.push_back(static_cast<rocksdb::CompressionType>(
jcompression));
jsize len = env->GetArrayLength(jcompressionLevels);
jbyte* jcompressionLevel = env->GetByteArrayElements(jcompressionLevels,
NULL);
for(int i = 0; i < len; i++) {
jbyte jcl;
jcl = jcompressionLevel[i];
compressionLevels.push_back(static_cast<rocksdb::CompressionType>(jcl));
}
env->ReleaseByteArrayElements(jcompressionLevels, jcompressionLevel,
JNI_ABORT);
return compressionLevels;
}
@@ -1103,34 +1102,29 @@ std::vector<rocksdb::CompressionType> rocksdb_compression_vector_helper(
* Helper method to convert a CompressionType vector to a Java
* List.
*/
jobject rocksdb_compression_list_helper(JNIEnv* env,
jbyteArray rocksdb_compression_list_helper(JNIEnv* env,
std::vector<rocksdb::CompressionType> compressionLevels) {
jclass jListClazz = env->FindClass("java/util/ArrayList");
jmethodID midList = rocksdb::ListJni::getArrayListConstructorMethodId(
env, jListClazz);
jobject jcompressionLevels = env->NewObject(jListClazz,
midList, compressionLevels.size());
// insert in java list
jbyte jbuf[compressionLevels.size()];
for (std::vector<rocksdb::CompressionType>::size_type i = 0;
i != compressionLevels.size(); i++) {
jclass jByteClazz = env->FindClass("java/lang/Byte");
jmethodID midByte = env->GetMethodID(jByteClazz, "<init>", "(B)V");
jobject obj = env->NewObject(jByteClazz, midByte,
compressionLevels[i]);
env->CallBooleanMethod(jcompressionLevels,
rocksdb::ListJni::getListAddMethodId(env), obj);
jbuf[i] = compressionLevels[i];
}
// insert in java array
jbyteArray jcompressionLevels = env->NewByteArray(
static_cast<jsize>(compressionLevels.size()));
env->SetByteArrayRegion(jcompressionLevels, 0,
static_cast<jsize>(compressionLevels.size()), jbuf);
return jcompressionLevels;
}
/*
* Class: org_rocksdb_Options
* Method: setCompressionPerLevel
* Signature: (JLjava/util/List;)V
* Signature: (J[B)V
*/
void Java_org_rocksdb_Options_setCompressionPerLevel(
JNIEnv* env, jobject jobj, jlong jhandle,
jobject jcompressionLevels) {
jbyteArray jcompressionLevels) {
auto* options = reinterpret_cast<rocksdb::Options*>(jhandle);
std::vector<rocksdb::CompressionType> compressionLevels =
rocksdb_compression_vector_helper(env, jcompressionLevels);
@@ -1140,9 +1134,9 @@ void Java_org_rocksdb_Options_setCompressionPerLevel(
/*
* Class: org_rocksdb_Options
* Method: compressionPerLevel
* Signature: (J)Ljava/util/List;
* Signature: (J)[B
*/
jobject Java_org_rocksdb_Options_compressionPerLevel(
jbyteArray Java_org_rocksdb_Options_compressionPerLevel(
JNIEnv* env, jobject jobj, jlong jhandle) {
auto* options = reinterpret_cast<rocksdb::Options*>(jhandle);
return rocksdb_compression_list_helper(env,
@@ -1932,12 +1926,12 @@ void Java_org_rocksdb_Options_prepareForBulkLoad(
/*
* Class: org_rocksdb_ColumnFamilyOptions
* Method: newColumnFamilyOptions
* Signature: ()V
* Signature: ()J
*/
void Java_org_rocksdb_ColumnFamilyOptions_newColumnFamilyOptions(
JNIEnv* env, jobject jobj) {
jlong Java_org_rocksdb_ColumnFamilyOptions_newColumnFamilyOptions(
JNIEnv* env, jclass jcls) {
rocksdb::ColumnFamilyOptions* op = new rocksdb::ColumnFamilyOptions();
rocksdb::ColumnFamilyOptionsJni::setHandle(env, jobj, op);
return reinterpret_cast<jlong>(op);
}
/*
@@ -2285,11 +2279,11 @@ jbyte Java_org_rocksdb_ColumnFamilyOptions_compressionType(
/*
* Class: org_rocksdb_ColumnFamilyOptions
* Method: setCompressionPerLevel
* Signature: (JLjava/util/List;)V
* Signature: (J[B)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setCompressionPerLevel(
JNIEnv* env, jobject jobj, jlong jhandle,
jobject jcompressionLevels) {
jbyteArray jcompressionLevels) {
auto* options = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
std::vector<rocksdb::CompressionType> compressionLevels =
rocksdb_compression_vector_helper(env, jcompressionLevels);
@@ -2299,9 +2293,9 @@ void Java_org_rocksdb_ColumnFamilyOptions_setCompressionPerLevel(
/*
* Class: org_rocksdb_ColumnFamilyOptions
* Method: compressionPerLevel
* Signature: (J)Ljava/util/List;
* Signature: (J)[B
*/
jobject Java_org_rocksdb_ColumnFamilyOptions_compressionPerLevel(
jbyteArray Java_org_rocksdb_ColumnFamilyOptions_compressionPerLevel(
JNIEnv* env, jobject jobj, jlong jhandle) {
auto* options = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
return rocksdb_compression_list_helper(env,
@@ -3072,12 +3066,12 @@ void Java_org_rocksdb_ColumnFamilyOptions_setOptimizeFiltersForHits(
/*
* Class: org_rocksdb_DBOptions
* Method: newDBOptions
* Signature: ()V
* Signature: ()J
*/
void Java_org_rocksdb_DBOptions_newDBOptions(JNIEnv* env,
jobject jobj) {
jlong Java_org_rocksdb_DBOptions_newDBOptions(JNIEnv* env,
jclass jcls) {
rocksdb::DBOptions* dbop = new rocksdb::DBOptions();
rocksdb::DBOptionsJni::setHandle(env, jobj, dbop);
return reinterpret_cast<jlong>(dbop);
}
/*
@@ -3872,12 +3866,12 @@ jlong Java_org_rocksdb_DBOptions_bytesPerSync(
/*
* Class: org_rocksdb_WriteOptions
* Method: newWriteOptions
* Signature: ()V
* Signature: ()J
*/
void Java_org_rocksdb_WriteOptions_newWriteOptions(
JNIEnv* env, jobject jwrite_options) {
jlong Java_org_rocksdb_WriteOptions_newWriteOptions(
JNIEnv* env, jclass jcls) {
rocksdb::WriteOptions* op = new rocksdb::WriteOptions();
rocksdb::WriteOptionsJni::setHandle(env, jwrite_options, op);
return reinterpret_cast<jlong>(op);
}
/*
@@ -3889,8 +3883,6 @@ void Java_org_rocksdb_WriteOptions_disposeInternal(
JNIEnv* env, jobject jwrite_options, jlong jhandle) {
auto write_options = reinterpret_cast<rocksdb::WriteOptions*>(jhandle);
delete write_options;
rocksdb::WriteOptionsJni::setHandle(env, jwrite_options, nullptr);
}
/*
@@ -3939,12 +3931,12 @@ jboolean Java_org_rocksdb_WriteOptions_disableWAL(
/*
* Class: org_rocksdb_ReadOptions
* Method: newReadOptions
* Signature: ()V
* Signature: ()J
*/
void Java_org_rocksdb_ReadOptions_newReadOptions(
JNIEnv* env, jobject jobj) {
jlong Java_org_rocksdb_ReadOptions_newReadOptions(
JNIEnv* env, jclass jcls) {
auto read_opt = new rocksdb::ReadOptions();
rocksdb::ReadOptionsJni::setHandle(env, jobj, read_opt);
return reinterpret_cast<jlong>(read_opt);
}
/*
@@ -3955,7 +3947,6 @@ void Java_org_rocksdb_ReadOptions_newReadOptions(
void Java_org_rocksdb_ReadOptions_disposeInternal(
JNIEnv* env, jobject jobj, jlong jhandle) {
delete reinterpret_cast<rocksdb::ReadOptions*>(jhandle);
rocksdb::ReadOptionsJni::setHandle(env, jobj, nullptr);
}
/*
@@ -4052,12 +4043,12 @@ jlong Java_org_rocksdb_ReadOptions_snapshot(
/*
* Class: org_rocksdb_ComparatorOptions
* Method: newComparatorOptions
* Signature: ()V
* Signature: ()J
*/
void Java_org_rocksdb_ComparatorOptions_newComparatorOptions(
JNIEnv* env, jobject jobj) {
jlong Java_org_rocksdb_ComparatorOptions_newComparatorOptions(
JNIEnv* env, jclass jcls) {
auto comparator_opt = new rocksdb::ComparatorJniCallbackOptions();
rocksdb::ComparatorOptionsJni::setHandle(env, jobj, comparator_opt);
return reinterpret_cast<jlong>(comparator_opt);
}
/*
@@ -4090,7 +4081,6 @@ void Java_org_rocksdb_ComparatorOptions_setUseAdaptiveMutex(
void Java_org_rocksdb_ComparatorOptions_disposeInternal(
JNIEnv * env, jobject jobj, jlong jhandle) {
delete reinterpret_cast<rocksdb::ComparatorJniCallbackOptions*>(jhandle);
rocksdb::ComparatorOptionsJni::setHandle(env, jobj, nullptr);
}
/////////////////////////////////////////////////////////////////////
@@ -4099,12 +4089,12 @@ void Java_org_rocksdb_ComparatorOptions_disposeInternal(
/*
* Class: org_rocksdb_FlushOptions
* Method: newFlushOptions
* Signature: ()V
* Signature: ()J
*/
void Java_org_rocksdb_FlushOptions_newFlushOptions(
JNIEnv* env, jobject jobj) {
jlong Java_org_rocksdb_FlushOptions_newFlushOptions(
JNIEnv* env, jclass jcls) {
auto flush_opt = new rocksdb::FlushOptions();
rocksdb::FlushOptionsJni::setHandle(env, jobj, flush_opt);
return reinterpret_cast<jlong>(flush_opt);
}
/*
@@ -4137,5 +4127,4 @@ jboolean Java_org_rocksdb_FlushOptions_waitForFlush(
void Java_org_rocksdb_FlushOptions_disposeInternal(
JNIEnv * env, jobject jobj, jlong jhandle) {
delete reinterpret_cast<rocksdb::FlushOptions*>(jhandle);
rocksdb::FlushOptionsJni::setHandle(env, jobj, nullptr);
}

@@ -49,27 +49,25 @@ template<class PTR, class DERIVED> class RocksDBNativeClass {
assert(jclazz != nullptr);
return jclazz;
}
};
// Get the field id of the member variable to store
// the ptr
static jfieldID getHandleFieldID(JNIEnv* env) {
static jfieldID fid = env->GetFieldID(
DERIVED::getJClass(env), "nativeHandle_", "J");
assert(fid != nullptr);
return fid;
}
// Native class template for sub-classes of RocksMutableObject
template<class PTR, class DERIVED> class NativeRocksMutableObject
: public RocksDBNativeClass<PTR, DERIVED> {
public:
// Get the pointer from Java
static PTR getHandle(JNIEnv* env, jobject jobj) {
return reinterpret_cast<PTR>(
env->GetLongField(jobj, getHandleFieldID(env)));
static jmethodID getSetNativeHandleMethod(JNIEnv* env) {
static jmethodID mid = env->GetMethodID(
DERIVED::getJClass(env), "setNativeHandle", "(JZ)V");
assert(mid != nullptr);
return mid;
}
// Pass the pointer to the java side.
static void setHandle(JNIEnv* env, jobject jdb, PTR ptr) {
env->SetLongField(
jdb, getHandleFieldID(env),
reinterpret_cast<jlong>(ptr));
static void setHandle(JNIEnv* env, jobject jobj, PTR ptr,
jboolean java_owns_handle) {
env->CallVoidMethod(jobj, getSetNativeHandleMethod(env),
reinterpret_cast<jlong>(ptr), java_owns_handle);
}
};
@@ -407,7 +405,7 @@ class AbstractComparatorJni : public RocksDBNativeClass<
};
// The portal class for org.rocksdb.AbstractSlice
class AbstractSliceJni : public RocksDBNativeClass<
class AbstractSliceJni : public NativeRocksMutableObject<
const rocksdb::Slice*, AbstractSliceJni> {
public:
static jclass getJClass(JNIEnv* env) {
@@ -649,67 +647,6 @@ class WriteEntryJni {
assert(jclazz != nullptr);
return jclazz;
}
static void setWriteType(JNIEnv* env, jobject jwrite_entry,
WriteType write_type) {
jobject jwrite_type;
switch (write_type) {
case kPutRecord:
jwrite_type = WriteTypeJni::PUT(env);
break;
case kMergeRecord:
jwrite_type = WriteTypeJni::MERGE(env);
break;
case kDeleteRecord:
jwrite_type = WriteTypeJni::DELETE(env);
break;
case kLogDataRecord:
jwrite_type = WriteTypeJni::LOG(env);
break;
default:
jwrite_type = nullptr;
}
assert(jwrite_type != nullptr);
env->SetObjectField(jwrite_entry, getWriteTypeField(env), jwrite_type);
}
static void setKey(JNIEnv* env, jobject jwrite_entry,
const rocksdb::Slice* slice) {
jobject jkey = env->GetObjectField(jwrite_entry, getKeyField(env));
AbstractSliceJni::setHandle(env, jkey, slice);
}
static void setValue(JNIEnv* env, jobject jwrite_entry,
const rocksdb::Slice* slice) {
jobject jvalue = env->GetObjectField(jwrite_entry, getValueField(env));
AbstractSliceJni::setHandle(env, jvalue, slice);
}
private:
static jfieldID getWriteTypeField(JNIEnv* env) {
static jfieldID fid = env->GetFieldID(
getJClass(env), "type", "Lorg/rocksdb/WBWIRocksIterator$WriteType;");
assert(fid != nullptr);
return fid;
}
static jfieldID getKeyField(JNIEnv* env) {
static jfieldID fid = env->GetFieldID(
getJClass(env), "key", "Lorg/rocksdb/DirectSlice;");
assert(fid != nullptr);
return fid;
}
static jfieldID getValueField(JNIEnv* env) {
static jfieldID fid = env->GetFieldID(
getJClass(env), "value", "Lorg/rocksdb/DirectSlice;");
assert(fid != nullptr);
return fid;
}
};
class InfoLogLevelJni {

@@ -12,16 +12,13 @@
/*
* Class: org_rocksdb_RemoveEmptyValueCompactionFilter
* Method: createNewRemoveEmptyValueCompactionFilter0
* Signature: ()V
* Signature: ()J
*/
void Java_org_rocksdb_RemoveEmptyValueCompactionFilter_createNewRemoveEmptyValueCompactionFilter0(
JNIEnv* env, jobject jobj) {
const rocksdb::RemoveEmptyValueCompactionFilter* compaction_filter =
jlong Java_org_rocksdb_RemoveEmptyValueCompactionFilter_createNewRemoveEmptyValueCompactionFilter0(
JNIEnv* env, jclass jcls) {
auto* compaction_filter =
new rocksdb::RemoveEmptyValueCompactionFilter();
// set the native handle to our native compaction filter
static jclass jclazz =
env->FindClass("org/rocksdb/RemoveEmptyValueCompactionFilter");
static jfieldID fid = env->GetFieldID(jclazz, "nativeHandle_", "J");
env->SetLongField(jobj, fid, reinterpret_cast<jlong>(compaction_filter));
return reinterpret_cast<jlong>(compaction_filter);
}

@@ -22,17 +22,17 @@
* Signature: (Z)J
*/
jlong Java_org_rocksdb_RestoreOptions_newRestoreOptions(JNIEnv* env,
jobject jobj, jboolean keep_log_files) {
jclass jcls, jboolean keep_log_files) {
auto ropt = new rocksdb::RestoreOptions(keep_log_files);
return reinterpret_cast<jlong>(ropt);
}
/*
* Class: org_rocksdb_RestoreOptions
* Method: dispose
* Method: disposeInternal
* Signature: (J)V
*/
void Java_org_rocksdb_RestoreOptions_dispose(JNIEnv* env, jobject jobj,
void Java_org_rocksdb_RestoreOptions_disposeInternal(JNIEnv* env, jobject jobj,
jlong jhandle) {
auto ropt = reinterpret_cast<rocksdb::RestoreOptions*>(jhandle);
assert(ropt);
@@ -45,8 +45,8 @@ void Java_org_rocksdb_RestoreOptions_dispose(JNIEnv* env, jobject jobj,
* Signature: (J)J
*/
jlong Java_org_rocksdb_RestoreBackupableDB_newRestoreBackupableDB(JNIEnv* env,
jobject jobj, jlong jopt_handle) {
auto opt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jopt_handle);
jclass jcls, jlong jopt_handle) {
auto* opt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jopt_handle);
auto rdb = new rocksdb::RestoreBackupableDB(rocksdb::Env::Default(), *opt);
return reinterpret_cast<jlong>(rdb);
}
@@ -185,11 +185,11 @@ void Java_org_rocksdb_RestoreBackupableDB_garbageCollect(
/*
* Class: org_rocksdb_RestoreBackupableDB
* Method: dispose
* Method: disposeInternal
* Signature: (J)V
*/
void Java_org_rocksdb_RestoreBackupableDB_dispose(JNIEnv* env, jobject jobj,
jlong jhandle) {
void Java_org_rocksdb_RestoreBackupableDB_disposeInternal(JNIEnv* env,
jobject jobj, jlong jhandle) {
auto ropt = reinterpret_cast<rocksdb::RestoreBackupableDB*>(jhandle);
assert(ropt);
delete ropt;

@@ -11,6 +11,7 @@
#include <stdlib.h>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
#include <algorithm>
@@ -26,217 +27,142 @@
//////////////////////////////////////////////////////////////////////////////
// rocksdb::DB::Open
/*
* Class: org_rocksdb_RocksDB
* Method: open
* Signature: (JLjava/lang/String;)V
*/
void Java_org_rocksdb_RocksDB_open__JLjava_lang_String_2(
JNIEnv* env, jobject jdb, jlong jopt_handle, jstring jdb_path) {
auto opt = reinterpret_cast<rocksdb::Options*>(jopt_handle);
jlong rocksdb_open_helper(JNIEnv* env, jlong jopt_handle, jstring jdb_path,
std::function<rocksdb::Status(
const rocksdb::Options&, const std::string&, rocksdb::DB**)> open_fn
) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jopt_handle);
rocksdb::DB* db = nullptr;
const char* db_path = env->GetStringUTFChars(jdb_path, 0);
rocksdb::Status s = rocksdb::DB::Open(*opt, db_path, &db);
const char* db_path = env->GetStringUTFChars(jdb_path, NULL);
rocksdb::Status s = open_fn(*opt, db_path, &db);
env->ReleaseStringUTFChars(jdb_path, db_path);
if (s.ok()) {
rocksdb::RocksDBJni::setHandle(env, jdb, db);
return;
return reinterpret_cast<jlong>(db);
} else {
rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
return 0;
}
rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
}
/*
* Class: org_rocksdb_RocksDB
* Method: openROnly
* Signature: (JLjava/lang/String;)V
* Method: open
* Signature: (JLjava/lang/String;)J
*/
void Java_org_rocksdb_RocksDB_openROnly__JLjava_lang_String_2(
JNIEnv* env, jobject jdb, jlong jopt_handle, jstring jdb_path) {
auto opt = reinterpret_cast<rocksdb::Options*>(jopt_handle);
rocksdb::DB* db = nullptr;
const char* db_path = env->GetStringUTFChars(jdb_path, 0);
rocksdb::Status s = rocksdb::DB::OpenForReadOnly(*opt,
db_path, &db);
env->ReleaseStringUTFChars(jdb_path, db_path);
if (s.ok()) {
rocksdb::RocksDBJni::setHandle(env, jdb, db);
return;
}
rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
jlong Java_org_rocksdb_RocksDB_open__JLjava_lang_String_2(
JNIEnv* env, jclass jcls, jlong jopt_handle, jstring jdb_path) {
return rocksdb_open_helper(env, jopt_handle, jdb_path,
(rocksdb::Status(*)
(const rocksdb::Options&, const std::string&, rocksdb::DB**)
)&rocksdb::DB::Open
);
}
/*
* Class: org_rocksdb_RocksDB
* Method: openROnly
* Signature: (JLjava/lang/String;Ljava/util/List;I)Ljava/util/List;
* Signature: (JLjava/lang/String;)J
*/
jobject
Java_org_rocksdb_RocksDB_openROnly__JLjava_lang_String_2Ljava_util_List_2I(
JNIEnv* env, jobject jdb, jlong jopt_handle, jstring jdb_path,
jobject jcfdesc_list, jint jcfdesc_count) {
auto opt = reinterpret_cast<rocksdb::Options*>(jopt_handle);
rocksdb::DB* db = nullptr;
const char* db_path = env->GetStringUTFChars(jdb_path, 0);
std::vector<jbyte*> cfnames_to_free;
std::vector<jbyteArray> jcfnames_for_free;
jlong Java_org_rocksdb_RocksDB_openROnly__JLjava_lang_String_2(
JNIEnv* env, jclass jcls, jlong jopt_handle, jstring jdb_path) {
return rocksdb_open_helper(env, jopt_handle, jdb_path, [](
const rocksdb::Options& options,
const std::string& db_path, rocksdb::DB** db) {
return rocksdb::DB::OpenForReadOnly(options, db_path, db);
});
}
/*
 * Opens a RocksDB instance together with a set of column families.
 *
 * jcolumn_names and jcolumn_options are parallel arrays: a byte[] name and a
 * native ColumnFamilyOptions handle per column family. open_fn selects the
 * concrete open (plain or read-only).
 *
 * Returns a jlong[] of native handles: [0] is the DB pointer, followed by one
 * ColumnFamilyHandle pointer per requested column family. On failure throws a
 * RocksDBException on the Java side and returns NULL.
 */
jlongArray rocksdb_open_helper(JNIEnv* env, jlong jopt_handle,
    jstring jdb_path, jobjectArray jcolumn_names, jlongArray jcolumn_options,
    std::function<rocksdb::Status(
      const rocksdb::DBOptions&, const std::string&,
      const std::vector<rocksdb::ColumnFamilyDescriptor>&,
      std::vector<rocksdb::ColumnFamilyHandle*>*,
      rocksdb::DB**)> open_fn
    ) {
  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jopt_handle);
  const char* db_path = env->GetStringUTFChars(jdb_path, NULL);

  // build the native ColumnFamilyDescriptor list from the parallel arrays
  std::vector<rocksdb::ColumnFamilyDescriptor> column_families;
  jsize len_cols = env->GetArrayLength(jcolumn_names);
  jlong* jco = env->GetLongArrayElements(jcolumn_options, NULL);
  for (int i = 0; i < len_cols; i++) {
    jobject jcn = env->GetObjectArrayElement(jcolumn_names, i);
    jbyteArray jcn_ba = reinterpret_cast<jbyteArray>(jcn);
    jbyte* jcf_name = env->GetByteArrayElements(jcn_ba, NULL);
    const int jcf_name_len = env->GetArrayLength(jcn_ba);

    //TODO(AR) do I need to make a copy of jco[i] ?

    // the descriptor copies the name, so the array elements can be
    // released immediately afterwards
    std::string cf_name(reinterpret_cast<char*>(jcf_name), jcf_name_len);
    rocksdb::ColumnFamilyOptions* cf_options =
        reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jco[i]);
    column_families.push_back(
        rocksdb::ColumnFamilyDescriptor(cf_name, *cf_options));

    env->ReleaseByteArrayElements(jcn_ba, jcf_name, JNI_ABORT);
    env->DeleteLocalRef(jcn);
  }
  env->ReleaseLongArrayElements(jcolumn_options, jco, JNI_ABORT);

  std::vector<rocksdb::ColumnFamilyHandle*> handles;
  rocksdb::DB* db = nullptr;
  rocksdb::Status s = open_fn(*opt, db_path, column_families,
      &handles, &db);
  env->ReleaseStringUTFChars(jdb_path, db_path);

  // check if open operation was successful
  if (s.ok()) {
    const jsize resultsLen = 1 + len_cols;  // db handle + cf handles
    // a std::vector is used here because a variable-length array is not
    // standard C++
    std::vector<jlong> results(resultsLen);
    results[0] = reinterpret_cast<jlong>(db);
    for (int i = 1; i <= len_cols; i++) {
      results[i] = reinterpret_cast<jlong>(handles[i - 1]);
    }

    jlongArray jresults = env->NewLongArray(resultsLen);
    env->SetLongArrayRegion(jresults, 0, resultsLen, results.data());
    return jresults;
  } else {
    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
    return NULL;
  }
}
/*
* Class: org_rocksdb_RocksDB
* Method: open
* Signature: (JLjava/lang/String;Ljava/util/List;I)Ljava/util/List;
* Method: openROnly
* Signature: (JLjava/lang/String;[[B[J)[J
*/
jobject Java_org_rocksdb_RocksDB_open__JLjava_lang_String_2Ljava_util_List_2I(
JNIEnv* env, jobject jdb, jlong jopt_handle, jstring jdb_path,
jobject jcfdesc_list, jint jcfdesc_count) {
auto opt = reinterpret_cast<rocksdb::Options*>(jopt_handle);
rocksdb::DB* db = nullptr;
const char* db_path = env->GetStringUTFChars(jdb_path, 0);
std::vector<jbyte*> cfnames_to_free;
std::vector<jbyteArray> jcfnames_for_free;
std::vector<rocksdb::ColumnFamilyDescriptor> column_families;
std::vector<rocksdb::ColumnFamilyHandle* > handles;
// get iterator for ColumnFamilyDescriptors
jobject iteratorObj = env->CallObjectMethod(
jcfdesc_list, rocksdb::ListJni::getIteratorMethod(env));
// iterate over ColumnFamilyDescriptors
while (env->CallBooleanMethod(
iteratorObj, rocksdb::ListJni::getHasNextMethod(env)) == JNI_TRUE) {
// get ColumnFamilyDescriptor
jobject jcf_descriptor = env->CallObjectMethod(iteratorObj,
rocksdb::ListJni::getNextMethod(env));
// get ColumnFamilyName
jbyteArray cf_name_in_byte_array = static_cast<jbyteArray>(
env->CallObjectMethod(jcf_descriptor,
rocksdb::ColumnFamilyDescriptorJni::getColumnFamilyNameMethod(
env)));
// get CF Options
jobject jcf_opt_obj = env->CallObjectMethod(jcf_descriptor,
rocksdb::ColumnFamilyDescriptorJni::getColumnFamilyOptionsMethod(
env));
rocksdb::ColumnFamilyOptions* cfOptions =
rocksdb::ColumnFamilyOptionsJni::getHandle(env, jcf_opt_obj);
jbyte* cfname = env->GetByteArrayElements(cf_name_in_byte_array, 0);
const int len = env->GetArrayLength(cf_name_in_byte_array);
// free allocated cfnames after call to open
cfnames_to_free.push_back(cfname);
jcfnames_for_free.push_back(cf_name_in_byte_array);
column_families.push_back(rocksdb::ColumnFamilyDescriptor(
std::string(reinterpret_cast<char *>(cfname), len), *cfOptions));
}
rocksdb::Status s = rocksdb::DB::Open(*opt, db_path, column_families,
&handles, &db);
env->ReleaseStringUTFChars(jdb_path, db_path);
// free jbyte allocations
for (std::vector<jbyte*>::size_type i = 0;
i != cfnames_to_free.size(); i++) {
// free cfnames
env->ReleaseByteArrayElements(jcfnames_for_free[i], cfnames_to_free[i], 0);
}
// check if open operation was successful
if (s.ok()) {
rocksdb::RocksDBJni::setHandle(env, jdb, db);
jclass jListClazz = env->FindClass("java/util/ArrayList");
jmethodID midList = rocksdb::ListJni::getArrayListConstructorMethodId(
env, jListClazz);
jobject jcfhandle_list = env->NewObject(jListClazz,
midList, handles.size());
// insert in java list
for (std::vector<rocksdb::ColumnFamilyHandle*>::size_type i = 0;
i != handles.size(); i++) {
// jlong must be converted to Long due to collections restrictions
jclass jLongClazz = env->FindClass("java/lang/Long");
jmethodID midLong = env->GetMethodID(jLongClazz, "<init>", "(J)V");
jobject obj = env->NewObject(jLongClazz, midLong,
reinterpret_cast<jlong>(handles[i]));
env->CallBooleanMethod(jcfhandle_list,
rocksdb::ListJni::getListAddMethodId(env), obj);
}
jlongArray Java_org_rocksdb_RocksDB_openROnly__JLjava_lang_String_2_3_3B_3J(
JNIEnv* env, jclass jcls, jlong jopt_handle, jstring jdb_path,
jobjectArray jcolumn_names, jlongArray jcolumn_options) {
return rocksdb_open_helper(env, jopt_handle, jdb_path, jcolumn_names,
jcolumn_options, [](
const rocksdb::DBOptions& options, const std::string& db_path,
const std::vector<rocksdb::ColumnFamilyDescriptor>& column_families,
std::vector<rocksdb::ColumnFamilyHandle*>* handles, rocksdb::DB** db) {
return rocksdb::DB::OpenForReadOnly(options, db_path, column_families,
handles, db);
});
}
return jcfhandle_list;
}
rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
return nullptr;
/*
 * Class:     org_rocksdb_RocksDB
 * Method:    open
 * Signature: (JLjava/lang/String;[[B[J)[J
 */
jlongArray Java_org_rocksdb_RocksDB_open__JLjava_lang_String_2_3_3B_3J(
    JNIEnv* env, jclass jcls, jlong jopt_handle, jstring jdb_path,
    jobjectArray jcolumn_names, jlongArray jcolumn_options) {
  // DB::Open is overloaded; a lambda (rather than a function-pointer cast)
  // selects the column-family overload, keeping the style consistent with
  // the read-only variant
  return rocksdb_open_helper(env, jopt_handle, jdb_path, jcolumn_names,
      jcolumn_options, [](
          const rocksdb::DBOptions& options, const std::string& db_path,
          const std::vector<rocksdb::ColumnFamilyDescriptor>& column_families,
          std::vector<rocksdb::ColumnFamilyHandle*>* handles,
          rocksdb::DB** db) {
    return rocksdb::DB::Open(options, db_path, column_families, handles, db);
  });
}
//////////////////////////////////////////////////////////////////////////////
@ -245,25 +171,21 @@ jobject Java_org_rocksdb_RocksDB_open__JLjava_lang_String_2Ljava_util_List_2I(
/*
* Class: org_rocksdb_RocksDB
* Method: listColumnFamilies
* Signature: (JLjava/lang/String;)Ljava/util/List;
* Signature: (JLjava/lang/String;)[[B
*/
jobject Java_org_rocksdb_RocksDB_listColumnFamilies(
jobjectArray Java_org_rocksdb_RocksDB_listColumnFamilies(
JNIEnv* env, jclass jclazz, jlong jopt_handle, jstring jdb_path) {
std::vector<std::string> column_family_names;
auto opt = reinterpret_cast<rocksdb::Options*>(jopt_handle);
auto* opt = reinterpret_cast<rocksdb::Options*>(jopt_handle);
const char* db_path = env->GetStringUTFChars(jdb_path, 0);
jobject jvalue_list = nullptr;
rocksdb::Status s = rocksdb::DB::ListColumnFamilies(*opt, db_path,
&column_family_names);
env->ReleaseStringUTFChars(jdb_path, db_path);
if (s.ok()) {
// Don't reuse class pointer
jclass jListClazz = env->FindClass("java/util/ArrayList");
jmethodID mid = rocksdb::ListJni::getArrayListConstructorMethodId(env,
jListClazz);
jvalue_list = env->NewObject(jListClazz, mid, column_family_names.size());
jclass jcls_ba = env->FindClass("[B");
jobjectArray jresults = env->NewObjectArray(
static_cast<jsize>(column_family_names.size()), jcls_ba, NULL);
if (s.ok()) {
for (std::vector<std::string>::size_type i = 0;
i < column_family_names.size(); i++) {
jbyteArray jcf_value =
@ -271,11 +193,11 @@ jobject Java_org_rocksdb_RocksDB_listColumnFamilies(
env->SetByteArrayRegion(
jcf_value, 0, static_cast<jsize>(column_family_names[i].size()),
reinterpret_cast<const jbyte*>(column_family_names[i].data()));
env->CallBooleanMethod(jvalue_list,
rocksdb::ListJni::getListAddMethodId(env), jcf_value);
env->SetObjectArrayElement(jresults, static_cast<jsize>(i), jcf_value);
env->DeleteLocalRef(jcf_value);
}
}
return jvalue_list;
return jresults;
}
//////////////////////////////////////////////////////////////////////////////
@ -398,12 +320,12 @@ void Java_org_rocksdb_RocksDB_put__JJ_3BI_3BIJ(
/*
 * Class:     org_rocksdb_RocksDB
 * Method:    write0
 * Signature: (JJJ)V
 */
void Java_org_rocksdb_RocksDB_write0(
    JNIEnv* env, jobject jdb, jlong jdb_handle,
    jlong jwrite_options_handle, jlong jwb_handle) {
  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
  auto* write_options = reinterpret_cast<rocksdb::WriteOptions*>(
      jwrite_options_handle);
  auto* wb = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle);

  rocksdb::Status s = db->Write(*write_options, wb);
  if (!s.ok()) {
    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
  }
}
@ -418,12 +340,12 @@ void Java_org_rocksdb_RocksDB_write0(
/*
 * Class:     org_rocksdb_RocksDB
 * Method:    write1
 * Signature: (JJJ)V
 */
void Java_org_rocksdb_RocksDB_write1(
    JNIEnv* env, jobject jdb, jlong jdb_handle,
    jlong jwrite_options_handle, jlong jwbwi_handle) {
  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
  auto* write_options = reinterpret_cast<rocksdb::WriteOptions*>(
      jwrite_options_handle);
  auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
  // extract the plain WriteBatch from the indexed batch for DB::Write
  auto* wb = wbwi->GetWriteBatch();

  rocksdb::Status s = db->Write(*write_options, wb);
  if (!s.ok()) {
    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
  }
}
@ -470,12 +392,12 @@ jboolean key_may_exist_helper(JNIEnv* env, rocksdb::DB* db,
/*
 * Class:     org_rocksdb_RocksDB
 * Method:    keyMayExist
 * Signature: (J[BILjava/lang/StringBuffer;)Z
 */
jboolean Java_org_rocksdb_RocksDB_keyMayExist__J_3BILjava_lang_StringBuffer_2(
    JNIEnv* env, jobject jdb, jlong jdb_handle, jbyteArray jkey, jint jkey_len,
    jobject jstring_buffer) {
  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
  // default column family (nullptr) with default read options
  return key_may_exist_helper(env, db, rocksdb::ReadOptions(),
      nullptr, jkey, jkey_len, jstring_buffer);
}
@ -483,13 +405,13 @@ jboolean Java_org_rocksdb_RocksDB_keyMayExist___3BILjava_lang_StringBuffer_2(
/*
 * Class:     org_rocksdb_RocksDB
 * Method:    keyMayExist
 * Signature: (J[BIJLjava/lang/StringBuffer;)Z
 */
jboolean Java_org_rocksdb_RocksDB_keyMayExist__J_3BIJLjava_lang_StringBuffer_2(
    JNIEnv* env, jobject jdb, jlong jdb_handle, jbyteArray jkey, jint jkey_len,
    jlong jcf_handle, jobject jstring_buffer) {
  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(
      jcf_handle);
  if (cf_handle != nullptr) {
    return key_may_exist_helper(env, db, rocksdb::ReadOptions(),
        cf_handle, jkey, jkey_len, jstring_buffer);
  } else {
    rocksdb::RocksDBExceptionJni::ThrowNew(env,
        rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
    // keyMayExist is allowed to report false positives, so `true` is the
    // safe value to return alongside the pending Java exception
    return true;
  }
}
/*
 * Class:     org_rocksdb_RocksDB
 * Method:    keyMayExist
 * Signature: (JJ[BILjava/lang/StringBuffer;)Z
 */
jboolean Java_org_rocksdb_RocksDB_keyMayExist__JJ_3BILjava_lang_StringBuffer_2(
    JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jread_options_handle,
    jbyteArray jkey, jint jkey_len, jobject jstring_buffer) {
  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
  auto& read_options = *reinterpret_cast<rocksdb::ReadOptions*>(
      jread_options_handle);
  // default column family (nullptr) with caller-supplied read options
  return key_may_exist_helper(env, db, read_options,
      nullptr, jkey, jkey_len, jstring_buffer);
}
@ -519,15 +441,15 @@ jboolean Java_org_rocksdb_RocksDB_keyMayExist__J_3BILjava_lang_StringBuffer_2(
/*
 * Class:     org_rocksdb_RocksDB
 * Method:    keyMayExist
 * Signature: (JJ[BIJLjava/lang/StringBuffer;)Z
 */
jboolean Java_org_rocksdb_RocksDB_keyMayExist__JJ_3BIJLjava_lang_StringBuffer_2(
    JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jread_options_handle,
    jbyteArray jkey, jint jkey_len, jlong jcf_handle, jobject jstring_buffer) {
  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
  auto& read_options = *reinterpret_cast<rocksdb::ReadOptions*>(
      jread_options_handle);
  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(
      jcf_handle);
  if (cf_handle != nullptr) {
    return key_may_exist_helper(env, db, read_options, cf_handle,
        jkey, jkey_len, jstring_buffer);
  } else {
    rocksdb::RocksDBExceptionJni::ThrowNew(env,
        rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
    // keyMayExist may report false positives, so `true` is the safe value
    // to return alongside the pending Java exception
    return true;
  }
}
//////////////////////////////////////////////////////////////////////////////
@ -703,49 +625,38 @@ jint rocksdb_get_helper(
}
// cf multi get
jobject multi_get_helper(JNIEnv* env, jobject jdb, rocksdb::DB* db,
const rocksdb::ReadOptions& rOpt, jobject jkey_list, jint jkeys_count,
jobject jcfhandle_list) {
std::vector<rocksdb::Slice> keys;
std::vector<jbyte*> keys_to_free;
jobjectArray multi_get_helper(JNIEnv* env, jobject jdb, rocksdb::DB* db,
const rocksdb::ReadOptions& rOpt, jobjectArray jkeys,
jlongArray jcolumn_family_handles) {
std::vector<rocksdb::ColumnFamilyHandle*> cf_handles;
if (jcfhandle_list != nullptr) {
// get cf iterator
jobject cfIteratorObj = env->CallObjectMethod(
jcfhandle_list, rocksdb::ListJni::getIteratorMethod(env));
// iterate over keys and convert java byte array to slice
while (env->CallBooleanMethod(
cfIteratorObj, rocksdb::ListJni::getHasNextMethod(env)) == JNI_TRUE) {
jobject jobj = (jbyteArray) env->CallObjectMethod(
cfIteratorObj, rocksdb::ListJni::getNextMethod(env));
rocksdb::ColumnFamilyHandle* cfHandle =
rocksdb::ColumnFamilyHandleJni::getHandle(env, jobj);
cf_handles.push_back(cfHandle);
if (jcolumn_family_handles != nullptr) {
jsize len_cols = env->GetArrayLength(jcolumn_family_handles);
jlong* jcfh = env->GetLongArrayElements(jcolumn_family_handles, NULL);
for (int i = 0; i < len_cols; i++) {
auto* cf_handle =
reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcfh[i]);
cf_handles.push_back(cf_handle);
}
env->ReleaseLongArrayElements(jcolumn_family_handles, jcfh, JNI_ABORT);
}
// Process key list
// get iterator
jobject iteratorObj = env->CallObjectMethod(
jkey_list, rocksdb::ListJni::getIteratorMethod(env));
// iterate over keys and convert java byte array to slice
while (env->CallBooleanMethod(
iteratorObj, rocksdb::ListJni::getHasNextMethod(env)) == JNI_TRUE) {
jbyteArray jkey = (jbyteArray) env->CallObjectMethod(
iteratorObj, rocksdb::ListJni::getNextMethod(env));
jint key_length = env->GetArrayLength(jkey);
jbyte* key = new jbyte[key_length];
env->GetByteArrayRegion(jkey, 0, key_length, key);
// store allocated jbyte to free it after multiGet call
keys_to_free.push_back(key);
rocksdb::Slice key_slice(
reinterpret_cast<char*>(key), key_length);
std::vector<rocksdb::Slice> keys;
std::vector<std::tuple<jbyteArray, jbyte*, jobject>> keys_to_free;
jsize len_keys = env->GetArrayLength(jkeys);
if(env->EnsureLocalCapacity(len_keys) != 0) {
// out of memory
return NULL;
}
for (int i = 0; i < len_keys; i++) {
jobject jk = env->GetObjectArrayElement(jkeys, i);
jbyteArray jk_ba = reinterpret_cast<jbyteArray>(jk);
jsize len_key = env->GetArrayLength(jk_ba);
jbyte* jk_val = env->GetByteArrayElements(jk_ba, NULL);
rocksdb::Slice key_slice(reinterpret_cast<char*>(jk_val), len_key);
keys.push_back(key_slice);
keys_to_free.push_back(std::make_tuple(jk_ba, jk_val, jk));
}
std::vector<std::string> values;
@ -756,13 +667,23 @@ jobject multi_get_helper(JNIEnv* env, jobject jdb, rocksdb::DB* db,
s = db->MultiGet(rOpt, cf_handles, keys, &values);
}
// Don't reuse class pointer
jclass jclazz = env->FindClass("java/util/ArrayList");
jmethodID mid = rocksdb::ListJni::getArrayListConstructorMethodId(
env, jclazz);
jobject jvalue_list = env->NewObject(jclazz, mid, jkeys_count);
// free up allocated byte arrays
for (std::vector<std::tuple<jbyteArray, jbyte*, jobject>>::size_type i = 0;
i < keys_to_free.size(); i++) {
jobject jk;
jbyteArray jk_ba;
jbyte* jk_val;
std::tie(jk_ba, jk_val, jk) = keys_to_free[i];
env->ReleaseByteArrayElements(jk_ba, jk_val, JNI_ABORT);
env->DeleteLocalRef(jk);
}
// prepare the results
jclass jcls_ba = env->FindClass("[B");
jobjectArray jresults =
env->NewObjectArray(static_cast<jsize>(s.size()), jcls_ba, NULL);
// insert in java list
// add to the jresults
for (std::vector<rocksdb::Status>::size_type i = 0; i != s.size(); i++) {
if (s[i].ok()) {
jbyteArray jentry_value =
@ -770,73 +691,60 @@ jobject multi_get_helper(JNIEnv* env, jobject jdb, rocksdb::DB* db,
env->SetByteArrayRegion(
jentry_value, 0, static_cast<jsize>(values[i].size()),
reinterpret_cast<const jbyte*>(values[i].c_str()));
env->CallBooleanMethod(
jvalue_list, rocksdb::ListJni::getListAddMethodId(env),
jentry_value);
} else {
env->CallBooleanMethod(
jvalue_list, rocksdb::ListJni::getListAddMethodId(env), nullptr);
env->SetObjectArrayElement(jresults, static_cast<jsize>(i), jentry_value);
env->DeleteLocalRef(jentry_value);
}
}
// free up allocated byte arrays
for (std::vector<jbyte*>::size_type i = 0; i != keys_to_free.size(); i++) {
delete[] keys_to_free[i];
}
keys_to_free.clear();
return jvalue_list;
return jresults;
}
/*
* Class: org_rocksdb_RocksDB
* Method: multiGet
* Signature: (JLjava/util/List;I)Ljava/util/List;
* Signature: (J[[B)[[B
*/
jobject Java_org_rocksdb_RocksDB_multiGet__JLjava_util_List_2I(
JNIEnv* env, jobject jdb, jlong jdb_handle,
jobject jkey_list, jint jkeys_count) {
jobjectArray Java_org_rocksdb_RocksDB_multiGet__J_3_3B(
JNIEnv* env, jobject jdb, jlong jdb_handle, jobjectArray jkeys) {
return multi_get_helper(env, jdb, reinterpret_cast<rocksdb::DB*>(jdb_handle),
rocksdb::ReadOptions(), jkey_list, jkeys_count, nullptr);
rocksdb::ReadOptions(), jkeys, nullptr);
}
/*
* Class: org_rocksdb_RocksDB
* Method: multiGet
* Signature: (JLjava/util/List;ILjava/util/List;)Ljava/util/List;
* Signature: (J[[B[J)[[B
*/
jobject
Java_org_rocksdb_RocksDB_multiGet__JLjava_util_List_2ILjava_util_List_2(
JNIEnv* env, jobject jdb, jlong jdb_handle,
jobject jkey_list, jint jkeys_count, jobject jcfhandle_list) {
jobjectArray Java_org_rocksdb_RocksDB_multiGet__J_3_3B_3J(
JNIEnv* env, jobject jdb, jlong jdb_handle, jobjectArray jkeys,
jlongArray jcolumn_family_handles) {
return multi_get_helper(env, jdb, reinterpret_cast<rocksdb::DB*>(jdb_handle),
rocksdb::ReadOptions(), jkey_list, jkeys_count, jcfhandle_list);
rocksdb::ReadOptions(), jkeys, jcolumn_family_handles);
}
/*
* Class: org_rocksdb_RocksDB
* Method: multiGet
* Signature: (JJLjava/util/List;I)Ljava/util/List;
* Signature: (JJ[[B)[[B
*/
jobject Java_org_rocksdb_RocksDB_multiGet__JJLjava_util_List_2I(
JNIEnv* env, jobject jdb, jlong jdb_handle,
jlong jropt_handle, jobject jkey_list, jint jkeys_count) {
jobjectArray Java_org_rocksdb_RocksDB_multiGet__JJ_3_3B(
JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jropt_handle,
jobjectArray jkeys) {
return multi_get_helper(env, jdb, reinterpret_cast<rocksdb::DB*>(jdb_handle),
*reinterpret_cast<rocksdb::ReadOptions*>(jropt_handle), jkey_list,
jkeys_count, nullptr);
*reinterpret_cast<rocksdb::ReadOptions*>(jropt_handle), jkeys, nullptr);
}
/*
* Class: org_rocksdb_RocksDB
* Method: multiGet
* Signature: (JJLjava/util/List;ILjava/util/List;)Ljava/util/List;
* Signature: (JJ[[B[J)[[B
*/
jobject
Java_org_rocksdb_RocksDB_multiGet__JJLjava_util_List_2ILjava_util_List_2(
JNIEnv* env, jobject jdb, jlong jdb_handle,
jlong jropt_handle, jobject jkey_list, jint jkeys_count,
jobject jcfhandle_list) {
jobjectArray Java_org_rocksdb_RocksDB_multiGet__JJ_3_3B_3J(
JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jropt_handle,
jobjectArray jkeys, jlongArray jcolumn_family_handles) {
return multi_get_helper(env, jdb, reinterpret_cast<rocksdb::DB*>(jdb_handle),
*reinterpret_cast<rocksdb::ReadOptions*>(jropt_handle), jkey_list,
jkeys_count, jcfhandle_list);
*reinterpret_cast<rocksdb::ReadOptions*>(jropt_handle), jkeys,
jcolumn_family_handles);
}
/*
@ -1204,47 +1112,42 @@ jlong Java_org_rocksdb_RocksDB_iteratorCF__JJJ(
/*
 * Class:     org_rocksdb_RocksDB
 * Method:    iterators
 * Signature: (J[JJ)[J
 */
jlongArray Java_org_rocksdb_RocksDB_iterators(
    JNIEnv* env, jobject jdb, jlong db_handle,
    jlongArray jcolumn_family_handles, jlong jread_options_handle) {
  auto* db = reinterpret_cast<rocksdb::DB*>(db_handle);
  auto& read_options = *reinterpret_cast<rocksdb::ReadOptions*>(
      jread_options_handle);

  std::vector<rocksdb::ColumnFamilyHandle*> cf_handles;
  if (jcolumn_family_handles != nullptr) {
    jsize len_cols = env->GetArrayLength(jcolumn_family_handles);
    jlong* jcfh = env->GetLongArrayElements(jcolumn_family_handles, NULL);
    for (int i = 0; i < len_cols; i++) {
      auto* cf_handle =
          reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcfh[i]);
      cf_handles.push_back(cf_handle);
    }
    env->ReleaseLongArrayElements(jcolumn_family_handles, jcfh, JNI_ABORT);
  }

  std::vector<rocksdb::Iterator*> iterators;
  rocksdb::Status s = db->NewIterators(read_options,
      cf_handles, &iterators);
  if (s.ok()) {
    // return the native iterator pointers as a jlong[], one per handle
    jlongArray jLongArray =
        env->NewLongArray(static_cast<jsize>(iterators.size()));
    for (std::vector<rocksdb::Iterator*>::size_type i = 0;
        i < iterators.size(); i++) {
      env->SetLongArrayRegion(jLongArray, static_cast<jsize>(i), 1,
          reinterpret_cast<const jlong*>(&iterators[i]));
    }
    return jLongArray;
  } else {
    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
    return NULL;
  }
}
/*
@ -1262,32 +1165,23 @@ jlong Java_org_rocksdb_RocksDB_getDefaultColumnFamily(
/*
 * Class:     org_rocksdb_RocksDB
 * Method:    createColumnFamily
 * Signature: (J[BJ)J
 */
jlong Java_org_rocksdb_RocksDB_createColumnFamily(
    JNIEnv* env, jobject jdb, jlong jdb_handle,
    jbyteArray jcolumn_name, jlong jcolumn_options) {
  rocksdb::ColumnFamilyHandle* handle;
  auto db_handle = reinterpret_cast<rocksdb::DB*>(jdb_handle);

  jbyte* cfname = env->GetByteArrayElements(jcolumn_name, 0);
  const int len = env->GetArrayLength(jcolumn_name);

  auto* cfOptions =
      reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jcolumn_options);

  rocksdb::Status s = db_handle->CreateColumnFamily(
      *cfOptions, std::string(reinterpret_cast<char *>(cfname), len), &handle);
  env->ReleaseByteArrayElements(jcolumn_name, cfname, 0);

  if (s.ok()) {
    // the Java side wraps this pointer in a ColumnFamilyHandle object
    return reinterpret_cast<jlong>(handle);
  }
  rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
  return 0;
}
@ -22,12 +22,11 @@
/*
* Class: org_rocksdb_AbstractSlice
* Method: createNewSliceFromString
* Signature: (Ljava/lang/String;)V
* Signature: (Ljava/lang/String;)J
*/
void Java_org_rocksdb_AbstractSlice_createNewSliceFromString(
JNIEnv* env, jobject jobj, jstring jstr) {
const auto* str = env->GetStringUTFChars(jstr, 0);
jlong Java_org_rocksdb_AbstractSlice_createNewSliceFromString(
JNIEnv * env, jclass jcls, jstring jstr) {
const auto* str = env->GetStringUTFChars(jstr, NULL);
const size_t len = strlen(str);
char* buf = new char[len + 1];
memcpy(buf, str, len);
@ -35,7 +34,7 @@ void Java_org_rocksdb_AbstractSlice_createNewSliceFromString(
env->ReleaseStringUTFChars(jstr, str);
const auto* slice = new rocksdb::Slice(buf);
rocksdb::AbstractSliceJni::setHandle(env, jobj, slice);
return reinterpret_cast<jlong>(slice);
}
/*
@ -115,10 +114,10 @@ void Java_org_rocksdb_AbstractSlice_disposeInternal(
/*
* Class: org_rocksdb_Slice
* Method: createNewSlice0
* Signature: ([BI)V
* Signature: ([BI)J
*/
void Java_org_rocksdb_Slice_createNewSlice0(
JNIEnv * env, jobject jobj, jbyteArray data, jint offset) {
jlong Java_org_rocksdb_Slice_createNewSlice0(
JNIEnv * env, jclass jcls, jbyteArray data, jint offset) {
const jsize dataSize = env->GetArrayLength(data);
const int len = dataSize - offset;
@ -126,32 +125,33 @@ void Java_org_rocksdb_Slice_createNewSlice0(
env->GetByteArrayRegion(data, offset, len, ptrData);
const auto* slice = new rocksdb::Slice((const char*)ptrData, len);
rocksdb::AbstractSliceJni::setHandle(env, jobj, slice);
return reinterpret_cast<jlong>(slice);
}
/*
* Class: org_rocksdb_Slice
* Method: createNewSlice1
* Signature: ([B)V
* Signature: ([B)J
*/
void Java_org_rocksdb_Slice_createNewSlice1(
JNIEnv * env, jobject jobj, jbyteArray data) {
jlong Java_org_rocksdb_Slice_createNewSlice1(
JNIEnv * env, jclass jcls, jbyteArray data) {
const int len = env->GetArrayLength(data) + 1;
jboolean isCopy;
jbyte* ptrData = env->GetByteArrayElements(data, &isCopy);
char* buf = new char[len];
// NOTE: buf will be deleted in the org.rocksdb.Slice#dispose method
char* buf = new char[len];
memcpy(buf, ptrData, len - 1);
buf[len-1]='\0';
const auto* slice =
new rocksdb::Slice(buf, len - 1);
rocksdb::AbstractSliceJni::setHandle(env, jobj, slice);
env->ReleaseByteArrayElements(data, ptrData, JNI_ABORT);
// NOTE: buf will be deleted in the org.rocksdb.Slice#dispose method
return reinterpret_cast<jlong>(slice);
}
/*
@ -187,27 +187,27 @@ void Java_org_rocksdb_Slice_disposeInternalBuf(
/*
* Class: org_rocksdb_DirectSlice
* Method: createNewDirectSlice0
* Signature: (Ljava/nio/ByteBuffer;I)V
* Signature: (Ljava/nio/ByteBuffer;I)J
*/
void Java_org_rocksdb_DirectSlice_createNewDirectSlice0(
JNIEnv* env, jobject jobj, jobject data, jint length) {
jlong Java_org_rocksdb_DirectSlice_createNewDirectSlice0(
JNIEnv* env, jclass jcls, jobject data, jint length) {
const auto* ptrData =
reinterpret_cast<char*>(env->GetDirectBufferAddress(data));
const auto* slice = new rocksdb::Slice(ptrData, length);
rocksdb::AbstractSliceJni::setHandle(env, jobj, slice);
return reinterpret_cast<jlong>(slice);
}
/*
* Class: org_rocksdb_DirectSlice
* Method: createNewDirectSlice1
* Signature: (Ljava/nio/ByteBuffer;)V
* Signature: (Ljava/nio/ByteBuffer;)J
*/
void Java_org_rocksdb_DirectSlice_createNewDirectSlice1(
JNIEnv* env, jobject jobj, jobject data) {
jlong Java_org_rocksdb_DirectSlice_createNewDirectSlice1(
JNIEnv* env, jclass jcls, jobject data) {
const auto* ptrData =
reinterpret_cast<char*>(env->GetDirectBufferAddress(data));
const auto* slice = new rocksdb::Slice(ptrData);
rocksdb::AbstractSliceJni::setHandle(env, jobj, slice);
return reinterpret_cast<jlong>(slice);
}
/*

@ -20,10 +20,10 @@
/*
* Class: org_rocksdb_TtlDB
* Method: open
* Signature: (JLjava/lang/String;IZ)V
* Signature: (JLjava/lang/String;IZ)J
*/
void Java_org_rocksdb_TtlDB_open(JNIEnv* env,
jobject jttldb, jlong joptions_handle, jstring jdb_path,
jlong Java_org_rocksdb_TtlDB_open(JNIEnv* env,
jclass jcls, jlong joptions_handle, jstring jdb_path,
jint jttl, jboolean jread_only) {
auto* opt = reinterpret_cast<rocksdb::Options*>(joptions_handle);
rocksdb::DBWithTTL* db = nullptr;
@ -35,145 +35,102 @@ void Java_org_rocksdb_TtlDB_open(JNIEnv* env,
// as TTLDB extends RocksDB on the java side, we can reuse
// the RocksDB portal here.
if (s.ok()) {
rocksdb::RocksDBJni::setHandle(env, jttldb, db);
return;
return reinterpret_cast<jlong>(db);
} else {
rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
return 0;
}
rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
}
/*
* Class: org_rocksdb_TtlDB
* Method: openCF
* Signature: (JLjava/lang/String;Ljava/util/List;
* ILjava/util/List;Z)Ljava/util/List;
* Signature: (JLjava/lang/String;[[B[J[IZ)[J
*/
jobject
jlongArray
Java_org_rocksdb_TtlDB_openCF(
JNIEnv* env, jobject jdb, jlong jopt_handle, jstring jdb_path,
jobject jcfdesc_list, jint jcfdesc_count, jobject jttl_list,
jboolean jread_only) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jopt_handle);
rocksdb::DBWithTTL* db = nullptr;
const char* db_path = env->GetStringUTFChars(jdb_path, 0);
std::vector<jbyte*> cfnames_to_free;
std::vector<jbyteArray> jcfnames_for_free;
JNIEnv* env, jclass jcls, jlong jopt_handle, jstring jdb_path,
jobjectArray jcolumn_names, jlongArray jcolumn_options,
jintArray jttls, jboolean jread_only) {
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jopt_handle);
const char* db_path = env->GetStringUTFChars(jdb_path, NULL);
std::vector<rocksdb::ColumnFamilyDescriptor> column_families;
std::vector<int32_t> ttl_values;
std::vector<rocksdb::ColumnFamilyHandle* > handles;
// get iterator for ColumnFamilyDescriptors
jobject iteratorObj = env->CallObjectMethod(
jcfdesc_list, rocksdb::ListJni::getIteratorMethod(env));
// iterate over ColumnFamilyDescriptors
while (env->CallBooleanMethod(
iteratorObj, rocksdb::ListJni::getHasNextMethod(env)) == JNI_TRUE) {
// get ColumnFamilyDescriptor
jobject jcf_descriptor = env->CallObjectMethod(iteratorObj,
rocksdb::ListJni::getNextMethod(env));
// get ColumnFamilyName
jbyteArray byteArray = static_cast<jbyteArray>(env->CallObjectMethod(
jcf_descriptor,
rocksdb::ColumnFamilyDescriptorJni::getColumnFamilyNameMethod(
env)));
// get CF Options
jobject jcf_opt_obj = env->CallObjectMethod(jcf_descriptor,
rocksdb::ColumnFamilyDescriptorJni::getColumnFamilyOptionsMethod(
env));
rocksdb::ColumnFamilyOptions* cfOptions =
rocksdb::ColumnFamilyOptionsJni::getHandle(env, jcf_opt_obj);
jbyte* cfname = env->GetByteArrayElements(byteArray, 0);
const int len = env->GetArrayLength(byteArray);
// free allocated cfnames after call to open
cfnames_to_free.push_back(cfname);
jcfnames_for_free.push_back(byteArray);
column_families.push_back(rocksdb::ColumnFamilyDescriptor(
std::string(reinterpret_cast<char *>(cfname), len), *cfOptions));
jsize len_cols = env->GetArrayLength(jcolumn_names);
jlong* jco = env->GetLongArrayElements(jcolumn_options, NULL);
for(int i = 0; i < len_cols; i++) {
jobject jcn = env->GetObjectArrayElement(jcolumn_names, i);
jbyteArray jcn_ba = reinterpret_cast<jbyteArray>(jcn);
jbyte* jcf_name = env->GetByteArrayElements(jcn_ba, NULL);
const int jcf_name_len = env->GetArrayLength(jcn_ba);
//TODO(AR) do I need to make a copy of jco[i] ?
std::string cf_name (reinterpret_cast<char *>(jcf_name), jcf_name_len);
rocksdb::ColumnFamilyOptions* cf_options =
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jco[i]);
column_families.push_back(
rocksdb::ColumnFamilyDescriptor(cf_name, *cf_options));
env->ReleaseByteArrayElements(jcn_ba, jcf_name, JNI_ABORT);
env->DeleteLocalRef(jcn);
}
// get iterator for TTL values
iteratorObj = env->CallObjectMethod(
jttl_list, rocksdb::ListJni::getIteratorMethod(env));
// iterate over TTL values
while (env->CallBooleanMethod(
iteratorObj, rocksdb::ListJni::getHasNextMethod(env)) == JNI_TRUE) {
// get TTL object
jobject jttl_object = env->CallObjectMethod(iteratorObj,
rocksdb::ListJni::getNextMethod(env));
// get Integer value
jclass jIntClazz = env->FindClass("java/lang/Integer");
jmethodID getVal = env->GetMethodID(jIntClazz, "intValue", "()I");
ttl_values.push_back(env->CallIntMethod(jttl_object, getVal));
env->ReleaseLongArrayElements(jcolumn_options, jco, JNI_ABORT);
std::vector<rocksdb::ColumnFamilyHandle*> handles;
rocksdb::DBWithTTL* db = nullptr;
std::vector<int32_t> ttl_values;
jint* jttlv = env->GetIntArrayElements(jttls, NULL);
jsize len_ttls = env->GetArrayLength(jttls);
for(int i = 0; i < len_ttls; i++) {
ttl_values.push_back(jttlv[i]);
}
env->ReleaseIntArrayElements(jttls, jttlv, JNI_ABORT);
rocksdb::Status s = rocksdb::DBWithTTL::Open(*opt, db_path, column_families,
&handles, &db, ttl_values, jread_only);
env->ReleaseStringUTFChars(jdb_path, db_path);
// free jbyte allocations
for (std::vector<jbyte*>::size_type i = 0;
i != cfnames_to_free.size(); i++) {
// free cfnames
env->ReleaseByteArrayElements(jcfnames_for_free[i], cfnames_to_free[i], 0);
}
// check if open operation was successful
if (s.ok()) {
rocksdb::RocksDBJni::setHandle(env, jdb, db);
jclass jListClazz = env->FindClass("java/util/ArrayList");
jmethodID midList = rocksdb::ListJni::getArrayListConstructorMethodId(
env, jListClazz);
jobject jcfhandle_list = env->NewObject(jListClazz,
midList, handles.size());
// insert in java list
for (std::vector<rocksdb::ColumnFamilyHandle*>::size_type i = 0;
i != handles.size(); i++) {
// jlong must be converted to Long due to collections restrictions
jclass jLongClazz = env->FindClass("java/lang/Long");
jmethodID midLong = env->GetMethodID(jLongClazz, "<init>", "(J)V");
jobject obj = env->NewObject(jLongClazz, midLong,
reinterpret_cast<jlong>(handles[i]));
env->CallBooleanMethod(jcfhandle_list,
rocksdb::ListJni::getListAddMethodId(env), obj);
jsize resultsLen = 1 + len_cols; //db handle + column family handles
jlong results[resultsLen];
results[0] = reinterpret_cast<jlong>(db);
for(int i = 1; i <= len_cols; i++) {
results[i] = reinterpret_cast<jlong>(handles[i - 1]);
}
return jcfhandle_list;
jlongArray jresults = env->NewLongArray(resultsLen);
env->SetLongArrayRegion(jresults, 0, resultsLen, results);
return jresults;
} else {
rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
return NULL;
}
rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
return nullptr;
}
/*
* Class: org_rocksdb_TtlDB
* Method: createColumnFamilyWithTtl
* Signature: (JLorg/rocksdb/ColumnFamilyDescriptor;I)J;
* Signature: (JLorg/rocksdb/ColumnFamilyDescriptor;[BJI)J;
*/
jlong Java_org_rocksdb_TtlDB_createColumnFamilyWithTtl(
JNIEnv* env, jobject jobj, jlong jdb_handle,
jobject jcf_descriptor, jint jttl) {
jbyteArray jcolumn_name, jlong jcolumn_options, jint jttl) {
rocksdb::ColumnFamilyHandle* handle;
auto* db_handle = reinterpret_cast<rocksdb::DBWithTTL*>(jdb_handle);
// get ColumnFamilyName
jbyteArray byteArray = static_cast<jbyteArray>(env->CallObjectMethod(
jcf_descriptor,
rocksdb::ColumnFamilyDescriptorJni::getColumnFamilyNameMethod(
env)));
// get CF Options
jobject jcf_opt_obj = env->CallObjectMethod(jcf_descriptor,
rocksdb::ColumnFamilyDescriptorJni::getColumnFamilyOptionsMethod(
env));
rocksdb::ColumnFamilyOptions* cfOptions =
rocksdb::ColumnFamilyOptionsJni::getHandle(env, jcf_opt_obj);
jbyte* cfname = env->GetByteArrayElements(byteArray, 0);
const int len = env->GetArrayLength(byteArray);
jbyte* cfname = env->GetByteArrayElements(jcolumn_name, 0);
const int len = env->GetArrayLength(jcolumn_name);
auto* cfOptions =
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jcolumn_options);
rocksdb::Status s = db_handle->CreateColumnFamilyWithTtl(
*cfOptions, std::string(reinterpret_cast<char *>(cfname),
len), &handle, jttl);
env->ReleaseByteArrayElements(byteArray, cfname, 0);
env->ReleaseByteArrayElements(jcolumn_name, cfname, 0);
if (s.ok()) {
return reinterpret_cast<jlong>(handle);

@ -27,23 +27,23 @@
/*
* Class: org_rocksdb_WriteBatch
* Method: newWriteBatch
* Signature: (I)V
* Signature: (I)J
*/
void Java_org_rocksdb_WriteBatch_newWriteBatch(
JNIEnv* env, jobject jobj, jint jreserved_bytes) {
jlong Java_org_rocksdb_WriteBatch_newWriteBatch(
JNIEnv* env, jclass jcls, jint jreserved_bytes) {
rocksdb::WriteBatch* wb = new rocksdb::WriteBatch(
static_cast<size_t>(jreserved_bytes));
rocksdb::WriteBatchJni::setHandle(env, jobj, wb);
return reinterpret_cast<jlong>(wb);
}
/*
* Class: org_rocksdb_WriteBatch
* Method: count0
* Signature: ()I
* Signature: (J)I
*/
jint Java_org_rocksdb_WriteBatch_count0(JNIEnv* env, jobject jobj) {
rocksdb::WriteBatch* wb = rocksdb::WriteBatchJni::getHandle(env, jobj);
jint Java_org_rocksdb_WriteBatch_count0(JNIEnv* env, jobject jobj,
jlong jwb_handle) {
auto* wb = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle);
assert(wb != nullptr);
return static_cast<jint>(wb->Count());
@ -52,10 +52,11 @@ jint Java_org_rocksdb_WriteBatch_count0(JNIEnv* env, jobject jobj) {
/*
* Class: org_rocksdb_WriteBatch
* Method: clear0
* Signature: ()V
* Signature: (J)V
*/
void Java_org_rocksdb_WriteBatch_clear0(JNIEnv* env, jobject jobj) {
rocksdb::WriteBatch* wb = rocksdb::WriteBatchJni::getHandle(env, jobj);
void Java_org_rocksdb_WriteBatch_clear0(JNIEnv* env, jobject jobj,
jlong jwb_handle) {
auto* wb = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle);
assert(wb != nullptr);
wb->Clear();
@ -64,13 +65,13 @@ void Java_org_rocksdb_WriteBatch_clear0(JNIEnv* env, jobject jobj) {
/*
* Class: org_rocksdb_WriteBatch
* Method: put
* Signature: ([BI[BI)V
* Signature: (J[BI[BI)V
*/
void Java_org_rocksdb_WriteBatch_put___3BI_3BI(
JNIEnv* env, jobject jobj,
void Java_org_rocksdb_WriteBatch_put__J_3BI_3BI(
JNIEnv* env, jobject jobj, jlong jwb_handle,
jbyteArray jkey, jint jkey_len,
jbyteArray jentry_value, jint jentry_value_len) {
auto* wb = rocksdb::WriteBatchJni::getHandle(env, jobj);
auto* wb = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle);
assert(wb != nullptr);
auto put = [&wb] (rocksdb::Slice key, rocksdb::Slice value) {
wb->Put(key, value);
@ -82,13 +83,13 @@ void Java_org_rocksdb_WriteBatch_put___3BI_3BI(
/*
* Class: org_rocksdb_WriteBatch
* Method: put
* Signature: ([BI[BIJ)V
* Signature: (J[BI[BIJ)V
*/
void Java_org_rocksdb_WriteBatch_put___3BI_3BIJ(
JNIEnv* env, jobject jobj,
void Java_org_rocksdb_WriteBatch_put__J_3BI_3BIJ(
JNIEnv* env, jobject jobj, jlong jwb_handle,
jbyteArray jkey, jint jkey_len,
jbyteArray jentry_value, jint jentry_value_len, jlong jcf_handle) {
auto* wb = rocksdb::WriteBatchJni::getHandle(env, jobj);
auto* wb = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle);
assert(wb != nullptr);
auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
assert(cf_handle != nullptr);
@ -102,13 +103,13 @@ void Java_org_rocksdb_WriteBatch_put___3BI_3BIJ(
/*
* Class: org_rocksdb_WriteBatch
* Method: merge
* Signature: ([BI[BI)V
* Signature: (J[BI[BI)V
*/
void Java_org_rocksdb_WriteBatch_merge___3BI_3BI(
JNIEnv* env, jobject jobj,
void Java_org_rocksdb_WriteBatch_merge__J_3BI_3BI(
JNIEnv* env, jobject jobj, jlong jwb_handle,
jbyteArray jkey, jint jkey_len,
jbyteArray jentry_value, jint jentry_value_len) {
auto* wb = rocksdb::WriteBatchJni::getHandle(env, jobj);
auto* wb = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle);
assert(wb != nullptr);
auto merge = [&wb] (rocksdb::Slice key, rocksdb::Slice value) {
wb->Merge(key, value);
@ -120,13 +121,13 @@ void Java_org_rocksdb_WriteBatch_merge___3BI_3BI(
/*
* Class: org_rocksdb_WriteBatch
* Method: merge
* Signature: ([BI[BIJ)V
* Signature: (J[BI[BIJ)V
*/
void Java_org_rocksdb_WriteBatch_merge___3BI_3BIJ(
JNIEnv* env, jobject jobj,
void Java_org_rocksdb_WriteBatch_merge__J_3BI_3BIJ(
JNIEnv* env, jobject jobj, jlong jwb_handle,
jbyteArray jkey, jint jkey_len,
jbyteArray jentry_value, jint jentry_value_len, jlong jcf_handle) {
auto* wb = rocksdb::WriteBatchJni::getHandle(env, jobj);
auto* wb = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle);
assert(wb != nullptr);
auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
assert(cf_handle != nullptr);
@ -140,12 +141,12 @@ void Java_org_rocksdb_WriteBatch_merge___3BI_3BIJ(
/*
* Class: org_rocksdb_WriteBatch
* Method: remove
* Signature: ([BI)V
* Signature: (J[BI)V
*/
void Java_org_rocksdb_WriteBatch_remove___3BI(
JNIEnv* env, jobject jobj,
void Java_org_rocksdb_WriteBatch_remove__J_3BI(
JNIEnv* env, jobject jobj, jlong jwb_handle,
jbyteArray jkey, jint jkey_len) {
auto* wb = rocksdb::WriteBatchJni::getHandle(env, jobj);
auto* wb = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle);
assert(wb != nullptr);
auto remove = [&wb] (rocksdb::Slice key) {
wb->Delete(key);
@ -156,12 +157,12 @@ void Java_org_rocksdb_WriteBatch_remove___3BI(
/*
* Class: org_rocksdb_WriteBatch
* Method: remove
* Signature: ([BIJ)V
* Signature: (J[BIJ)V
*/
void Java_org_rocksdb_WriteBatch_remove___3BIJ(
JNIEnv* env, jobject jobj,
void Java_org_rocksdb_WriteBatch_remove__J_3BIJ(
JNIEnv* env, jobject jobj, jlong jwb_handle,
jbyteArray jkey, jint jkey_len, jlong jcf_handle) {
auto* wb = rocksdb::WriteBatchJni::getHandle(env, jobj);
auto* wb = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle);
assert(wb != nullptr);
auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
assert(cf_handle != nullptr);
@ -174,11 +175,12 @@ void Java_org_rocksdb_WriteBatch_remove___3BIJ(
/*
* Class: org_rocksdb_WriteBatch
* Method: putLogData
* Signature: ([BI)V
* Signature: (J[BI)V
*/
void Java_org_rocksdb_WriteBatch_putLogData(
JNIEnv* env, jobject jobj, jbyteArray jblob, jint jblob_len) {
auto* wb = rocksdb::WriteBatchJni::getHandle(env, jobj);
JNIEnv* env, jobject jobj, jlong jwb_handle, jbyteArray jblob,
jint jblob_len) {
auto* wb = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle);
assert(wb != nullptr);
auto putLogData = [&wb] (rocksdb::Slice blob) {
wb->PutLogData(blob);
@ -189,11 +191,11 @@ void Java_org_rocksdb_WriteBatch_putLogData(
/*
* Class: org_rocksdb_WriteBatch
* Method: iterate
* Signature: (J)V
* Signature: (JJ)V
*/
void Java_org_rocksdb_WriteBatch_iterate(
JNIEnv* env , jobject jobj, jlong handlerHandle) {
rocksdb::WriteBatch* wb = rocksdb::WriteBatchJni::getHandle(env, jobj);
JNIEnv* env , jobject jobj, jlong jwb_handle, jlong handlerHandle) {
auto* wb = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle);
assert(wb != nullptr);
rocksdb::Status s = wb->Iterate(
@ -218,13 +220,13 @@ void Java_org_rocksdb_WriteBatch_disposeInternal(
/*
* Class: org_rocksdb_WriteBatch_Handler
* Method: createNewHandler0
* Signature: ()V
* Signature: ()J
*/
void Java_org_rocksdb_WriteBatch_00024Handler_createNewHandler0(
jlong Java_org_rocksdb_WriteBatch_00024Handler_createNewHandler0(
JNIEnv* env, jobject jobj) {
const rocksdb::WriteBatchHandlerJniCallback* h =
new rocksdb::WriteBatchHandlerJniCallback(env, jobj);
rocksdb::WriteBatchHandlerJni::setHandle(env, jobj, h);
return reinterpret_cast<jlong>(h);
}
/*

@ -28,11 +28,11 @@
/*
* Class: org_rocksdb_WriteBatchTest
* Method: getContents
* Signature: (Lorg/rocksdb/WriteBatch;)[B
* Signature: (J)[B
*/
jbyteArray Java_org_rocksdb_WriteBatchTest_getContents(
JNIEnv* env, jclass jclazz, jobject jobj) {
rocksdb::WriteBatch* b = rocksdb::WriteBatchJni::getHandle(env, jobj);
JNIEnv* env, jclass jclazz, jlong jwb_handle) {
auto* b = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle);
assert(b != nullptr);
// todo: Currently the following code is directly copied from
@ -109,11 +109,11 @@ jbyteArray Java_org_rocksdb_WriteBatchTest_getContents(
/*
* Class: org_rocksdb_WriteBatchTestInternalHelper
* Method: setSequence
* Signature: (Lorg/rocksdb/WriteBatch;J)V
* Signature: (JJ)V
*/
void Java_org_rocksdb_WriteBatchTestInternalHelper_setSequence(
JNIEnv* env, jclass jclazz, jobject jobj, jlong jsn) {
rocksdb::WriteBatch* wb = rocksdb::WriteBatchJni::getHandle(env, jobj);
JNIEnv* env, jclass jclazz, jlong jwb_handle, jlong jsn) {
auto* wb = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle);
assert(wb != nullptr);
rocksdb::WriteBatchInternal::SetSequence(
@ -123,11 +123,11 @@ void Java_org_rocksdb_WriteBatchTestInternalHelper_setSequence(
/*
* Class: org_rocksdb_WriteBatchTestInternalHelper
* Method: sequence
* Signature: (Lorg/rocksdb/WriteBatch;)J
* Signature: (J)J
*/
jlong Java_org_rocksdb_WriteBatchTestInternalHelper_sequence(
JNIEnv* env, jclass jclazz, jobject jobj) {
rocksdb::WriteBatch* wb = rocksdb::WriteBatchJni::getHandle(env, jobj);
JNIEnv* env, jclass jclazz, jlong jwb_handle) {
auto* wb = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle);
assert(wb != nullptr);
return static_cast<jlong>(rocksdb::WriteBatchInternal::Sequence(wb));
@ -136,13 +136,13 @@ jlong Java_org_rocksdb_WriteBatchTestInternalHelper_sequence(
/*
* Class: org_rocksdb_WriteBatchTestInternalHelper
* Method: append
* Signature: (Lorg/rocksdb/WriteBatch;Lorg/rocksdb/WriteBatch;)V
* Signature: (JJ)V
*/
void Java_org_rocksdb_WriteBatchTestInternalHelper_append(
JNIEnv* env, jclass jclazz, jobject jwb1, jobject jwb2) {
rocksdb::WriteBatch* wb1 = rocksdb::WriteBatchJni::getHandle(env, jwb1);
JNIEnv* env, jclass jclazz, jlong jwb_handle_1, jlong jwb_handle_2) {
auto* wb1 = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle_1);
assert(wb1 != nullptr);
rocksdb::WriteBatch* wb2 = rocksdb::WriteBatchJni::getHandle(env, jwb2);
auto* wb2 = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle_2);
assert(wb2 != nullptr);
rocksdb::WriteBatchInternal::Append(wb1, wb2);

@ -15,51 +15,50 @@
/*
* Class: org_rocksdb_WriteBatchWithIndex
* Method: newWriteBatchWithIndex
* Signature: ()V
* Signature: ()J
*/
void Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__(
JNIEnv* env, jobject jobj) {
jlong Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__(
JNIEnv* env, jclass jcls) {
rocksdb::WriteBatchWithIndex* wbwi = new rocksdb::WriteBatchWithIndex();
rocksdb::WriteBatchWithIndexJni::setHandle(env, jobj, wbwi);
return reinterpret_cast<jlong>(wbwi);
}
/*
* Class: org_rocksdb_WriteBatchWithIndex
* Method: newWriteBatchWithIndex
* Signature: (Z)V
* Signature: (Z)J
*/
void Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__Z(
JNIEnv* env, jobject jobj, jboolean joverwrite_key) {
jlong Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__Z(
JNIEnv* env, jclass jcls, jboolean joverwrite_key) {
rocksdb::WriteBatchWithIndex* wbwi =
new rocksdb::WriteBatchWithIndex(rocksdb::BytewiseComparator(), 0,
static_cast<bool>(joverwrite_key));
rocksdb::WriteBatchWithIndexJni::setHandle(env, jobj, wbwi);
return reinterpret_cast<jlong>(wbwi);
}
/*
* Class: org_rocksdb_WriteBatchWithIndex
* Method: newWriteBatchWithIndex
* Signature: (JIZ)V
* Signature: (JIZ)J
*/
void Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__JIZ(
JNIEnv* env, jobject jobj, jlong jfallback_index_comparator_handle,
jlong Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__JIZ(
JNIEnv* env, jclass jcls, jlong jfallback_index_comparator_handle,
jint jreserved_bytes, jboolean joverwrite_key) {
rocksdb::WriteBatchWithIndex* wbwi =
new rocksdb::WriteBatchWithIndex(
reinterpret_cast<rocksdb::Comparator*>(jfallback_index_comparator_handle),
static_cast<size_t>(jreserved_bytes), static_cast<bool>(joverwrite_key));
rocksdb::WriteBatchWithIndexJni::setHandle(env, jobj, wbwi);
return reinterpret_cast<jlong>(wbwi);
}
/*
* Class: org_rocksdb_WriteBatchWithIndex
* Method: count
* Signature: ()I
* Method: count0
* Signature: (J)I
*/
jint Java_org_rocksdb_WriteBatchWithIndex_count0(
JNIEnv* env, jobject jobj) {
rocksdb::WriteBatchWithIndex* wbwi =
rocksdb::WriteBatchWithIndexJni::getHandle(env, jobj);
JNIEnv* env, jobject jobj, jlong jwbwi_handle) {
auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
assert(wbwi != nullptr);
return static_cast<jint>(wbwi->GetWriteBatch()->Count());
@ -68,13 +67,12 @@ jint Java_org_rocksdb_WriteBatchWithIndex_count0(
/*
* Class: org_rocksdb_WriteBatchWithIndex
* Method: put
* Signature: ([BI[BI)V
* Signature: (J[BI[BI)V
*/
void Java_org_rocksdb_WriteBatchWithIndex_put___3BI_3BI(
JNIEnv* env, jobject jobj, jbyteArray jkey, jint jkey_len,
jbyteArray jentry_value, jint jentry_value_len) {
auto* wbwi =
rocksdb::WriteBatchWithIndexJni::getHandle(env, jobj);
void Java_org_rocksdb_WriteBatchWithIndex_put__J_3BI_3BI(
JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jkey,
jint jkey_len, jbyteArray jentry_value, jint jentry_value_len) {
auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
assert(wbwi != nullptr);
auto put = [&wbwi] (rocksdb::Slice key, rocksdb::Slice value) {
wbwi->Put(key, value);
@ -86,13 +84,13 @@ void Java_org_rocksdb_WriteBatchWithIndex_put___3BI_3BI(
/*
* Class: org_rocksdb_WriteBatchWithIndex
* Method: put
* Signature: ([BI[BIJ)V
* Signature: (J[BI[BIJ)V
*/
void Java_org_rocksdb_WriteBatchWithIndex_put___3BI_3BIJ(
JNIEnv* env, jobject jobj, jbyteArray jkey, jint jkey_len,
jbyteArray jentry_value, jint jentry_value_len, jlong jcf_handle) {
auto* wbwi =
rocksdb::WriteBatchWithIndexJni::getHandle(env, jobj);
void Java_org_rocksdb_WriteBatchWithIndex_put__J_3BI_3BIJ(
JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jkey,
jint jkey_len, jbyteArray jentry_value, jint jentry_value_len,
jlong jcf_handle) {
auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
assert(wbwi != nullptr);
auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
assert(cf_handle != nullptr);
@ -106,13 +104,12 @@ void Java_org_rocksdb_WriteBatchWithIndex_put___3BI_3BIJ(
/*
* Class: org_rocksdb_WriteBatchWithIndex
* Method: merge
* Signature: ([BI[BI)V
* Signature: (J[BI[BI)V
*/
void Java_org_rocksdb_WriteBatchWithIndex_merge___3BI_3BI(
JNIEnv* env, jobject jobj, jbyteArray jkey, jint jkey_len,
jbyteArray jentry_value, jint jentry_value_len) {
auto* wbwi =
rocksdb::WriteBatchWithIndexJni::getHandle(env, jobj);
void Java_org_rocksdb_WriteBatchWithIndex_merge__J_3BI_3BI(
JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jkey,
jint jkey_len, jbyteArray jentry_value, jint jentry_value_len) {
auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
assert(wbwi != nullptr);
auto merge = [&wbwi] (rocksdb::Slice key, rocksdb::Slice value) {
wbwi->Merge(key, value);
@ -124,13 +121,13 @@ void Java_org_rocksdb_WriteBatchWithIndex_merge___3BI_3BI(
/*
* Class: org_rocksdb_WriteBatchWithIndex
* Method: merge
* Signature: ([BI[BIJ)V
* Signature: (J[BI[BIJ)V
*/
void Java_org_rocksdb_WriteBatchWithIndex_merge___3BI_3BIJ(
JNIEnv* env, jobject jobj, jbyteArray jkey, jint jkey_len,
jbyteArray jentry_value, jint jentry_value_len, jlong jcf_handle) {
auto* wbwi =
rocksdb::WriteBatchWithIndexJni::getHandle(env, jobj);
void Java_org_rocksdb_WriteBatchWithIndex_merge__J_3BI_3BIJ(
JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jkey,
jint jkey_len, jbyteArray jentry_value, jint jentry_value_len,
jlong jcf_handle) {
auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
assert(wbwi != nullptr);
auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
assert(cf_handle != nullptr);
@ -144,12 +141,12 @@ void Java_org_rocksdb_WriteBatchWithIndex_merge___3BI_3BIJ(
/*
* Class: org_rocksdb_WriteBatchWithIndex
* Method: remove
* Signature: ([BI)V
* Signature: (J[BI)V
*/
void Java_org_rocksdb_WriteBatchWithIndex_remove___3BI(
JNIEnv* env, jobject jobj, jbyteArray jkey, jint jkey_len) {
auto* wbwi =
rocksdb::WriteBatchWithIndexJni::getHandle(env, jobj);
void Java_org_rocksdb_WriteBatchWithIndex_remove__J_3BI(
JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jkey,
jint jkey_len) {
auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
assert(wbwi != nullptr);
auto remove = [&wbwi] (rocksdb::Slice key) {
wbwi->Delete(key);
@ -160,13 +157,12 @@ void Java_org_rocksdb_WriteBatchWithIndex_remove___3BI(
/*
* Class: org_rocksdb_WriteBatchWithIndex
* Method: remove
* Signature: ([BIJ)V
* Signature: (J[BIJ)V
*/
void Java_org_rocksdb_WriteBatchWithIndex_remove___3BIJ(
JNIEnv* env, jobject jobj,
jbyteArray jkey, jint jkey_len, jlong jcf_handle) {
auto* wbwi =
rocksdb::WriteBatchWithIndexJni::getHandle(env, jobj);
void Java_org_rocksdb_WriteBatchWithIndex_remove__J_3BIJ(
JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jkey,
jint jkey_len, jlong jcf_handle) {
auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
assert(wbwi != nullptr);
auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
assert(cf_handle != nullptr);
@ -179,12 +175,12 @@ void Java_org_rocksdb_WriteBatchWithIndex_remove___3BIJ(
/*
* Class: org_rocksdb_WriteBatchWithIndex
* Method: putLogData
* Signature: ([BI)V
* Signature: (J[BI)V
*/
void Java_org_rocksdb_WriteBatchWithIndex_putLogData(
JNIEnv* env, jobject jobj, jbyteArray jblob, jint jblob_len) {
auto* wbwi =
rocksdb::WriteBatchWithIndexJni::getHandle(env, jobj);
JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jblob,
jint jblob_len) {
auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
assert(wbwi != nullptr);
auto putLogData = [&wbwi] (rocksdb::Slice blob) {
wbwi->PutLogData(blob);
@ -195,12 +191,11 @@ void Java_org_rocksdb_WriteBatchWithIndex_putLogData(
/*
* Class: org_rocksdb_WriteBatchWithIndex
* Method: clear
* Signature: ()V
* Signature: (J)V
*/
void Java_org_rocksdb_WriteBatchWithIndex_clear0(
JNIEnv* env, jobject jobj) {
rocksdb::WriteBatchWithIndex* wbwi =
rocksdb::WriteBatchWithIndexJni::getHandle(env, jobj);
JNIEnv* env, jobject jobj, jlong jwbwi_handle) {
auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
assert(wbwi != nullptr);
wbwi->GetWriteBatch()->Clear();
@ -209,12 +204,11 @@ void Java_org_rocksdb_WriteBatchWithIndex_clear0(
/*
* Class: org_rocksdb_WriteBatchWithIndex
* Method: iterator0
* Signature: ()J
* Signature: (J)J
*/
jlong Java_org_rocksdb_WriteBatchWithIndex_iterator0(
JNIEnv* env, jobject jobj) {
rocksdb::WriteBatchWithIndex* wbwi =
rocksdb::WriteBatchWithIndexJni::getHandle(env, jobj);
JNIEnv* env, jobject jobj, jlong jwbwi_handle) {
auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
rocksdb::WBWIIterator* wbwi_iterator = wbwi->NewIterator();
return reinterpret_cast<jlong>(wbwi_iterator);
}
@ -222,12 +216,11 @@ jlong Java_org_rocksdb_WriteBatchWithIndex_iterator0(
/*
* Class: org_rocksdb_WriteBatchWithIndex
* Method: iterator1
* Signature: (J)J
* Signature: (JJ)J
*/
jlong Java_org_rocksdb_WriteBatchWithIndex_iterator1(
JNIEnv* env, jobject jobj, jlong jcf_handle) {
rocksdb::WriteBatchWithIndex* wbwi =
rocksdb::WriteBatchWithIndexJni::getHandle(env, jobj);
JNIEnv* env, jobject jobj, jlong jwbwi_handle, jlong jcf_handle) {
auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
rocksdb::WBWIIterator* wbwi_iterator = wbwi->NewIterator(cf_handle);
return reinterpret_cast<jlong>(wbwi_iterator);
@ -236,12 +229,12 @@ jlong Java_org_rocksdb_WriteBatchWithIndex_iterator1(
/*
* Class: org_rocksdb_WriteBatchWithIndex
* Method: iteratorWithBase
* Signature: (JJ)J
* Signature: (JJJ)J
*/
jlong Java_org_rocksdb_WriteBatchWithIndex_iteratorWithBase(
JNIEnv* env, jobject jobj, jlong jcf_handle, jlong jbi_handle) {
rocksdb::WriteBatchWithIndex* wbwi =
rocksdb::WriteBatchWithIndexJni::getHandle(env, jobj);
JNIEnv* env, jobject jobj, jlong jwbwi_handle, jlong jcf_handle,
jlong jbi_handle) {
auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
auto* base_iterator = reinterpret_cast<rocksdb::Iterator*>(jbi_handle);
auto* iterator = wbwi->NewIteratorWithBase(cf_handle, base_iterator);
@ -360,27 +353,57 @@ void Java_org_rocksdb_WBWIRocksIterator_status0(
/*
* Class: org_rocksdb_WBWIRocksIterator
* Method: entry1
* Signature: (JLorg/rocksdb/WBWIRocksIterator/WriteEntry;)V
* Signature: (J)[J
*/
void Java_org_rocksdb_WBWIRocksIterator_entry1(
JNIEnv* env, jobject jobj, jlong handle, jobject jwrite_entry) {
jlongArray Java_org_rocksdb_WBWIRocksIterator_entry1(
JNIEnv* env, jobject jobj, jlong handle) {
auto* it = reinterpret_cast<rocksdb::WBWIIterator*>(handle);
const rocksdb::WriteEntry& we = it->Entry();
jobject jwe = rocksdb::WBWIRocksIteratorJni::getWriteEntry(env, jobj);
rocksdb::WriteEntryJni::setWriteType(env, jwe, we.type);
jlong results[3];
//set the type of the write entry
switch (we.type) {
case rocksdb::kPutRecord:
results[0] = 0x1;
break;
case rocksdb::kMergeRecord:
results[0] = 0x2;
break;
case rocksdb::kDeleteRecord:
results[0] = 0x4;
break;
case rocksdb::kLogDataRecord:
results[0] = 0x8;
break;
default:
results[0] = 0x0;
}
//TODO(AR) do we leak buf and value_buf?
//set the pointer to the key slice
char* buf = new char[we.key.size()];
memcpy(buf, we.key.data(), we.key.size());
auto* key_slice = new rocksdb::Slice(buf, we.key.size());
rocksdb::WriteEntryJni::setKey(env, jwe, key_slice);
results[1] = reinterpret_cast<jlong>(key_slice);
//set the pointer to the value slice
if (we.type == rocksdb::kDeleteRecord || we.type == rocksdb::kLogDataRecord) {
// set native handle of value slice to null if no value available
rocksdb::WriteEntryJni::setValue(env, jwe, nullptr);
results[2] = 0;
} else {
char* value_buf = new char[we.value.size()];
memcpy(value_buf, we.value.data(), we.value.size());
auto* value_slice = new rocksdb::Slice(value_buf, we.value.size());
rocksdb::WriteEntryJni::setValue(env, jwe, value_slice);
results[2] = reinterpret_cast<jlong>(value_slice);
}
jlongArray jresults = env->NewLongArray(3);
env->SetLongArrayRegion(jresults, 0, 3, results);
return jresults;
}

@ -22,73 +22,57 @@ public class RocksDBColumnFamilySample {
String db_path = args[0];
System.out.println("RocksDBColumnFamilySample");
RocksDB db = null;
Options options = null;
ColumnFamilyHandle columnFamilyHandle = null;
WriteBatch wb = null;
try {
options = new Options().setCreateIfMissing(true);
db = RocksDB.open(options, db_path);
try(final Options options = new Options().setCreateIfMissing(true);
final RocksDB db = RocksDB.open(options, db_path)) {
assert(db != null);
// create column family
columnFamilyHandle = db.createColumnFamily(
try(final ColumnFamilyHandle columnFamilyHandle = db.createColumnFamily(
new ColumnFamilyDescriptor("new_cf".getBytes(),
new ColumnFamilyOptions()));
assert(columnFamilyHandle != null);
} finally {
if (columnFamilyHandle != null) {
columnFamilyHandle.dispose();
}
if (db != null) {
db.close();
db = null;
}
if (options != null) {
options.dispose();
new ColumnFamilyOptions()))) {
assert (columnFamilyHandle != null);
}
}
// open DB with two column families
List<ColumnFamilyDescriptor> columnFamilyDescriptors = new ArrayList<>();
final List<ColumnFamilyDescriptor> columnFamilyDescriptors =
new ArrayList<>();
// have to open default column family
columnFamilyDescriptors.add(new ColumnFamilyDescriptor(
RocksDB.DEFAULT_COLUMN_FAMILY, new ColumnFamilyOptions()));
// open the new one, too
columnFamilyDescriptors.add(new ColumnFamilyDescriptor(
"new_cf".getBytes(), new ColumnFamilyOptions()));
List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
try {
db = RocksDB.open(new DBOptions(), db_path,
columnFamilyDescriptors, columnFamilyHandles);
final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
try(final DBOptions options = new DBOptions();
final RocksDB db = RocksDB.open(options, db_path,
columnFamilyDescriptors, columnFamilyHandles)) {
assert(db != null);
// put and get from non-default column family
db.put(columnFamilyHandles.get(0), new WriteOptions(),
"key".getBytes(), "value".getBytes());
String value = new String(db.get(columnFamilyHandles.get(0),
"key".getBytes()));
try {
// put and get from non-default column family
db.put(columnFamilyHandles.get(0), new WriteOptions(),
"key".getBytes(), "value".getBytes());
String value = new String(db.get(columnFamilyHandles.get(0),
"key".getBytes()));
// atomic write
wb = new WriteBatch();
wb.put(columnFamilyHandles.get(0), "key2".getBytes(), "value2".getBytes());
wb.put(columnFamilyHandles.get(1), "key3".getBytes(), "value3".getBytes());
wb.remove(columnFamilyHandles.get(0), "key".getBytes());
db.write(new WriteOptions(), wb);
// atomic write
try (final WriteBatch wb = new WriteBatch()) {
wb.put(columnFamilyHandles.get(0), "key2".getBytes(),
"value2".getBytes());
wb.put(columnFamilyHandles.get(1), "key3".getBytes(),
"value3".getBytes());
wb.remove(columnFamilyHandles.get(0), "key".getBytes());
db.write(new WriteOptions(), wb);
}
// drop column family
db.dropColumnFamily(columnFamilyHandles.get(1));
} finally {
for (ColumnFamilyHandle handle : columnFamilyHandles){
handle.dispose();
}
if (db != null) {
db.close();
}
if (wb != null) {
wb.dispose();
// drop column family
db.dropColumnFamily(columnFamilyHandles.get(1));
} finally {
for (final ColumnFamilyHandle handle : columnFamilyHandles) {
handle.close();
}
}
}
}

@ -8,8 +8,10 @@ import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.ArrayList;
import org.rocksdb.*;
import org.rocksdb.util.SizeUnit;
import java.io.IOException;
public class RocksDBSample {
@ -26,287 +28,273 @@ public class RocksDBSample {
String db_path_not_found = db_path + "_not_found";
System.out.println("RocksDBSample");
RocksDB db = null;
Options options = new Options();
try {
db = RocksDB.open(options, db_path_not_found);
assert(false);
} catch (RocksDBException e) {
System.out.format("caught the expceted exception -- %s\n", e);
assert(db == null);
}
try (final Options options = new Options();
final Filter bloomFilter = new BloomFilter(10);
final ReadOptions readOptions = new ReadOptions()
.setFillCache(false)) {
try (final RocksDB db = RocksDB.open(options, db_path_not_found)) {
assert (false);
} catch (RocksDBException e) {
System.out.format("caught the expected exception -- %s\n", e);
}
try {
options.setCreateIfMissing(true)
.createStatistics()
.setWriteBufferSize(8 * SizeUnit.KB)
.setMaxWriteBufferNumber(3)
.setMaxBackgroundCompactions(10)
.setCompressionType(CompressionType.SNAPPY_COMPRESSION)
.setCompactionStyle(CompactionStyle.UNIVERSAL);
} catch (IllegalArgumentException e) {
assert(false);
}
try {
options.setCreateIfMissing(true)
.createStatistics()
.setWriteBufferSize(8 * SizeUnit.KB)
.setMaxWriteBufferNumber(3)
.setMaxBackgroundCompactions(10)
.setCompressionType(CompressionType.SNAPPY_COMPRESSION)
.setCompactionStyle(CompactionStyle.UNIVERSAL);
} catch (IllegalArgumentException e) {
assert (false);
}
Statistics stats = options.statisticsPtr();
assert(options.createIfMissing() == true);
assert(options.writeBufferSize() == 8 * SizeUnit.KB);
assert(options.maxWriteBufferNumber() == 3);
assert(options.maxBackgroundCompactions() == 10);
assert(options.compressionType() == CompressionType.SNAPPY_COMPRESSION);
assert(options.compactionStyle() == CompactionStyle.UNIVERSAL);
assert(options.memTableFactoryName().equals("SkipListFactory"));
options.setMemTableConfig(
new HashSkipListMemTableConfig()
.setHeight(4)
.setBranchingFactor(4)
.setBucketCount(2000000));
assert(options.memTableFactoryName().equals("HashSkipListRepFactory"));
options.setMemTableConfig(
new HashLinkedListMemTableConfig()
.setBucketCount(100000));
assert(options.memTableFactoryName().equals("HashLinkedListRepFactory"));
options.setMemTableConfig(
new VectorMemTableConfig().setReservedSize(10000));
assert(options.memTableFactoryName().equals("VectorRepFactory"));
options.setMemTableConfig(new SkipListMemTableConfig());
assert(options.memTableFactoryName().equals("SkipListFactory"));
options.setTableFormatConfig(new PlainTableConfig());
// Plain-Table requires mmap read
options.setAllowMmapReads(true);
assert(options.tableFactoryName().equals("PlainTable"));
options.setRateLimiterConfig(new GenericRateLimiterConfig(10000000,
10000, 10));
options.setRateLimiterConfig(new GenericRateLimiterConfig(10000000));
Filter bloomFilter = new BloomFilter(10);
BlockBasedTableConfig table_options = new BlockBasedTableConfig();
table_options.setBlockCacheSize(64 * SizeUnit.KB)
.setFilter(bloomFilter)
.setCacheNumShardBits(6)
.setBlockSizeDeviation(5)
.setBlockRestartInterval(10)
.setCacheIndexAndFilterBlocks(true)
.setHashIndexAllowCollision(false)
.setBlockCacheCompressedSize(64 * SizeUnit.KB)
.setBlockCacheCompressedNumShardBits(10);
assert(table_options.blockCacheSize() == 64 * SizeUnit.KB);
assert(table_options.cacheNumShardBits() == 6);
assert(table_options.blockSizeDeviation() == 5);
assert(table_options.blockRestartInterval() == 10);
assert(table_options.cacheIndexAndFilterBlocks() == true);
assert(table_options.hashIndexAllowCollision() == false);
assert(table_options.blockCacheCompressedSize() == 64 * SizeUnit.KB);
assert(table_options.blockCacheCompressedNumShardBits() == 10);
options.setTableFormatConfig(table_options);
assert(options.tableFactoryName().equals("BlockBasedTable"));
try {
db = RocksDB.open(options, db_path);
db.put("hello".getBytes(), "world".getBytes());
byte[] value = db.get("hello".getBytes());
assert("world".equals(new String(value)));
String str = db.getProperty("rocksdb.stats");
assert(str != null && !str.equals(""));
} catch (RocksDBException e) {
System.out.format("[ERROR] caught the unexpceted exception -- %s\n", e);
assert(db == null);
assert(false);
}
// be sure to release the c++ pointer
db.close();
ReadOptions readOptions = new ReadOptions();
readOptions.setFillCache(false);
try {
db = RocksDB.open(options, db_path);
db.put("hello".getBytes(), "world".getBytes());
byte[] value = db.get("hello".getBytes());
System.out.format("Get('hello') = %s\n",
new String(value));
for (int i = 1; i <= 9; ++i) {
for (int j = 1; j <= 9; ++j) {
db.put(String.format("%dx%d", i, j).getBytes(),
String.format("%d", i * j).getBytes());
}
Statistics stats = options.statisticsPtr();
assert (options.createIfMissing() == true);
assert (options.writeBufferSize() == 8 * SizeUnit.KB);
assert (options.maxWriteBufferNumber() == 3);
assert (options.maxBackgroundCompactions() == 10);
assert (options.compressionType() == CompressionType.SNAPPY_COMPRESSION);
assert (options.compactionStyle() == CompactionStyle.UNIVERSAL);
assert (options.memTableFactoryName().equals("SkipListFactory"));
options.setMemTableConfig(
new HashSkipListMemTableConfig()
.setHeight(4)
.setBranchingFactor(4)
.setBucketCount(2000000));
assert (options.memTableFactoryName().equals("HashSkipListRepFactory"));
options.setMemTableConfig(
new HashLinkedListMemTableConfig()
.setBucketCount(100000));
assert (options.memTableFactoryName().equals("HashLinkedListRepFactory"));
options.setMemTableConfig(
new VectorMemTableConfig().setReservedSize(10000));
assert (options.memTableFactoryName().equals("VectorRepFactory"));
options.setMemTableConfig(new SkipListMemTableConfig());
assert (options.memTableFactoryName().equals("SkipListFactory"));
options.setTableFormatConfig(new PlainTableConfig());
// Plain-Table requires mmap read
options.setAllowMmapReads(true);
assert (options.tableFactoryName().equals("PlainTable"));
options.setRateLimiterConfig(new GenericRateLimiterConfig(10000000,
10000, 10));
options.setRateLimiterConfig(new GenericRateLimiterConfig(10000000));
final BlockBasedTableConfig table_options = new BlockBasedTableConfig();
table_options.setBlockCacheSize(64 * SizeUnit.KB)
.setFilter(bloomFilter)
.setCacheNumShardBits(6)
.setBlockSizeDeviation(5)
.setBlockRestartInterval(10)
.setCacheIndexAndFilterBlocks(true)
.setHashIndexAllowCollision(false)
.setBlockCacheCompressedSize(64 * SizeUnit.KB)
.setBlockCacheCompressedNumShardBits(10);
assert (table_options.blockCacheSize() == 64 * SizeUnit.KB);
assert (table_options.cacheNumShardBits() == 6);
assert (table_options.blockSizeDeviation() == 5);
assert (table_options.blockRestartInterval() == 10);
assert (table_options.cacheIndexAndFilterBlocks() == true);
assert (table_options.hashIndexAllowCollision() == false);
assert (table_options.blockCacheCompressedSize() == 64 * SizeUnit.KB);
assert (table_options.blockCacheCompressedNumShardBits() == 10);
options.setTableFormatConfig(table_options);
assert (options.tableFactoryName().equals("BlockBasedTable"));
try (final RocksDB db = RocksDB.open(options, db_path)) {
db.put("hello".getBytes(), "world".getBytes());
byte[] value = db.get("hello".getBytes());
assert ("world".equals(new String(value)));
String str = db.getProperty("rocksdb.stats");
assert (str != null && !str.equals(""));
} catch (RocksDBException e) {
System.out.format("[ERROR] caught the unexpceted exception -- %s\n", e);
assert (false);
}
for (int i = 1; i <= 9; ++i) {
for (int j = 1; j <= 9; ++j) {
System.out.format("%s ", new String(db.get(
String.format("%dx%d", i, j).getBytes())));
try (final RocksDB db = RocksDB.open(options, db_path)) {
db.put("hello".getBytes(), "world".getBytes());
byte[] value = db.get("hello".getBytes());
System.out.format("Get('hello') = %s\n",
new String(value));
for (int i = 1; i <= 9; ++i) {
for (int j = 1; j <= 9; ++j) {
db.put(String.format("%dx%d", i, j).getBytes(),
String.format("%d", i * j).getBytes());
}
}
for (int i = 1; i <= 9; ++i) {
for (int j = 1; j <= 9; ++j) {
System.out.format("%s ", new String(db.get(
String.format("%dx%d", i, j).getBytes())));
}
System.out.println("");
}
System.out.println("");
}
// write batch test
WriteOptions writeOpt = new WriteOptions();
for (int i = 10; i <= 19; ++i) {
WriteBatch batch = new WriteBatch();
for (int j = 10; j <= 19; ++j) {
batch.put(String.format("%dx%d", i, j).getBytes(),
// write batch test
try (final WriteOptions writeOpt = new WriteOptions()) {
for (int i = 10; i <= 19; ++i) {
try (final WriteBatch batch = new WriteBatch()) {
for (int j = 10; j <= 19; ++j) {
batch.put(String.format("%dx%d", i, j).getBytes(),
String.format("%d", i * j).getBytes());
}
db.write(writeOpt, batch);
}
}
}
db.write(writeOpt, batch);
batch.dispose();
}
for (int i = 10; i <= 19; ++i) {
for (int j = 10; j <= 19; ++j) {
assert(new String(
db.get(String.format("%dx%d", i, j).getBytes())).equals(
String.format("%d", i * j)));
System.out.format("%s ", new String(db.get(
String.format("%dx%d", i, j).getBytes())));
for (int i = 10; i <= 19; ++i) {
for (int j = 10; j <= 19; ++j) {
assert (new String(
db.get(String.format("%dx%d", i, j).getBytes())).equals(
String.format("%d", i * j)));
System.out.format("%s ", new String(db.get(
String.format("%dx%d", i, j).getBytes())));
}
System.out.println("");
}
System.out.println("");
}
writeOpt.dispose();
value = db.get("1x1".getBytes());
assert(value != null);
value = db.get("world".getBytes());
assert(value == null);
value = db.get(readOptions, "world".getBytes());
assert(value == null);
byte[] testKey = "asdf".getBytes();
byte[] testValue =
"asdfghjkl;'?><MNBVCXZQWERTYUIOP{+_)(*&^%$#@".getBytes();
db.put(testKey, testValue);
byte[] testResult = db.get(testKey);
assert(testResult != null);
assert(Arrays.equals(testValue, testResult));
assert(new String(testValue).equals(new String(testResult)));
testResult = db.get(readOptions, testKey);
assert(testResult != null);
assert(Arrays.equals(testValue, testResult));
assert(new String(testValue).equals(new String(testResult)));
byte[] insufficientArray = new byte[10];
byte[] enoughArray = new byte[50];
int len;
len = db.get(testKey, insufficientArray);
assert(len > insufficientArray.length);
len = db.get("asdfjkl;".getBytes(), enoughArray);
assert(len == RocksDB.NOT_FOUND);
len = db.get(testKey, enoughArray);
assert(len == testValue.length);
len = db.get(readOptions, testKey, insufficientArray);
assert(len > insufficientArray.length);
len = db.get(readOptions, "asdfjkl;".getBytes(), enoughArray);
assert(len == RocksDB.NOT_FOUND);
len = db.get(readOptions, testKey, enoughArray);
assert(len == testValue.length);
db.remove(testKey);
len = db.get(testKey, enoughArray);
assert(len == RocksDB.NOT_FOUND);
// repeat the test with WriteOptions
WriteOptions writeOpts = new WriteOptions();
writeOpts.setSync(true);
writeOpts.setDisableWAL(true);
db.put(writeOpts, testKey, testValue);
len = db.get(testKey, enoughArray);
assert(len == testValue.length);
assert(new String(testValue).equals(
new String(enoughArray, 0, len)));
writeOpts.dispose();
try {
for (TickerType statsType : TickerType.values()) {
stats.getTickerCount(statsType);
value = db.get("1x1".getBytes());
assert (value != null);
value = db.get("world".getBytes());
assert (value == null);
value = db.get(readOptions, "world".getBytes());
assert (value == null);
byte[] testKey = "asdf".getBytes();
byte[] testValue =
"asdfghjkl;'?><MNBVCXZQWERTYUIOP{+_)(*&^%$#@".getBytes();
db.put(testKey, testValue);
byte[] testResult = db.get(testKey);
assert (testResult != null);
assert (Arrays.equals(testValue, testResult));
assert (new String(testValue).equals(new String(testResult)));
testResult = db.get(readOptions, testKey);
assert (testResult != null);
assert (Arrays.equals(testValue, testResult));
assert (new String(testValue).equals(new String(testResult)));
byte[] insufficientArray = new byte[10];
byte[] enoughArray = new byte[50];
int len;
len = db.get(testKey, insufficientArray);
assert (len > insufficientArray.length);
len = db.get("asdfjkl;".getBytes(), enoughArray);
assert (len == RocksDB.NOT_FOUND);
len = db.get(testKey, enoughArray);
assert (len == testValue.length);
len = db.get(readOptions, testKey, insufficientArray);
assert (len > insufficientArray.length);
len = db.get(readOptions, "asdfjkl;".getBytes(), enoughArray);
assert (len == RocksDB.NOT_FOUND);
len = db.get(readOptions, testKey, enoughArray);
assert (len == testValue.length);
db.remove(testKey);
len = db.get(testKey, enoughArray);
assert (len == RocksDB.NOT_FOUND);
// repeat the test with WriteOptions
try (final WriteOptions writeOpts = new WriteOptions()) {
writeOpts.setSync(true);
writeOpts.setDisableWAL(true);
db.put(writeOpts, testKey, testValue);
len = db.get(testKey, enoughArray);
assert (len == testValue.length);
assert (new String(testValue).equals(
new String(enoughArray, 0, len)));
}
System.out.println("getTickerCount() passed.");
} catch (Exception e) {
System.out.println("Failed in call to getTickerCount()");
assert(false); //Should never reach here.
}
try {
for (HistogramType histogramType : HistogramType.values()) {
HistogramData data = stats.geHistogramData(histogramType);
try {
for (TickerType statsType : TickerType.values()) {
stats.getTickerCount(statsType);
}
System.out.println("getTickerCount() passed.");
} catch (Exception e) {
System.out.println("Failed in call to getTickerCount()");
assert (false); //Should never reach here.
}
System.out.println("geHistogramData() passed.");
} catch (Exception e) {
System.out.println("Failed in call to geHistogramData()");
assert(false); //Should never reach here.
}
RocksIterator iterator = db.newIterator();
boolean seekToFirstPassed = false;
for (iterator.seekToFirst(); iterator.isValid(); iterator.next()) {
iterator.status();
assert(iterator.key() != null);
assert(iterator.value() != null);
seekToFirstPassed = true;
}
if(seekToFirstPassed) {
System.out.println("iterator seekToFirst tests passed.");
}
boolean seekToLastPassed = false;
for (iterator.seekToLast(); iterator.isValid(); iterator.prev()) {
iterator.status();
assert(iterator.key() != null);
assert(iterator.value() != null);
seekToLastPassed = true;
}
if(seekToLastPassed) {
System.out.println("iterator seekToLastPassed tests passed.");
}
iterator.seekToFirst();
iterator.seek(iterator.key());
assert(iterator.key() != null);
assert(iterator.value() != null);
try {
for (HistogramType histogramType : HistogramType.values()) {
HistogramData data = stats.geHistogramData(histogramType);
}
System.out.println("geHistogramData() passed.");
} catch (Exception e) {
System.out.println("Failed in call to geHistogramData()");
assert (false); //Should never reach here.
}
System.out.println("iterator seek test passed.");
try (final RocksIterator iterator = db.newIterator()) {
boolean seekToFirstPassed = false;
for (iterator.seekToFirst(); iterator.isValid(); iterator.next()) {
iterator.status();
assert (iterator.key() != null);
assert (iterator.value() != null);
seekToFirstPassed = true;
}
if (seekToFirstPassed) {
System.out.println("iterator seekToFirst tests passed.");
}
boolean seekToLastPassed = false;
for (iterator.seekToLast(); iterator.isValid(); iterator.prev()) {
iterator.status();
assert (iterator.key() != null);
assert (iterator.value() != null);
seekToLastPassed = true;
}
if (seekToLastPassed) {
System.out.println("iterator seekToLastPassed tests passed.");
}
iterator.seekToFirst();
iterator.seek(iterator.key());
assert (iterator.key() != null);
assert (iterator.value() != null);
System.out.println("iterator seek test passed.");
iterator.dispose();
System.out.println("iterator tests passed.");
}
System.out.println("iterator tests passed.");
iterator = db.newIterator();
List<byte[]> keys = new ArrayList<byte[]>();
for (iterator.seekToLast(); iterator.isValid(); iterator.prev()) {
keys.add(iterator.key());
}
iterator.dispose();
final List<byte[]> keys = new ArrayList<>();
try (final RocksIterator iterator = db.newIterator()) {
for (iterator.seekToLast(); iterator.isValid(); iterator.prev()) {
keys.add(iterator.key());
}
}
Map<byte[], byte[]> values = db.multiGet(keys);
assert(values.size() == keys.size());
for(byte[] value1 : values.values()) {
assert(value1 != null);
}
Map<byte[], byte[]> values = db.multiGet(keys);
assert (values.size() == keys.size());
for (byte[] value1 : values.values()) {
assert (value1 != null);
}
values = db.multiGet(new ReadOptions(), keys);
assert(values.size() == keys.size());
for(byte[] value1 : values.values()) {
assert(value1 != null);
values = db.multiGet(new ReadOptions(), keys);
assert (values.size() == keys.size());
for (byte[] value1 : values.values()) {
assert (value1 != null);
}
} catch (RocksDBException e) {
System.err.println(e);
}
} catch (RocksDBException e) {
System.err.println(e);
}
if (db != null) {
db.close();
}
// be sure to dispose c++ pointers
options.dispose();
readOptions.dispose();
}
}

@ -8,22 +8,23 @@ package org.rocksdb;
* A CompactionFilter allows an application to modify/delete a key-value at
* the time of compaction.
*
* At present we just permit an overriding Java class to wrap a C++ implementation
* At present we just permit an overriding Java class to wrap a C++
* implementation
*/
public abstract class AbstractCompactionFilter<T extends AbstractSlice<?>>
extends RocksObject {
protected AbstractCompactionFilter(final long nativeHandle) {
super(nativeHandle);
}
/**
* Deletes underlying C++ comparator pointer.
* Deletes underlying C++ compaction pointer.
*
* Note that this function should be called only after all
* RocksDB instances referencing the comparator are closed.
* RocksDB instances referencing the compaction filter are closed.
* Otherwise an undefined behavior will occur.
*/
@Override protected void disposeInternal() {
assert(isInitialized());
disposeInternal(nativeHandle_);
}
private native void disposeInternal(long handle);
@Override
protected final native void disposeInternal(final long handle);
}

@ -15,7 +15,11 @@ package org.rocksdb;
* @see org.rocksdb.DirectComparator
*/
public abstract class AbstractComparator<T extends AbstractSlice<?>>
extends RocksObject {
extends AbstractImmutableNativeReference {
protected AbstractComparator() {
super(true);
}
/**
* The name of the comparator. Used to check for comparator
@ -91,10 +95,12 @@ public abstract class AbstractComparator<T extends AbstractSlice<?>>
* RocksDB instances referencing the comparator are closed.
* Otherwise an undefined behavior will occur.
*/
@Override protected void disposeInternal() {
assert(isInitialized());
disposeInternal(nativeHandle_);
@Override
protected void disposeInternal() {
disposeInternal(getNativeHandle());
}
private native void disposeInternal(long handle);
protected abstract long getNativeHandle();
private native void disposeInternal(final long handle);
}

@ -0,0 +1,66 @@
// Copyright (c) 2016, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
import java.util.concurrent.atomic.AtomicBoolean;
/**
 * Offers functionality for implementations of
 * {@link AbstractNativeReference} which have an immutable reference to the
 * underlying native C++ object
 */
public abstract class AbstractImmutableNativeReference
    extends AbstractNativeReference {

  /**
   * True while this reference is still responsible for deleting the
   * underlying C++ object; flipped to false exactly once, either by
   * {@link #close()} or by {@link #disOwnNativeHandle()}.
   */
  private final AtomicBoolean owningHandle_;

  protected AbstractImmutableNativeReference(final boolean owningHandle) {
    this.owningHandle_ = new AtomicBoolean(owningHandle);
  }

  @Override
  public boolean isOwningHandle() {
    return owningHandle_.get();
  }

  /**
   * Releases this {@code AbstractNativeReference} from the responsibility of
   * freeing the underlying native C++ object
   * <p>
   * This will prevent the object from attempting to delete the underlying
   * native object in its finalizer. This must be used when another object
   * takes over ownership of the native object or both will attempt to delete
   * the underlying object when garbage collected.
   * <p>
   * When {@code disOwnNativeHandle()} is called, {@code dispose()} will
   * subsequently take no action. As a result, incorrect use of this function
   * may cause a memory leak.
   * </p>
   *
   * @see #dispose()
   */
  protected final void disOwnNativeHandle() {
    owningHandle_.set(false);
  }

  @Override
  public void close() {
    // compareAndSet guarantees disposeInternal() runs at most once, even if
    // close() is invoked repeatedly or from multiple threads concurrently
    final boolean wasOwner = owningHandle_.compareAndSet(true, false);
    if (wasOwner) {
      disposeInternal();
    }
  }

  /**
   * The helper function of {@link AbstractImmutableNativeReference#dispose()}
   * which all subclasses of {@code AbstractImmutableNativeReference} must
   * implement to release their underlying native C++ objects.
   */
  protected abstract void disposeInternal();
}

@ -0,0 +1,76 @@
// Copyright (c) 2016, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
/**
 * AbstractNativeReference is the base-class of all RocksDB classes that have
 * a pointer to a native C++ {@code rocksdb} object.
 * <p>
 * AbstractNativeReference has the {@link AbstractNativeReference#dispose()}
 * method, which frees its associated C++ object.</p>
 * <p>
 * This function should be called manually, however, if required it will be
 * called automatically during the regular Java GC process via
 * {@link AbstractNativeReference#finalize()}.</p>
 * <p>
 * Note - Java can only see the long member variable (which is the C++ pointer
 * value to the native object), as such it does not know the real size of the
 * object and therefore may assign a low GC priority for it; So it is strongly
 * suggested that you manually dispose of objects when you are finished with
 * them.</p>
 */
public abstract class AbstractNativeReference implements AutoCloseable {

  /**
   * Returns true if we are responsible for freeing the underlying C++ object
   *
   * @return true if we are responsible to free the C++ object
   *
   * @see #dispose()
   */
  protected abstract boolean isOwningHandle();

  /**
   * Frees the underlying C++ object
   * <p>
   * It is strongly recommended that the developer calls this after they
   * have finished using the object.</p>
   * <p>
   * Note, that once an instance of {@link AbstractNativeReference} has been
   * disposed, calling any of its functions will lead to undefined
   * behavior.</p>
   */
  @Override
  public abstract void close();

  /**
   * Frees the underlying C++ object by delegating to
   * {@link AbstractNativeReference#close()}.
   *
   * @deprecated Instead use {@link AbstractNativeReference#close()}
   */
  @Deprecated
  public final void dispose() {
    close();
  }

  /**
   * Simply calls {@link AbstractNativeReference#dispose()} to free
   * any underlying C++ object reference which has not yet been manually
   * released.
   *
   * @deprecated You should not rely on GC of Rocks objects, and instead should
   * either call {@link AbstractNativeReference#close()} manually or make
   * use of some sort of ARM (Automatic Resource Management) such as
   * Java 7's <a href="https://docs.oracle.com/javase/tutorial/essential/exceptions/tryResourceClose.html">try-with-resources</a>
   * statement
   */
  @Override
  @Deprecated
  protected void finalize() throws Throwable {
    if (isOwningHandle()) {
      //TODO(AR) log a warning message... developer should have called close()
    }
    dispose();
    super.finalize();
  }
}

@ -25,8 +25,7 @@ public abstract class AbstractRocksIterator<P extends RocksObject>
protected AbstractRocksIterator(final P parent,
final long nativeHandle) {
super();
nativeHandle_ = nativeHandle;
super(nativeHandle);
// parent must point to a valid RocksDB instance.
assert (parent != null);
// RocksIterator must hold a reference to the related parent instance
@ -37,43 +36,43 @@ public abstract class AbstractRocksIterator<P extends RocksObject>
@Override
public boolean isValid() {
assert (isInitialized());
assert (isOwningHandle());
return isValid0(nativeHandle_);
}
@Override
public void seekToFirst() {
assert (isInitialized());
assert (isOwningHandle());
seekToFirst0(nativeHandle_);
}
@Override
public void seekToLast() {
assert (isInitialized());
assert (isOwningHandle());
seekToLast0(nativeHandle_);
}
@Override
public void seek(byte[] target) {
assert (isInitialized());
assert (isOwningHandle());
seek0(nativeHandle_, target, target.length);
}
@Override
public void next() {
assert (isInitialized());
assert (isOwningHandle());
next0(nativeHandle_);
}
@Override
public void prev() {
assert (isInitialized());
assert (isOwningHandle());
prev0(nativeHandle_);
}
@Override
public void status() throws RocksDBException {
assert (isInitialized());
assert (isOwningHandle());
status0(nativeHandle_);
}
@ -87,15 +86,11 @@ public abstract class AbstractRocksIterator<P extends RocksObject>
*/
@Override
protected void disposeInternal() {
synchronized (parent_) {
assert (isInitialized());
if (parent_.isInitialized()) {
if (parent_.isOwningHandle()) {
disposeInternal(nativeHandle_);
}
}
}
abstract void disposeInternal(long handle);
abstract boolean isValid0(long handle);
abstract void seekToFirst0(long handle);
abstract void seekToLast0(long handle);

@ -24,7 +24,15 @@ package org.rocksdb;
* C++ BaseComparatorJniCallback subclass, which in turn destroys the
* Java @see org.rocksdb.AbstractSlice subclass Objects.
*/
abstract class AbstractSlice<T> extends RocksObject {
abstract class AbstractSlice<T> extends RocksMutableObject {
protected AbstractSlice() {
super();
}
protected AbstractSlice(final long nativeHandle) {
super(nativeHandle);
}
/**
* Returns the data of the slice.
@ -34,8 +42,7 @@ abstract class AbstractSlice<T> extends RocksObject {
* @see org.rocksdb.AbstractSlice#data0(long)
*/
public T data() {
assert (isInitialized());
return data0(nativeHandle_);
return data0(getNativeHandle());
}
/**
@ -56,8 +63,7 @@ abstract class AbstractSlice<T> extends RocksObject {
* @return The length in bytes.
*/
public int size() {
assert (isInitialized());
return size0(nativeHandle_);
return size0(getNativeHandle());
}
/**
@ -67,8 +73,7 @@ abstract class AbstractSlice<T> extends RocksObject {
* @return true if there is no data, false otherwise.
*/
public boolean empty() {
assert (isInitialized());
return empty0(nativeHandle_);
return empty0(getNativeHandle());
}
/**
@ -80,8 +85,7 @@ abstract class AbstractSlice<T> extends RocksObject {
* @return The string representation of the data.
*/
public String toString(final boolean hex) {
assert (isInitialized());
return toString0(nativeHandle_, hex);
return toString0(getNativeHandle(), hex);
}
@Override
@ -101,8 +105,15 @@ abstract class AbstractSlice<T> extends RocksObject {
*/
public int compare(final AbstractSlice<?> other) {
assert (other != null);
assert (isInitialized());
return compare0(nativeHandle_, other.nativeHandle_);
if(!isOwningHandle()) {
return other.isOwningHandle() ? -1 : 0;
} else {
if(!other.isOwningHandle()) {
return 1;
} else {
return compare0(getNativeHandle(), other.getNativeHandle());
}
}
}
@Override
@ -141,13 +152,19 @@ abstract class AbstractSlice<T> extends RocksObject {
*/
public boolean startsWith(final AbstractSlice<?> prefix) {
if (prefix != null) {
assert (isInitialized());
return startsWith0(nativeHandle_, prefix.nativeHandle_);
return startsWith0(getNativeHandle(), prefix.getNativeHandle());
} else {
return false;
}
}
protected native static long createNewSliceFromString(final String str);
private native int size0(long handle);
private native boolean empty0(long handle);
private native String toString0(long handle, boolean hex);
private native int compare0(long handle, long otherHandle);
private native boolean startsWith0(long handle, long otherHandle);
/**
* Deletes underlying C++ slice pointer.
* Note that this function should be called only after all
@ -155,17 +172,6 @@ abstract class AbstractSlice<T> extends RocksObject {
* Otherwise an undefined behavior will occur.
*/
@Override
protected void disposeInternal() {
assert(isInitialized());
disposeInternal(nativeHandle_);
}
protected native void createNewSliceFromString(String str);
private native int size0(long handle);
private native boolean empty0(long handle);
private native String toString0(long handle, boolean hex);
private native int compare0(long handle, long otherHandle);
private native boolean startsWith0(long handle, long otherHandle);
private native void disposeInternal(long handle);
protected final native void disposeInternal(final long handle);
}

@ -5,88 +5,93 @@
package org.rocksdb;
public abstract class AbstractWriteBatch extends RocksObject implements WriteBatchInterface {
public abstract class AbstractWriteBatch extends RocksObject
implements WriteBatchInterface {
protected AbstractWriteBatch(final long nativeHandle) {
super(nativeHandle);
}
@Override
public int count() {
assert (isInitialized());
return count0();
assert (isOwningHandle());
return count0(nativeHandle_);
}
@Override
public void put(byte[] key, byte[] value) {
assert (isInitialized());
put(key, key.length, value, value.length);
assert (isOwningHandle());
put(nativeHandle_, key, key.length, value, value.length);
}
@Override
public void put(ColumnFamilyHandle columnFamilyHandle, byte[] key, byte[] value) {
assert (isInitialized());
put(key, key.length, value, value.length, columnFamilyHandle.nativeHandle_);
public void put(ColumnFamilyHandle columnFamilyHandle, byte[] key,
byte[] value) {
assert (isOwningHandle());
put(nativeHandle_, key, key.length, value, value.length,
columnFamilyHandle.nativeHandle_);
}
@Override
public void merge(byte[] key, byte[] value) {
assert (isInitialized());
merge(key, key.length, value, value.length);
assert (isOwningHandle());
merge(nativeHandle_, key, key.length, value, value.length);
}
@Override
public void merge(ColumnFamilyHandle columnFamilyHandle, byte[] key, byte[] value) {
assert (isInitialized());
merge(key, key.length, value, value.length, columnFamilyHandle.nativeHandle_);
public void merge(ColumnFamilyHandle columnFamilyHandle, byte[] key,
byte[] value) {
assert (isOwningHandle());
merge(nativeHandle_, key, key.length, value, value.length,
columnFamilyHandle.nativeHandle_);
}
@Override
public void remove(byte[] key) {
assert (isInitialized());
remove(key, key.length);
assert (isOwningHandle());
remove(nativeHandle_, key, key.length);
}
@Override
public void remove(ColumnFamilyHandle columnFamilyHandle, byte[] key) {
assert (isInitialized());
remove(key, key.length, columnFamilyHandle.nativeHandle_);
assert (isOwningHandle());
remove(nativeHandle_, key, key.length, columnFamilyHandle.nativeHandle_);
}
@Override
public void putLogData(byte[] blob) {
assert (isInitialized());
putLogData(blob, blob.length);
assert (isOwningHandle());
putLogData(nativeHandle_, blob, blob.length);
}
@Override
public void clear() {
assert (isInitialized());
clear0();
}
/**
* Delete the c++ side pointer.
*/
@Override
protected void disposeInternal() {
assert (isInitialized());
disposeInternal(nativeHandle_);
assert (isOwningHandle());
clear0(nativeHandle_);
}
abstract void disposeInternal(long handle);
abstract int count0();
abstract int count0(final long handle);
abstract void put(byte[] key, int keyLen, byte[] value, int valueLen);
abstract void put(final long handle, final byte[] key, final int keyLen,
final byte[] value, final int valueLen);
abstract void put(byte[] key, int keyLen, byte[] value, int valueLen, long cfHandle);
abstract void put(final long handle, final byte[] key, final int keyLen,
final byte[] value, final int valueLen, final long cfHandle);
abstract void merge(byte[] key, int keyLen, byte[] value, int valueLen);
abstract void merge(final long handle, final byte[] key, final int keyLen,
final byte[] value, final int valueLen);
abstract void merge(byte[] key, int keyLen, byte[] value, int valueLen, long cfHandle);
abstract void merge(final long handle, final byte[] key, final int keyLen,
final byte[] value, final int valueLen, final long cfHandle);
abstract void remove(byte[] key, int keyLen);
abstract void remove(final long handle, final byte[] key,
final int keyLen);
abstract void remove(byte[] key, int keyLen, long cfHandle);
abstract void remove(final long handle, final byte[] key,
final int keyLen, final long cfHandle);
abstract void putLogData(byte[] blob, int blobLen);
abstract void putLogData(final long handle, final byte[] blob,
final int blobLen);
abstract void clear0();
abstract void clear0(final long handle);
}

@ -19,8 +19,8 @@ import java.util.List;
*/
public class BackupEngine extends RocksObject implements AutoCloseable {
protected BackupEngine() {
super();
protected BackupEngine(final long nativeHandle) {
super(nativeHandle);
}
/**
@ -30,12 +30,11 @@ public class BackupEngine extends RocksObject implements AutoCloseable {
* @param options Any options for the backup engine
*
* @return A new BackupEngine instance
* @throws RocksDBException thrown if the backup engine could not be opened
*/
public static BackupEngine open(final Env env,
final BackupableDBOptions options) throws RocksDBException {
final BackupEngine be = new BackupEngine();
be.open(env.nativeHandle_, options.nativeHandle_);
return be;
return new BackupEngine(open(env.nativeHandle_, options.nativeHandle_));
}
/**
@ -47,6 +46,8 @@ public class BackupEngine extends RocksObject implements AutoCloseable {
* @param db The database to backup
*
* Note - This method is not thread safe
*
* @throws RocksDBException thrown if a new backup could not be created
*/
public void createNewBackup(final RocksDB db) throws RocksDBException {
createNewBackup(db, false);
@ -70,11 +71,13 @@ public class BackupEngine extends RocksObject implements AutoCloseable {
* parameter.
*
* Note - This method is not thread safe
*
* @throws RocksDBException thrown if a new backup could not be created
*/
public void createNewBackup(
final RocksDB db, final boolean flushBeforeBackup)
throws RocksDBException {
assert (isInitialized());
assert (isOwningHandle());
createNewBackup(nativeHandle_, db.nativeHandle_, flushBeforeBackup);
}
@ -85,7 +88,7 @@ public class BackupEngine extends RocksObject implements AutoCloseable {
* @return A list of information about each available backup
*/
public List<BackupInfo> getBackupInfo() {
assert (isInitialized());
assert (isOwningHandle());
return getBackupInfo(nativeHandle_);
}
@ -97,7 +100,7 @@ public class BackupEngine extends RocksObject implements AutoCloseable {
* @return array of backup ids as int ids.
*/
public int[] getCorruptedBackups() {
assert(isInitialized());
assert(isOwningHandle());
return getCorruptedBackups(nativeHandle_);
}
@ -110,7 +113,7 @@ public class BackupEngine extends RocksObject implements AutoCloseable {
* native library.
*/
public void garbageCollect() throws RocksDBException {
assert(isInitialized());
assert(isOwningHandle());
garbageCollect(nativeHandle_);
}
@ -118,10 +121,12 @@ public class BackupEngine extends RocksObject implements AutoCloseable {
* Deletes old backups, keeping just the latest numBackupsToKeep
*
* @param numBackupsToKeep The latest n backups to keep
*
* @throws RocksDBException thrown if the old backups could not be deleted
*/
public void purgeOldBackups(
final int numBackupsToKeep) throws RocksDBException {
assert (isInitialized());
assert (isOwningHandle());
purgeOldBackups(nativeHandle_, numBackupsToKeep);
}
@ -129,9 +134,11 @@ public class BackupEngine extends RocksObject implements AutoCloseable {
* Deletes a backup
*
* @param backupId The id of the backup to delete
*
* @throws RocksDBException thrown if the backup could not be deleted
*/
public void deleteBackup(final int backupId) throws RocksDBException {
assert (isInitialized());
assert (isOwningHandle());
deleteBackup(nativeHandle_, backupId);
}
@ -154,11 +161,13 @@ public class BackupEngine extends RocksObject implements AutoCloseable {
* @param walDir The location of the log files for your database,
* often the same as dbDir
* @param restoreOptions Options for controlling the restore
*
* @throws RocksDBException thrown if the database could not be restored
*/
public void restoreDbFromBackup(
final int backupId, final String dbDir, final String walDir,
final RestoreOptions restoreOptions) throws RocksDBException {
assert (isInitialized());
assert (isOwningHandle());
restoreDbFromBackup(nativeHandle_, backupId, dbDir, walDir,
restoreOptions.nativeHandle_);
}
@ -166,34 +175,24 @@ public class BackupEngine extends RocksObject implements AutoCloseable {
/**
* Restore the database from the latest backup
*
* @param dbDir The directory to restore the backup to, i.e. where your database is
* @param walDir The location of the log files for your database, often the same as dbDir
* @param dbDir The directory to restore the backup to, i.e. where your
* database is
* @param walDir The location of the log files for your database, often the
* same as dbDir
* @param restoreOptions Options for controlling the restore
*
* @throws RocksDBException thrown if the database could not be restored
*/
public void restoreDbFromLatestBackup(
final String dbDir, final String walDir,
final RestoreOptions restoreOptions) throws RocksDBException {
assert (isInitialized());
assert (isOwningHandle());
restoreDbFromLatestBackup(nativeHandle_, dbDir, walDir,
restoreOptions.nativeHandle_);
}
/**
* Close the Backup Engine
*/
@Override
public void close() throws RocksDBException {
dispose();
}
@Override
protected void disposeInternal() {
assert (isInitialized());
disposeInternal(nativeHandle_);
}
private native void open(final long env, final long backupableDbOptions)
throws RocksDBException;
private native static long open(final long env,
final long backupableDbOptions) throws RocksDBException;
private native void createNewBackup(final long handle, final long dbHandle,
final boolean flushBeforeBackup) throws RocksDBException;
@ -218,5 +217,5 @@ public class BackupEngine extends RocksObject implements AutoCloseable {
final String dbDir, final String walDir, final long restoreOptionsHandle)
throws RocksDBException;
private native void disposeInternal(final long handle);
@Override protected final native void disposeInternal(final long handle);
}

@ -21,8 +21,8 @@ public class BackupableDB extends RocksDB {
*
* @param opt {@link org.rocksdb.Options} to set for the database.
* @param bopt {@link org.rocksdb.BackupableDBOptions} to use.
* @param db_path Path to store data to. The path for storing the backup should be
* specified in the {@link org.rocksdb.BackupableDBOptions}.
* @param db_path Path to store data to. The path for storing the backup
* should be specified in the {@link org.rocksdb.BackupableDBOptions}.
*
* @return {@link BackupableDB} reference to the opened database.
*
@ -33,9 +33,9 @@ public class BackupableDB extends RocksDB {
final Options opt, final BackupableDBOptions bopt, final String db_path)
throws RocksDBException {
RocksDB db = RocksDB.open(opt, db_path);
BackupableDB bdb = new BackupableDB();
bdb.open(db.nativeHandle_, bopt.nativeHandle_);
final RocksDB db = RocksDB.open(opt, db_path);
final BackupableDB bdb = new BackupableDB(open(db.nativeHandle_,
bopt.nativeHandle_));
// Prevent the RocksDB object from attempting to delete
// the underly C++ DB object.
@ -56,7 +56,7 @@ public class BackupableDB extends RocksDB {
*/
public void createNewBackup(final boolean flushBeforeBackup)
throws RocksDBException {
assert(isInitialized());
assert(isOwningHandle());
createNewBackup(nativeHandle_, flushBeforeBackup);
}
@ -70,7 +70,7 @@ public class BackupableDB extends RocksDB {
*/
public void purgeOldBackups(final int numBackupsToKeep)
throws RocksDBException {
assert(isInitialized());
assert(isOwningHandle());
purgeOldBackups(nativeHandle_, numBackupsToKeep);
}
@ -83,7 +83,7 @@ public class BackupableDB extends RocksDB {
* native library.
*/
public void deleteBackup(final int backupId) throws RocksDBException {
assert(isInitialized());
assert(isOwningHandle());
deleteBackup0(nativeHandle_, backupId);
}
@ -94,7 +94,7 @@ public class BackupableDB extends RocksDB {
* @return List of {@link BackupInfo} instances.
*/
public List<BackupInfo> getBackupInfos() {
assert(isInitialized());
assert(isOwningHandle());
return getBackupInfo(nativeHandle_);
}
@ -106,7 +106,7 @@ public class BackupableDB extends RocksDB {
* @return array of backup ids as int ids.
*/
public int[] getCorruptedBackups() {
assert(isInitialized());
assert(isOwningHandle());
return getCorruptedBackups(nativeHandle_);
}
@ -119,7 +119,7 @@ public class BackupableDB extends RocksDB {
* native library.
*/
public void garbageCollect() throws RocksDBException {
assert(isInitialized());
assert(isOwningHandle());
garbageCollect(nativeHandle_);
}
@ -132,19 +132,19 @@ public class BackupableDB extends RocksDB {
* of the c++ {@code rocksdb::BackupableDB} and should be transparent
* to Java developers.</p>
*/
@Override public synchronized void close() {
if (isInitialized()) {
@Override public void close() {
super.close();
}
}
/**
* <p>A protected construction that will be used in the static
* factory method {@link #open(Options, BackupableDBOptions, String)}.
* </p>
*
* @param nativeHandle The native handle of the C++ BackupableDB object
*/
protected BackupableDB() {
super();
protected BackupableDB(final long nativeHandle) {
super(nativeHandle);
}
@Override protected void finalize() throws Throwable {
@ -152,7 +152,8 @@ public class BackupableDB extends RocksDB {
super.finalize();
}
protected native void open(long rocksDBHandle, long backupDBOptionsHandle);
protected native static long open(final long rocksDBHandle,
final long backupDBOptionsHandle);
protected native void createNewBackup(long handle, boolean flag)
throws RocksDBException;
protected native void purgeOldBackups(long handle, int numBackupsToKeep)

@ -6,7 +6,6 @@
package org.rocksdb;
import java.io.File;
import java.nio.file.Path;
/**
* <p>BackupableDBOptions to control the behavior of a backupable database.
@ -22,17 +21,22 @@ public class BackupableDBOptions extends RocksObject {
/**
* <p>BackupableDBOptions constructor.</p>
*
* @param path Where to keep the backup files. Has to be different than db name.
* Best to set this to {@code db name_ + "/backups"}
* @param path Where to keep the backup files. Has to be different than db
* name. Best to set this to {@code db name_ + "/backups"}
* @throws java.lang.IllegalArgumentException if illegal path is used.
*/
public BackupableDBOptions(final String path) {
super();
File backupPath = path == null ? null : new File(path);
if (backupPath == null || !backupPath.isDirectory() || !backupPath.canWrite()) {
super(newBackupableDBOptions(ensureWritableFile(path)));
}
private static String ensureWritableFile(final String path) {
final File backupPath = path == null ? null : new File(path);
if (backupPath == null || !backupPath.isDirectory() ||
!backupPath.canWrite()) {
throw new IllegalArgumentException("Illegal path provided.");
} else {
return path;
}
newBackupableDBOptions(path);
}
/**
@ -41,24 +45,25 @@ public class BackupableDBOptions extends RocksObject {
* @return the path to the BackupableDB directory.
*/
public String backupDir() {
assert(isInitialized());
assert(isOwningHandle());
return backupDir(nativeHandle_);
}
/**
* <p>Share table files between backups.</p>
*
* @param shareTableFiles If {@code share_table_files == true}, backup will assume
* that table files with same name have the same contents. This enables incremental
* backups and avoids unnecessary data copies. If {@code share_table_files == false},
* each backup will be on its own and will not share any data with other backups.
* @param shareTableFiles If {@code share_table_files == true}, backup will
* assume that table files with same name have the same contents. This
* enables incremental backups and avoids unnecessary data copies. If
* {@code share_table_files == false}, each backup will be on its own and
* will not share any data with other backups.
*
* <p>Default: true</p>
*
* @return instance of current BackupableDBOptions.
*/
public BackupableDBOptions setShareTableFiles(final boolean shareTableFiles) {
assert(isInitialized());
assert(isOwningHandle());
setShareTableFiles(nativeHandle_, shareTableFiles);
return this;
}
@ -70,24 +75,24 @@ public class BackupableDBOptions extends RocksObject {
* backups.
*/
public boolean shareTableFiles() {
assert(isInitialized());
assert(isOwningHandle());
return shareTableFiles(nativeHandle_);
}
/**
* <p>Set synchronous backups.</p>
*
* @param sync If {@code sync == true}, we can guarantee you'll get consistent backup
* even on a machine crash/reboot. Backup process is slower with sync enabled.
* If {@code sync == false}, we don't guarantee anything on machine reboot.
* However,chances are some of the backups are consistent.
* @param sync If {@code sync == true}, we can guarantee you'll get consistent
* backup even on a machine crash/reboot. Backup process is slower with sync
* enabled. If {@code sync == false}, we don't guarantee anything on machine
* reboot. However, chances are some of the backups are consistent.
*
* <p>Default: true</p>
*
* @return instance of current BackupableDBOptions.
*/
public BackupableDBOptions setSync(final boolean sync) {
assert(isInitialized());
assert(isOwningHandle());
setSync(nativeHandle_, sync);
return this;
}
@ -98,21 +103,22 @@ public class BackupableDBOptions extends RocksObject {
* @return boolean value if synchronous backups are configured.
*/
public boolean sync() {
assert(isInitialized());
assert(isOwningHandle());
return sync(nativeHandle_);
}
/**
* <p>Set if old data will be destroyed.</p>
*
* @param destroyOldData If true, it will delete whatever backups there are already.
* @param destroyOldData If true, it will delete whatever backups there are
* already.
*
* <p>Default: false</p>
*
* @return instance of current BackupableDBOptions.
*/
public BackupableDBOptions setDestroyOldData(final boolean destroyOldData) {
assert(isInitialized());
assert(isOwningHandle());
setDestroyOldData(nativeHandle_, destroyOldData);
return this;
}
@ -123,23 +129,23 @@ public class BackupableDBOptions extends RocksObject {
* @return boolean value indicating if old data will be destroyed.
*/
public boolean destroyOldData() {
assert(isInitialized());
assert(isOwningHandle());
return destroyOldData(nativeHandle_);
}
/**
* <p>Set if log files shall be persisted.</p>
*
* @param backupLogFiles If false, we won't backup log files. This option can be
* useful for backing up in-memory databases where log file are persisted,but table
* files are in memory.
* @param backupLogFiles If false, we won't backup log files. This option can
* be useful for backing up in-memory databases where log file are
* persisted, but table files are in memory.
*
* <p>Default: true</p>
*
* @return instance of current BackupableDBOptions.
*/
public BackupableDBOptions setBackupLogFiles(final boolean backupLogFiles) {
assert(isInitialized());
assert(isOwningHandle());
setBackupLogFiles(nativeHandle_, backupLogFiles);
return this;
}
@ -150,73 +156,76 @@ public class BackupableDBOptions extends RocksObject {
* @return boolean value indicating if log files will be persisted.
*/
public boolean backupLogFiles() {
assert(isInitialized());
assert(isOwningHandle());
return backupLogFiles(nativeHandle_);
}
/**
* <p>Set backup rate limit.</p>
*
* @param backupRateLimit Max bytes that can be transferred in a second during backup.
* If 0 or negative, then go as fast as you can.
* @param backupRateLimit Max bytes that can be transferred in a second during
* backup. If 0 or negative, then go as fast as you can.
*
* <p>Default: 0</p>
*
* @return instance of current BackupableDBOptions.
*/
public BackupableDBOptions setBackupRateLimit(long backupRateLimit) {
assert(isInitialized());
assert(isOwningHandle());
backupRateLimit = (backupRateLimit <= 0) ? 0 : backupRateLimit;
setBackupRateLimit(nativeHandle_, backupRateLimit);
return this;
}
/**
* <p>Return backup rate limit which described the max bytes that can be transferred in a
* second during backup.</p>
* <p>Return backup rate limit which described the max bytes that can be
* transferred in a second during backup.</p>
*
* @return numerical value describing the backup transfer limit in bytes per second.
* @return numerical value describing the backup transfer limit in bytes per
* second.
*/
public long backupRateLimit() {
assert(isInitialized());
assert(isOwningHandle());
return backupRateLimit(nativeHandle_);
}
/**
* <p>Set restore rate limit.</p>
*
* @param restoreRateLimit Max bytes that can be transferred in a second during restore.
* If 0 or negative, then go as fast as you can.
* @param restoreRateLimit Max bytes that can be transferred in a second
* during restore. If 0 or negative, then go as fast as you can.
*
* <p>Default: 0</p>
*
* @return instance of current BackupableDBOptions.
*/
public BackupableDBOptions setRestoreRateLimit(long restoreRateLimit) {
assert(isInitialized());
assert(isOwningHandle());
restoreRateLimit = (restoreRateLimit <= 0) ? 0 : restoreRateLimit;
setRestoreRateLimit(nativeHandle_, restoreRateLimit);
return this;
}
/**
* <p>Return restore rate limit which described the max bytes that can be transferred in a
* second during restore.</p>
* <p>Return restore rate limit which described the max bytes that can be
* transferred in a second during restore.</p>
*
* @return numerical value describing the restore transfer limit in bytes per second.
* @return numerical value describing the restore transfer limit in bytes per
* second.
*/
public long restoreRateLimit() {
assert(isInitialized());
assert(isOwningHandle());
return restoreRateLimit(nativeHandle_);
}
/**
* <p>Only used if share_table_files is set to true. If true, will consider that
* backups can come from different databases, hence a sst is not uniquely
* identified by its name, but by the triple (file name, crc32, file length)</p>
* <p>Only used if share_table_files is set to true. If true, will consider
* that backups can come from different databases, hence a sst is not uniquely
* identified by its name, but by the triple (file name, crc32, file length)
* </p>
*
* @param shareFilesWithChecksum boolean value indicating if SST files are stored
* using the triple (file name, crc32, file length) and not its name.
* @param shareFilesWithChecksum boolean value indicating if SST files are
* stored using the triple (file name, crc32, file length) and not its name.
*
* <p>Note: this is an experimental option, and you'll need to set it manually
* turn it on only if you know what you're doing*</p>
@ -227,7 +236,7 @@ public class BackupableDBOptions extends RocksObject {
*/
public BackupableDBOptions setShareFilesWithChecksum(
final boolean shareFilesWithChecksum) {
assert(isInitialized());
assert(isOwningHandle());
setShareFilesWithChecksum(nativeHandle_, shareFilesWithChecksum);
return this;
}
@ -239,19 +248,11 @@ public class BackupableDBOptions extends RocksObject {
* is active.
*/
public boolean shareFilesWithChecksum() {
assert(isInitialized());
assert(isOwningHandle());
return shareFilesWithChecksum(nativeHandle_);
}
/**
* Release the memory allocated for the current instance
* in the c++ side.
*/
@Override protected void disposeInternal() {
disposeInternal(nativeHandle_);
}
private native void newBackupableDBOptions(String path);
private native static long newBackupableDBOptions(final String path);
private native String backupDir(long handle);
private native void setShareTableFiles(long handle, boolean flag);
private native boolean shareTableFiles(long handle);
@ -267,5 +268,5 @@ public class BackupableDBOptions extends RocksObject {
private native long restoreRateLimit(long handle);
private native void setShareFilesWithChecksum(long handle, boolean flag);
private native boolean shareFilesWithChecksum(long handle);
private native void disposeInternal(long handle);
@Override protected final native void disposeInternal(final long handle);
}

@ -22,8 +22,6 @@ public class BloomFilter extends Filter {
private static final int DEFAULT_BITS_PER_KEY = 10;
private static final boolean DEFAULT_MODE = true;
private final int bitsPerKey_;
private final boolean useBlockBasedMode_;
/**
* BloomFilter constructor
@ -73,17 +71,9 @@ public class BloomFilter extends Filter {
* @param useBlockBasedMode use block based mode or full filter mode
*/
public BloomFilter(final int bitsPerKey, final boolean useBlockBasedMode) {
super();
bitsPerKey_ = bitsPerKey;
useBlockBasedMode_ = useBlockBasedMode;
createNewFilter();
super(createNewBloomFilter(bitsPerKey, useBlockBasedMode));
}
@Override
protected final void createNewFilter() {
createNewBloomFilter(bitsPerKey_, useBlockBasedMode_);
}
private native void createNewBloomFilter(int bitsKeyKey,
boolean useBlockBasedMode);
private native static long createNewBloomFilter(final int bitsKeyKey,
final boolean useBlockBasedMode);
}

@ -27,7 +27,7 @@ public class Checkpoint extends RocksObject {
if (db == null) {
throw new IllegalArgumentException(
"RocksDB instance shall not be null.");
} else if (!db.isInitialized()) {
} else if (!db.isOwningHandle()) {
throw new IllegalStateException(
"RocksDB instance must be initialized.");
}
@ -51,21 +51,15 @@ public class Checkpoint extends RocksObject {
createCheckpoint(nativeHandle_, checkpointPath);
}
@Override
protected void disposeInternal() {
disposeInternal(nativeHandle_);
private Checkpoint(final RocksDB db) {
super(newCheckpoint(db.nativeHandle_));
this.db_ = db;
}
private Checkpoint(RocksDB db) {
super();
nativeHandle_ = newCheckpoint(db.nativeHandle_);
db_ = db;
}
private RocksDB db_;
private final RocksDB db_;
private static native long newCheckpoint(long dbHandle);
private native void disposeInternal(long handle);
@Override protected final native void disposeInternal(final long handle);
private native void createCheckpoint(long handle, String checkpointPath)
throws RocksDBException;

@ -12,34 +12,31 @@ package org.rocksdb;
public class ColumnFamilyHandle extends RocksObject {
ColumnFamilyHandle(final RocksDB rocksDB,
final long nativeHandle) {
super();
nativeHandle_ = nativeHandle;
super(nativeHandle);
// rocksDB must point to a valid RocksDB instance;
assert(rocksDB != null);
// ColumnFamilyHandle must hold a reference to the related RocksDB instance
// to guarantee that while a GC cycle starts ColumnFamilyHandle instances
// are freed prior to RocksDB instances.
rocksDB_ = rocksDB;
this.rocksDB_ = rocksDB;
}
/**
* <p>Deletes underlying C++ iterator pointer.</p>
*
* <p>Note: the underlying handle can only be safely deleted if the RocksDB
* instance related to a certain ColumnFamilyHandle is still valid and initialized.
* Therefore {@code disposeInternal()} checks if the RocksDB is initialized
* before freeing the native handle.</p>
* instance related to a certain ColumnFamilyHandle is still valid and
* initialized. Therefore {@code disposeInternal()} checks if the RocksDB is
* initialized before freeing the native handle.</p>
*/
@Override protected void disposeInternal() {
synchronized (rocksDB_) {
assert (isInitialized());
if (rocksDB_.isInitialized()) {
disposeInternal(nativeHandle_);
}
@Override
protected void disposeInternal() {
if(rocksDB_.isOwningHandle()) {
disposeInternal(nativeHandle_);
}
}
private native void disposeInternal(long handle);
@Override protected final native void disposeInternal(final long handle);
private final RocksDB rocksDB_;
}

@ -13,8 +13,8 @@ import java.util.Properties;
* ColumnFamilyOptions to control the behavior of a database. It will be used
* during the creation of a {@link org.rocksdb.RocksDB} (i.e., RocksDB.open()).
*
* If {@link #dispose()} function is not called, then it will be GC'd automatically
* and native resources will be released as part of the process.
* If {@link #dispose()} function is not called, then it will be GC'd
* automatically and native resources will be released as part of the process.
*/
public class ColumnFamilyOptions extends RocksObject
implements ColumnFamilyOptionsInterface {
@ -29,8 +29,7 @@ public class ColumnFamilyOptions extends RocksObject
* an {@code rocksdb::DBOptions} in the c++ side.
*/
public ColumnFamilyOptions() {
super();
newColumnFamilyOptions();
super(newColumnFamilyOptions());
}
/**
@ -113,8 +112,9 @@ public class ColumnFamilyOptions extends RocksObject
}
@Override
public ColumnFamilyOptions setComparator(final BuiltinComparator builtinComparator) {
assert(isInitialized());
public ColumnFamilyOptions setComparator(
final BuiltinComparator builtinComparator) {
assert(isOwningHandle());
setComparatorHandle(nativeHandle_, builtinComparator.ordinal());
return this;
}
@ -122,15 +122,15 @@ public class ColumnFamilyOptions extends RocksObject
@Override
public ColumnFamilyOptions setComparator(
final AbstractComparator<? extends AbstractSlice<?>> comparator) {
assert (isInitialized());
setComparatorHandle(nativeHandle_, comparator.nativeHandle_);
assert (isOwningHandle());
setComparatorHandle(nativeHandle_, comparator.getNativeHandle());
comparator_ = comparator;
return this;
}
@Override
public ColumnFamilyOptions setMergeOperatorName(final String name) {
assert (isInitialized());
assert (isOwningHandle());
if (name == null) {
throw new IllegalArgumentException(
"Merge operator name must not be null.");
@ -140,13 +140,15 @@ public class ColumnFamilyOptions extends RocksObject
}
@Override
public ColumnFamilyOptions setMergeOperator(final MergeOperator mergeOperator) {
public ColumnFamilyOptions setMergeOperator(
final MergeOperator mergeOperator) {
setMergeOperator(nativeHandle_, mergeOperator.newMergeOperatorHandle());
return this;
}
public ColumnFamilyOptions setCompactionFilter(
final AbstractCompactionFilter<? extends AbstractSlice<?>> compactionFilter) {
final AbstractCompactionFilter<? extends AbstractSlice<?>>
compactionFilter) {
setCompactionFilterHandle(nativeHandle_, compactionFilter.nativeHandle_);
compactionFilter_ = compactionFilter;
return this;
@ -154,28 +156,28 @@ public class ColumnFamilyOptions extends RocksObject
@Override
public ColumnFamilyOptions setWriteBufferSize(final long writeBufferSize) {
assert(isInitialized());
assert(isOwningHandle());
setWriteBufferSize(nativeHandle_, writeBufferSize);
return this;
}
@Override
public long writeBufferSize() {
assert(isInitialized());
assert(isOwningHandle());
return writeBufferSize(nativeHandle_);
}
@Override
public ColumnFamilyOptions setMaxWriteBufferNumber(
final int maxWriteBufferNumber) {
assert(isInitialized());
assert(isOwningHandle());
setMaxWriteBufferNumber(nativeHandle_, maxWriteBufferNumber);
return this;
}
@Override
public int maxWriteBufferNumber() {
assert(isInitialized());
assert(isOwningHandle());
return maxWriteBufferNumber(nativeHandle_);
}
@ -193,20 +195,21 @@ public class ColumnFamilyOptions extends RocksObject
@Override
public ColumnFamilyOptions useFixedLengthPrefixExtractor(final int n) {
assert(isInitialized());
assert(isOwningHandle());
useFixedLengthPrefixExtractor(nativeHandle_, n);
return this;
}
@Override
public ColumnFamilyOptions useCappedPrefixExtractor(final int n) {
assert(isInitialized());
assert(isOwningHandle());
useCappedPrefixExtractor(nativeHandle_, n);
return this;
}
@Override
public ColumnFamilyOptions setCompressionType(final CompressionType compressionType) {
public ColumnFamilyOptions setCompressionType(
final CompressionType compressionType) {
setCompressionType(nativeHandle_, compressionType.getValue());
return this;
}
@ -219,10 +222,10 @@ public class ColumnFamilyOptions extends RocksObject
@Override
public ColumnFamilyOptions setCompressionPerLevel(
final List<CompressionType> compressionLevels) {
final List<Byte> byteCompressionTypes = new ArrayList<>(
compressionLevels.size());
for (final CompressionType compressionLevel : compressionLevels) {
byteCompressionTypes.add(compressionLevel.getValue());
final byte[] byteCompressionTypes = new byte[
compressionLevels.size()];
for (int i = 0; i < compressionLevels.size(); i++) {
byteCompressionTypes[i] = compressionLevels.get(i).getValue();
}
setCompressionPerLevel(nativeHandle_, byteCompressionTypes);
return this;
@ -230,7 +233,7 @@ public class ColumnFamilyOptions extends RocksObject
@Override
public List<CompressionType> compressionPerLevel() {
final List<Byte> byteCompressionTypes =
final byte[] byteCompressionTypes =
compressionPerLevel(nativeHandle_);
final List<CompressionType> compressionLevels = new ArrayList<>();
for (final Byte byteCompressionType : byteCompressionTypes) {
@ -485,7 +488,7 @@ public class ColumnFamilyOptions extends RocksObject
public ColumnFamilyOptions setMaxTableFilesSizeFIFO(
final long maxTableFilesSize) {
assert(maxTableFilesSize > 0); // unsigned native type
assert(isInitialized());
assert(isOwningHandle());
setMaxTableFilesSizeFIFO(nativeHandle_, maxTableFilesSize);
return this;
}
@ -523,7 +526,8 @@ public class ColumnFamilyOptions extends RocksObject
@Override
public ColumnFamilyOptions setMaxSequentialSkipInIterations(
final long maxSequentialSkipInIterations) {
setMaxSequentialSkipInIterations(nativeHandle_, maxSequentialSkipInIterations);
setMaxSequentialSkipInIterations(nativeHandle_,
maxSequentialSkipInIterations);
return this;
}
@ -542,7 +546,7 @@ public class ColumnFamilyOptions extends RocksObject
@Override
public String memTableFactoryName() {
assert(isInitialized());
assert(isOwningHandle());
return memTableFactoryName(nativeHandle_);
}
@ -556,7 +560,7 @@ public class ColumnFamilyOptions extends RocksObject
@Override
public String tableFactoryName() {
assert(isInitialized());
assert(isOwningHandle());
return tableFactoryName(nativeHandle_);
}
@ -655,15 +659,6 @@ public class ColumnFamilyOptions extends RocksObject
return optimizeFiltersForHits(nativeHandle_);
}
/**
* Release the memory allocated for the current instance
* in the c++ side.
*/
@Override protected void disposeInternal() {
assert(isInitialized());
disposeInternal(nativeHandle_);
}
/**
* <p>Private constructor to be used by
* {@link #getColumnFamilyOptionsFromProps(java.util.Properties)}</p>
@ -671,15 +666,14 @@ public class ColumnFamilyOptions extends RocksObject
* @param handle native handle to ColumnFamilyOptions instance.
*/
private ColumnFamilyOptions(final long handle) {
super();
nativeHandle_ = handle;
super(handle);
}
private static native long getColumnFamilyOptionsFromProps(
String optString);
private native void newColumnFamilyOptions();
private native void disposeInternal(long handle);
private static native long newColumnFamilyOptions();
@Override protected final native void disposeInternal(final long handle);
private native void optimizeForPointLookup(long handle,
long blockCacheSizeMb);
@ -688,12 +682,12 @@ public class ColumnFamilyOptions extends RocksObject
private native void optimizeUniversalStyleCompaction(long handle,
long memtableMemoryBudget);
private native void setComparatorHandle(long handle, int builtinComparator);
private native void setComparatorHandle(long optHandle, long comparatorHandle);
private native void setMergeOperatorName(
long handle, String name);
private native void setMergeOperator(
long handle, long mergeOperatorHandle);
private native void setCompactionFilterHandle(long handle, long compactionFilterHandle);
private native void setComparatorHandle(long optHandle,
long comparatorHandle);
private native void setMergeOperatorName(long handle, String name);
private native void setMergeOperator(long handle, long mergeOperatorHandle);
private native void setCompactionFilterHandle(long handle,
long compactionFilterHandle);
private native void setWriteBufferSize(long handle, long writeBufferSize)
throws IllegalArgumentException;
private native long writeBufferSize(long handle);
@ -706,8 +700,8 @@ public class ColumnFamilyOptions extends RocksObject
private native void setCompressionType(long handle, byte compressionType);
private native byte compressionType(long handle);
private native void setCompressionPerLevel(long handle,
List<Byte> compressionLevels);
private native List<Byte> compressionPerLevel(long handle);
byte[] compressionLevels);
private native byte[] compressionPerLevel(long handle);
private native void useFixedLengthPrefixExtractor(
long handle, int prefixLength);
private native void useCappedPrefixExtractor(

@ -15,10 +15,18 @@ package org.rocksdb;
* using @see org.rocksdb.DirectComparator
*/
public abstract class Comparator extends AbstractComparator<Slice> {
private final long nativeHandle_;
public Comparator(final ComparatorOptions copt) {
super();
createNewComparator0(copt.nativeHandle_);
this.nativeHandle_ = createNewComparator0(copt.nativeHandle_);
}
@Override
protected final long getNativeHandle() {
return nativeHandle_;
}
private native void createNewComparator0(final long comparatorOptionsHandle);
private native long createNewComparator0(final long comparatorOptionsHandle);
}

@ -10,8 +10,7 @@ package org.rocksdb;
*/
public class ComparatorOptions extends RocksObject {
public ComparatorOptions() {
super();
newComparatorOptions();
super(newComparatorOptions());
}
/**
@ -24,7 +23,7 @@ public class ComparatorOptions extends RocksObject {
* @return true if adaptive mutex is used.
*/
public boolean useAdaptiveMutex() {
assert(isInitialized());
assert(isOwningHandle());
return useAdaptiveMutex(nativeHandle_);
}
@ -39,19 +38,14 @@ public class ComparatorOptions extends RocksObject {
* @return the reference to the current comparator options.
*/
public ComparatorOptions setUseAdaptiveMutex(final boolean useAdaptiveMutex) {
assert (isInitialized());
assert (isOwningHandle());
setUseAdaptiveMutex(nativeHandle_, useAdaptiveMutex);
return this;
}
@Override protected void disposeInternal() {
assert(isInitialized());
disposeInternal(nativeHandle_);
}
private native void newComparatorOptions();
private native static long newComparatorOptions();
private native boolean useAdaptiveMutex(final long handle);
private native void setUseAdaptiveMutex(final long handle,
final boolean useAdaptiveMutex);
private native void disposeInternal(long handle);
@Override protected final native void disposeInternal(final long handle);
}

@ -11,8 +11,8 @@ import java.util.Properties;
* DBOptions to control the behavior of a database. It will be used
* during the creation of a {@link org.rocksdb.RocksDB} (i.e., RocksDB.open()).
*
* If {@link #dispose()} function is not called, then it will be GC'd automatically
* and native resources will be released as part of the process.
* If {@link #dispose()} function is not called, then it will be GC'd
* automatically and native resources will be released as part of the process.
*/
public class DBOptions extends RocksObject implements DBOptionsInterface {
static {
@ -26,9 +26,8 @@ public class DBOptions extends RocksObject implements DBOptionsInterface {
* an {@code rocksdb::DBOptions} in the c++ side.
*/
public DBOptions() {
super();
super(newDBOptions());
numShardBits_ = DEFAULT_NUM_SHARD_BITS;
newDBOptions();
}
/**
@ -75,70 +74,70 @@ public class DBOptions extends RocksObject implements DBOptionsInterface {
@Override
public DBOptions setIncreaseParallelism(
final int totalThreads) {
assert (isInitialized());
assert(isOwningHandle());
setIncreaseParallelism(nativeHandle_, totalThreads);
return this;
}
@Override
public DBOptions setCreateIfMissing(final boolean flag) {
assert(isInitialized());
assert(isOwningHandle());
setCreateIfMissing(nativeHandle_, flag);
return this;
}
@Override
public boolean createIfMissing() {
assert(isInitialized());
assert(isOwningHandle());
return createIfMissing(nativeHandle_);
}
@Override
public DBOptions setCreateMissingColumnFamilies(
final boolean flag) {
assert(isInitialized());
assert(isOwningHandle());
setCreateMissingColumnFamilies(nativeHandle_, flag);
return this;
}
@Override
public boolean createMissingColumnFamilies() {
assert(isInitialized());
assert(isOwningHandle());
return createMissingColumnFamilies(nativeHandle_);
}
@Override
public DBOptions setErrorIfExists(
final boolean errorIfExists) {
assert(isInitialized());
assert(isOwningHandle());
setErrorIfExists(nativeHandle_, errorIfExists);
return this;
}
@Override
public boolean errorIfExists() {
assert(isInitialized());
assert(isOwningHandle());
return errorIfExists(nativeHandle_);
}
@Override
public DBOptions setParanoidChecks(
final boolean paranoidChecks) {
assert(isInitialized());
assert(isOwningHandle());
setParanoidChecks(nativeHandle_, paranoidChecks);
return this;
}
@Override
public boolean paranoidChecks() {
assert(isInitialized());
assert(isOwningHandle());
return paranoidChecks(nativeHandle_);
}
@Override
public DBOptions setRateLimiterConfig(
final RateLimiterConfig config) {
assert(isInitialized());
assert(isOwningHandle());
rateLimiterConfig_ = config;
setRateLimiter(nativeHandle_, config.newRateLimiterHandle());
return this;
@ -146,7 +145,7 @@ public class DBOptions extends RocksObject implements DBOptionsInterface {
@Override
public DBOptions setLogger(final Logger logger) {
assert(isInitialized());
assert(isOwningHandle());
setLogger(nativeHandle_, logger.nativeHandle_);
return this;
}
@ -154,14 +153,14 @@ public class DBOptions extends RocksObject implements DBOptionsInterface {
@Override
public DBOptions setInfoLogLevel(
final InfoLogLevel infoLogLevel) {
assert(isInitialized());
assert(isOwningHandle());
setInfoLogLevel(nativeHandle_, infoLogLevel.getValue());
return this;
}
@Override
public InfoLogLevel infoLogLevel() {
assert(isInitialized());
assert(isOwningHandle());
return InfoLogLevel.getInfoLogLevel(
infoLogLevel(nativeHandle_));
}
@ -169,41 +168,41 @@ public class DBOptions extends RocksObject implements DBOptionsInterface {
@Override
public DBOptions setMaxOpenFiles(
final int maxOpenFiles) {
assert(isInitialized());
assert(isOwningHandle());
setMaxOpenFiles(nativeHandle_, maxOpenFiles);
return this;
}
@Override
public int maxOpenFiles() {
assert(isInitialized());
assert(isOwningHandle());
return maxOpenFiles(nativeHandle_);
}
@Override
public DBOptions setMaxTotalWalSize(
final long maxTotalWalSize) {
assert(isInitialized());
assert(isOwningHandle());
setMaxTotalWalSize(nativeHandle_, maxTotalWalSize);
return this;
}
@Override
public long maxTotalWalSize() {
assert(isInitialized());
assert(isOwningHandle());
return maxTotalWalSize(nativeHandle_);
}
@Override
public DBOptions createStatistics() {
assert(isInitialized());
assert(isOwningHandle());
createStatistics(nativeHandle_);
return this;
}
@Override
public Statistics statisticsPtr() {
assert(isInitialized());
assert(isOwningHandle());
long statsPtr = statisticsPtr(nativeHandle_);
if(statsPtr == 0) {
@ -217,287 +216,287 @@ public class DBOptions extends RocksObject implements DBOptionsInterface {
@Override
public DBOptions setDisableDataSync(
final boolean disableDataSync) {
assert(isInitialized());
assert(isOwningHandle());
setDisableDataSync(nativeHandle_, disableDataSync);
return this;
}
@Override
public boolean disableDataSync() {
assert(isInitialized());
assert(isOwningHandle());
return disableDataSync(nativeHandle_);
}
@Override
public DBOptions setUseFsync(
final boolean useFsync) {
assert(isInitialized());
assert(isOwningHandle());
setUseFsync(nativeHandle_, useFsync);
return this;
}
@Override
public boolean useFsync() {
assert(isInitialized());
assert(isOwningHandle());
return useFsync(nativeHandle_);
}
@Override
public DBOptions setDbLogDir(
final String dbLogDir) {
assert(isInitialized());
assert(isOwningHandle());
setDbLogDir(nativeHandle_, dbLogDir);
return this;
}
@Override
public String dbLogDir() {
assert(isInitialized());
assert(isOwningHandle());
return dbLogDir(nativeHandle_);
}
@Override
public DBOptions setWalDir(
final String walDir) {
assert(isInitialized());
assert(isOwningHandle());
setWalDir(nativeHandle_, walDir);
return this;
}
@Override
public String walDir() {
assert(isInitialized());
assert(isOwningHandle());
return walDir(nativeHandle_);
}
@Override
public DBOptions setDeleteObsoleteFilesPeriodMicros(
final long micros) {
assert(isInitialized());
assert(isOwningHandle());
setDeleteObsoleteFilesPeriodMicros(nativeHandle_, micros);
return this;
}
@Override
public long deleteObsoleteFilesPeriodMicros() {
assert(isInitialized());
assert(isOwningHandle());
return deleteObsoleteFilesPeriodMicros(nativeHandle_);
}
@Override
public DBOptions setMaxBackgroundCompactions(
final int maxBackgroundCompactions) {
assert(isInitialized());
assert(isOwningHandle());
setMaxBackgroundCompactions(nativeHandle_, maxBackgroundCompactions);
return this;
}
@Override
public int maxBackgroundCompactions() {
assert(isInitialized());
assert(isOwningHandle());
return maxBackgroundCompactions(nativeHandle_);
}
@Override
public DBOptions setMaxBackgroundFlushes(
final int maxBackgroundFlushes) {
assert(isInitialized());
assert(isOwningHandle());
setMaxBackgroundFlushes(nativeHandle_, maxBackgroundFlushes);
return this;
}
@Override
public int maxBackgroundFlushes() {
assert(isInitialized());
assert(isOwningHandle());
return maxBackgroundFlushes(nativeHandle_);
}
@Override
public DBOptions setMaxLogFileSize(
final long maxLogFileSize) {
assert(isInitialized());
assert(isOwningHandle());
setMaxLogFileSize(nativeHandle_, maxLogFileSize);
return this;
}
@Override
public long maxLogFileSize() {
assert(isInitialized());
assert(isOwningHandle());
return maxLogFileSize(nativeHandle_);
}
@Override
public DBOptions setLogFileTimeToRoll(
final long logFileTimeToRoll) {
assert(isInitialized());
assert(isOwningHandle());
setLogFileTimeToRoll(nativeHandle_, logFileTimeToRoll);
return this;
}
@Override
public long logFileTimeToRoll() {
assert(isInitialized());
assert(isOwningHandle());
return logFileTimeToRoll(nativeHandle_);
}
@Override
public DBOptions setKeepLogFileNum(
final long keepLogFileNum) {
assert(isInitialized());
assert(isOwningHandle());
setKeepLogFileNum(nativeHandle_, keepLogFileNum);
return this;
}
@Override
public long keepLogFileNum() {
assert(isInitialized());
assert(isOwningHandle());
return keepLogFileNum(nativeHandle_);
}
@Override
public DBOptions setMaxManifestFileSize(
final long maxManifestFileSize) {
assert(isInitialized());
assert(isOwningHandle());
setMaxManifestFileSize(nativeHandle_, maxManifestFileSize);
return this;
}
@Override
public long maxManifestFileSize() {
assert(isInitialized());
assert(isOwningHandle());
return maxManifestFileSize(nativeHandle_);
}
@Override
public DBOptions setTableCacheNumshardbits(
final int tableCacheNumshardbits) {
assert(isInitialized());
assert(isOwningHandle());
setTableCacheNumshardbits(nativeHandle_, tableCacheNumshardbits);
return this;
}
@Override
public int tableCacheNumshardbits() {
assert(isInitialized());
assert(isOwningHandle());
return tableCacheNumshardbits(nativeHandle_);
}
@Override
public DBOptions setWalTtlSeconds(
final long walTtlSeconds) {
assert(isInitialized());
assert(isOwningHandle());
setWalTtlSeconds(nativeHandle_, walTtlSeconds);
return this;
}
@Override
public long walTtlSeconds() {
assert(isInitialized());
assert(isOwningHandle());
return walTtlSeconds(nativeHandle_);
}
@Override
public DBOptions setWalSizeLimitMB(
final long sizeLimitMB) {
assert(isInitialized());
assert(isOwningHandle());
setWalSizeLimitMB(nativeHandle_, sizeLimitMB);
return this;
}
@Override
public long walSizeLimitMB() {
assert(isInitialized());
assert(isOwningHandle());
return walSizeLimitMB(nativeHandle_);
}
@Override
public DBOptions setManifestPreallocationSize(
final long size) {
assert(isInitialized());
assert(isOwningHandle());
setManifestPreallocationSize(nativeHandle_, size);
return this;
}
@Override
public long manifestPreallocationSize() {
assert(isInitialized());
assert(isOwningHandle());
return manifestPreallocationSize(nativeHandle_);
}
@Override
public DBOptions setAllowOsBuffer(
final boolean allowOsBuffer) {
assert(isInitialized());
assert(isOwningHandle());
setAllowOsBuffer(nativeHandle_, allowOsBuffer);
return this;
}
@Override
public boolean allowOsBuffer() {
assert(isInitialized());
assert(isOwningHandle());
return allowOsBuffer(nativeHandle_);
}
@Override
public DBOptions setAllowMmapReads(
final boolean allowMmapReads) {
assert(isInitialized());
assert(isOwningHandle());
setAllowMmapReads(nativeHandle_, allowMmapReads);
return this;
}
@Override
public boolean allowMmapReads() {
assert(isInitialized());
assert(isOwningHandle());
return allowMmapReads(nativeHandle_);
}
@Override
public DBOptions setAllowMmapWrites(
final boolean allowMmapWrites) {
assert(isInitialized());
assert(isOwningHandle());
setAllowMmapWrites(nativeHandle_, allowMmapWrites);
return this;
}
@Override
public boolean allowMmapWrites() {
assert(isInitialized());
assert(isOwningHandle());
return allowMmapWrites(nativeHandle_);
}
@Override
public DBOptions setIsFdCloseOnExec(
final boolean isFdCloseOnExec) {
assert(isInitialized());
assert(isOwningHandle());
setIsFdCloseOnExec(nativeHandle_, isFdCloseOnExec);
return this;
}
@Override
public boolean isFdCloseOnExec() {
assert(isInitialized());
assert(isOwningHandle());
return isFdCloseOnExec(nativeHandle_);
}
@Override
public DBOptions setStatsDumpPeriodSec(
final int statsDumpPeriodSec) {
assert(isInitialized());
assert(isOwningHandle());
setStatsDumpPeriodSec(nativeHandle_, statsDumpPeriodSec);
return this;
}
@Override
public int statsDumpPeriodSec() {
assert(isInitialized());
assert(isOwningHandle());
return statsDumpPeriodSec(nativeHandle_);
}
@Override
public DBOptions setAdviseRandomOnOpen(
final boolean adviseRandomOnOpen) {
assert(isInitialized());
assert(isOwningHandle());
setAdviseRandomOnOpen(nativeHandle_, adviseRandomOnOpen);
return this;
}
@ -510,21 +509,21 @@ public class DBOptions extends RocksObject implements DBOptionsInterface {
@Override
public DBOptions setUseAdaptiveMutex(
final boolean useAdaptiveMutex) {
assert(isInitialized());
assert(isOwningHandle());
setUseAdaptiveMutex(nativeHandle_, useAdaptiveMutex);
return this;
}
@Override
public boolean useAdaptiveMutex() {
assert(isInitialized());
assert(isOwningHandle());
return useAdaptiveMutex(nativeHandle_);
}
@Override
public DBOptions setBytesPerSync(
final long bytesPerSync) {
assert(isInitialized());
assert(isOwningHandle());
setBytesPerSync(nativeHandle_, bytesPerSync);
return this;
}
@ -534,33 +533,23 @@ public class DBOptions extends RocksObject implements DBOptionsInterface {
return bytesPerSync(nativeHandle_);
}
/**
* Release the memory allocated for the current instance
* in the c++ side.
*/
@Override protected void disposeInternal() {
assert(isInitialized());
disposeInternal(nativeHandle_);
}
static final int DEFAULT_NUM_SHARD_BITS = -1;
/**
* <p>Private constructor to be used by
* {@link #getDBOptionsFromProps(java.util.Properties)}</p>
*
* @param handle native handle to DBOptions instance.
* @param nativeHandle native handle to DBOptions instance.
*/
private DBOptions(final long handle) {
super();
nativeHandle_ = handle;
private DBOptions(final long nativeHandle) {
super(nativeHandle);
}
private static native long getDBOptionsFromProps(
String optString);
private native void newDBOptions();
private native void disposeInternal(long handle);
private native static long newDBOptions();
@Override protected final native void disposeInternal(final long handle);
private native void setIncreaseParallelism(long handle, int totalThreads);
private native void setCreateIfMissing(long handle, boolean flag);

@ -15,10 +15,19 @@ package org.rocksdb;
* using @see org.rocksdb.Comparator
*/
public abstract class DirectComparator extends AbstractComparator<DirectSlice> {
private final long nativeHandle_;
public DirectComparator(final ComparatorOptions copt) {
super();
createNewDirectComparator0(copt.nativeHandle_);
this.nativeHandle_ = createNewDirectComparator0(copt.nativeHandle_);
}
@Override
protected final long getNativeHandle() {
return nativeHandle_;
}
private native void createNewDirectComparator0(final long comparatorOptionsHandle);
private native long createNewDirectComparator0(
final long comparatorOptionsHandle);
}

@ -16,7 +16,6 @@ import java.nio.ByteBuffer;
* values consider using @see org.rocksdb.Slice
*/
public class DirectSlice extends AbstractSlice<ByteBuffer> {
//TODO(AR) only needed by WriteBatchWithIndexTest until JDK8
public final static DirectSlice NONE = new DirectSlice();
/**
@ -24,17 +23,15 @@ public class DirectSlice extends AbstractSlice<ByteBuffer> {
* without an underlying C++ object set
* at creation time.
*
* Note: You should be aware that
* {@see org.rocksdb.RocksObject#disOwnNativeHandle()} is intentionally
* called from the default DirectSlice constructor, and that it is marked as
* package-private. This is so that developers cannot construct their own default
* DirectSlice objects (at present). As developers cannot construct their own
* DirectSlice objects through this, they are not creating underlying C++
* DirectSlice objects, and so there is nothing to free (dispose) from Java.
* Note: You should be aware that it is intentionally marked as
* package-private. This is so that developers cannot construct their own
* default DirectSlice objects (at present). As developers cannot construct
* their own DirectSlice objects through this, they are not creating
* underlying C++ DirectSlice objects, and so there is nothing to free
* (dispose) from Java.
*/
DirectSlice() {
super();
disOwnNativeHandle();
}
/**
@ -45,8 +42,7 @@ public class DirectSlice extends AbstractSlice<ByteBuffer> {
* @param str The string
*/
public DirectSlice(final String str) {
super();
createNewSliceFromString(str);
super(createNewSliceFromString(str));
}
/**
@ -58,9 +54,7 @@ public class DirectSlice extends AbstractSlice<ByteBuffer> {
* @param length The length of the data to use for the slice
*/
public DirectSlice(final ByteBuffer data, final int length) {
super();
assert(data.isDirect());
createNewDirectSlice0(data, length);
super(createNewDirectSlice0(ensureDirect(data), length));
}
/**
@ -71,9 +65,14 @@ public class DirectSlice extends AbstractSlice<ByteBuffer> {
 * @param data The buffer containing the data
*/
public DirectSlice(final ByteBuffer data) {
super();
super(createNewDirectSlice1(ensureDirect(data)));
}
private static ByteBuffer ensureDirect(final ByteBuffer data) {
// TODO(AR) consider throwing a checked exception, as if it's not direct
// this can SIGSEGV
assert(data.isDirect());
createNewDirectSlice1(data);
return data;
}
/**
@ -85,16 +84,14 @@ public class DirectSlice extends AbstractSlice<ByteBuffer> {
* @return the requested byte
*/
public byte get(int offset) {
assert (isInitialized());
return get0(nativeHandle_, offset);
return get0(getNativeHandle(), offset);
}
/**
* Clears the backing slice
*/
public void clear() {
assert (isInitialized());
clear0(nativeHandle_);
clear0(getNativeHandle());
}
/**
@ -105,12 +102,12 @@ public class DirectSlice extends AbstractSlice<ByteBuffer> {
* @param n The number of bytes to drop
*/
public void removePrefix(final int n) {
assert (isInitialized());
removePrefix0(nativeHandle_, n);
removePrefix0(getNativeHandle(), n);
}
private native void createNewDirectSlice0(ByteBuffer data, int length);
private native void createNewDirectSlice1(ByteBuffer data);
private native static long createNewDirectSlice0(final ByteBuffer data,
final int length);
private native static long createNewDirectSlice1(final ByteBuffer data);
@Override protected final native ByteBuffer data0(long handle);
private native byte get0(long handle, int offset);
private native void clear0(long handle);

@ -70,8 +70,8 @@ public abstract class Env extends RocksObject {
}
protected Env() {
super();
protected Env(final long nativeHandle) {
super(nativeHandle);
}
static {

@ -13,7 +13,10 @@ package org.rocksdb;
* DB::Get() call.
*/
public abstract class Filter extends RocksObject {
protected abstract void createNewFilter();
protected Filter(final long nativeHandle) {
super(nativeHandle);
}
/**
* Deletes underlying C++ filter pointer.
@ -22,10 +25,11 @@ public abstract class Filter extends RocksObject {
* RocksDB instances referencing the filter are closed.
* Otherwise an undefined behavior will occur.
*/
@Override protected void disposeInternal() {
assert(isInitialized());
@Override
protected void disposeInternal() {
disposeInternal(nativeHandle_);
}
private native void disposeInternal(long handle);
@Override
protected final native void disposeInternal(final long handle);
}

@ -10,8 +10,7 @@ public class FlushOptions extends RocksObject {
* Construct a new instance of FlushOptions.
*/
public FlushOptions(){
super();
newFlushOptions();
super(newFlushOptions());
}
/**
@ -23,7 +22,7 @@ public class FlushOptions extends RocksObject {
* @return instance of current FlushOptions.
*/
public FlushOptions setWaitForFlush(final boolean waitForFlush) {
assert(isInitialized());
assert(isOwningHandle());
setWaitForFlush(nativeHandle_, waitForFlush);
return this;
}
@ -35,16 +34,12 @@ public class FlushOptions extends RocksObject {
* waits for termination of the flush process.
*/
public boolean waitForFlush() {
assert(isInitialized());
assert(isOwningHandle());
return waitForFlush(nativeHandle_);
}
@Override protected void disposeInternal() {
disposeInternal(nativeHandle_);
}
private native void newFlushOptions();
private native void disposeInternal(long handle);
private native static long newFlushOptions();
@Override protected final native void disposeInternal(final long handle);
private native void setWaitForFlush(long handle,
boolean wait);
private native boolean waitForFlush(long handle);

@ -35,7 +35,9 @@ package org.rocksdb;
* {@link org.rocksdb.InfoLogLevel#FATAL_LEVEL}.
* </p>
*/
public abstract class Logger extends RocksObject {
public abstract class Logger extends AbstractImmutableNativeReference {
final long nativeHandle_;
/**
* <p>AbstractLogger constructor.</p>
@ -47,7 +49,8 @@ public abstract class Logger extends RocksObject {
* @param options {@link org.rocksdb.Options} instance.
*/
public Logger(final Options options) {
createNewLoggerOptions(options.nativeHandle_);
super(true);
this.nativeHandle_ = createNewLoggerOptions(options.nativeHandle_);
}
/**
@ -60,7 +63,8 @@ public abstract class Logger extends RocksObject {
* @param dboptions {@link org.rocksdb.DBOptions} instance.
*/
public Logger(final DBOptions dboptions) {
createNewLoggerDbOptions(dboptions.nativeHandle_);
super(true);
this.nativeHandle_ = createNewLoggerDbOptions(dboptions.nativeHandle_);
}
/**
@ -93,16 +97,15 @@ public abstract class Logger extends RocksObject {
*/
@Override
protected void disposeInternal() {
assert(isInitialized());
disposeInternal(nativeHandle_);
}
protected native void createNewLoggerOptions(
protected native long createNewLoggerOptions(
long options);
protected native void createNewLoggerDbOptions(
protected native long createNewLoggerDbOptions(
long dbOptions);
protected native void setInfoLogLevel(long handle,
byte infoLogLevel);
protected native byte infoLogLevel(long handle);
private native void disposeInternal(long handle);
private native void disposeInternal(final long handle);
}

@ -12,8 +12,8 @@ import java.util.List;
* Options to control the behavior of a database. It will be used
* during the creation of a {@link org.rocksdb.RocksDB} (i.e., RocksDB.open()).
*
* If {@link #dispose()} function is not called, then it will be GC'd automatically
* and native resources will be released as part of the process.
* If {@link #dispose()} function is not called, then it will be GC'd
 * automatically and native resources will be released as part of the process.
*/
public class Options extends RocksObject
implements DBOptionsInterface, ColumnFamilyOptionsInterface {
@ -27,8 +27,7 @@ public class Options extends RocksObject
* an {@code rocksdb::Options} in the c++ side.
*/
public Options() {
super();
newOptions();
super(newOptions());
env_ = Env.getDefault();
}
@ -42,28 +41,28 @@ public class Options extends RocksObject
*/
public Options(final DBOptions dbOptions,
final ColumnFamilyOptions columnFamilyOptions) {
super();
newOptions(dbOptions.nativeHandle_, columnFamilyOptions.nativeHandle_);
super(newOptions(dbOptions.nativeHandle_,
columnFamilyOptions.nativeHandle_));
env_ = Env.getDefault();
}
@Override
public Options setIncreaseParallelism(final int totalThreads) {
assert(isInitialized());
assert(isOwningHandle());
setIncreaseParallelism(nativeHandle_, totalThreads);
return this;
}
@Override
public Options setCreateIfMissing(final boolean flag) {
assert(isInitialized());
assert(isOwningHandle());
setCreateIfMissing(nativeHandle_, flag);
return this;
}
@Override
public Options setCreateMissingColumnFamilies(final boolean flag) {
assert(isInitialized());
assert(isOwningHandle());
setCreateMissingColumnFamilies(nativeHandle_, flag);
return this;
}
@ -77,7 +76,7 @@ public class Options extends RocksObject
* @return the instance of the current Options.
*/
public Options setEnv(final Env env) {
assert(isInitialized());
assert(isOwningHandle());
setEnv(nativeHandle_, env.nativeHandle_);
env_ = env;
return this;
@ -111,13 +110,13 @@ public class Options extends RocksObject
@Override
public boolean createIfMissing() {
assert(isInitialized());
assert(isOwningHandle());
return createIfMissing(nativeHandle_);
}
@Override
public boolean createMissingColumnFamilies() {
assert(isInitialized());
assert(isOwningHandle());
return createMissingColumnFamilies(nativeHandle_);
}
@ -161,7 +160,7 @@ public class Options extends RocksObject
@Override
public Options setComparator(final BuiltinComparator builtinComparator) {
assert(isInitialized());
assert(isOwningHandle());
setComparatorHandle(nativeHandle_, builtinComparator.ordinal());
return this;
}
@ -169,15 +168,15 @@ public class Options extends RocksObject
@Override
public Options setComparator(
final AbstractComparator<? extends AbstractSlice<?>> comparator) {
assert (isInitialized());
setComparatorHandle(nativeHandle_, comparator.nativeHandle_);
assert(isOwningHandle());
setComparatorHandle(nativeHandle_, comparator.getNativeHandle());
comparator_ = comparator;
return this;
}
@Override
public Options setMergeOperatorName(final String name) {
assert (isInitialized());
assert(isOwningHandle());
if (name == null) {
throw new IllegalArgumentException(
"Merge operator name must not be null.");
@ -194,164 +193,164 @@ public class Options extends RocksObject
@Override
public Options setWriteBufferSize(final long writeBufferSize) {
assert(isInitialized());
assert(isOwningHandle());
setWriteBufferSize(nativeHandle_, writeBufferSize);
return this;
}
@Override
public long writeBufferSize() {
assert(isInitialized());
assert(isOwningHandle());
return writeBufferSize(nativeHandle_);
}
@Override
public Options setMaxWriteBufferNumber(final int maxWriteBufferNumber) {
assert(isInitialized());
assert(isOwningHandle());
setMaxWriteBufferNumber(nativeHandle_, maxWriteBufferNumber);
return this;
}
@Override
public int maxWriteBufferNumber() {
assert(isInitialized());
assert(isOwningHandle());
return maxWriteBufferNumber(nativeHandle_);
}
@Override
public boolean errorIfExists() {
assert(isInitialized());
assert(isOwningHandle());
return errorIfExists(nativeHandle_);
}
@Override
public Options setErrorIfExists(final boolean errorIfExists) {
assert(isInitialized());
assert(isOwningHandle());
setErrorIfExists(nativeHandle_, errorIfExists);
return this;
}
@Override
public boolean paranoidChecks() {
assert(isInitialized());
assert(isOwningHandle());
return paranoidChecks(nativeHandle_);
}
@Override
public Options setParanoidChecks(final boolean paranoidChecks) {
assert(isInitialized());
assert(isOwningHandle());
setParanoidChecks(nativeHandle_, paranoidChecks);
return this;
}
@Override
public int maxOpenFiles() {
assert(isInitialized());
assert(isOwningHandle());
return maxOpenFiles(nativeHandle_);
}
@Override
public Options setMaxTotalWalSize(final long maxTotalWalSize) {
assert(isInitialized());
assert(isOwningHandle());
setMaxTotalWalSize(nativeHandle_, maxTotalWalSize);
return this;
}
@Override
public long maxTotalWalSize() {
assert(isInitialized());
assert(isOwningHandle());
return maxTotalWalSize(nativeHandle_);
}
@Override
public Options setMaxOpenFiles(final int maxOpenFiles) {
assert(isInitialized());
assert(isOwningHandle());
setMaxOpenFiles(nativeHandle_, maxOpenFiles);
return this;
}
@Override
public boolean disableDataSync() {
assert(isInitialized());
assert(isOwningHandle());
return disableDataSync(nativeHandle_);
}
@Override
public Options setDisableDataSync(final boolean disableDataSync) {
assert(isInitialized());
assert(isOwningHandle());
setDisableDataSync(nativeHandle_, disableDataSync);
return this;
}
@Override
public boolean useFsync() {
assert(isInitialized());
assert(isOwningHandle());
return useFsync(nativeHandle_);
}
@Override
public Options setUseFsync(final boolean useFsync) {
assert(isInitialized());
assert(isOwningHandle());
setUseFsync(nativeHandle_, useFsync);
return this;
}
@Override
public String dbLogDir() {
assert(isInitialized());
assert(isOwningHandle());
return dbLogDir(nativeHandle_);
}
@Override
public Options setDbLogDir(final String dbLogDir) {
assert(isInitialized());
assert(isOwningHandle());
setDbLogDir(nativeHandle_, dbLogDir);
return this;
}
@Override
public String walDir() {
assert(isInitialized());
assert(isOwningHandle());
return walDir(nativeHandle_);
}
@Override
public Options setWalDir(final String walDir) {
assert(isInitialized());
assert(isOwningHandle());
setWalDir(nativeHandle_, walDir);
return this;
}
@Override
public long deleteObsoleteFilesPeriodMicros() {
assert(isInitialized());
assert(isOwningHandle());
return deleteObsoleteFilesPeriodMicros(nativeHandle_);
}
@Override
public Options setDeleteObsoleteFilesPeriodMicros(
final long micros) {
assert(isInitialized());
assert(isOwningHandle());
setDeleteObsoleteFilesPeriodMicros(nativeHandle_, micros);
return this;
}
@Override
public int maxBackgroundCompactions() {
assert(isInitialized());
assert(isOwningHandle());
return maxBackgroundCompactions(nativeHandle_);
}
@Override
public Options createStatistics() {
assert(isInitialized());
assert(isOwningHandle());
createStatistics(nativeHandle_);
return this;
}
@Override
public Statistics statisticsPtr() {
assert(isInitialized());
assert(isOwningHandle());
long statsPtr = statisticsPtr(nativeHandle_);
if(statsPtr == 0) {
@ -365,74 +364,74 @@ public class Options extends RocksObject
@Override
public Options setMaxBackgroundCompactions(
final int maxBackgroundCompactions) {
assert(isInitialized());
assert(isOwningHandle());
setMaxBackgroundCompactions(nativeHandle_, maxBackgroundCompactions);
return this;
}
@Override
public int maxBackgroundFlushes() {
assert(isInitialized());
assert(isOwningHandle());
return maxBackgroundFlushes(nativeHandle_);
}
@Override
public Options setMaxBackgroundFlushes(
final int maxBackgroundFlushes) {
assert(isInitialized());
assert(isOwningHandle());
setMaxBackgroundFlushes(nativeHandle_, maxBackgroundFlushes);
return this;
}
@Override
public long maxLogFileSize() {
assert(isInitialized());
assert(isOwningHandle());
return maxLogFileSize(nativeHandle_);
}
@Override
public Options setMaxLogFileSize(final long maxLogFileSize) {
assert(isInitialized());
assert(isOwningHandle());
setMaxLogFileSize(nativeHandle_, maxLogFileSize);
return this;
}
@Override
public long logFileTimeToRoll() {
assert(isInitialized());
assert(isOwningHandle());
return logFileTimeToRoll(nativeHandle_);
}
@Override
public Options setLogFileTimeToRoll(final long logFileTimeToRoll) {
assert(isInitialized());
assert(isOwningHandle());
setLogFileTimeToRoll(nativeHandle_, logFileTimeToRoll);
return this;
}
@Override
public long keepLogFileNum() {
assert(isInitialized());
assert(isOwningHandle());
return keepLogFileNum(nativeHandle_);
}
@Override
public Options setKeepLogFileNum(final long keepLogFileNum) {
assert(isInitialized());
assert(isOwningHandle());
setKeepLogFileNum(nativeHandle_, keepLogFileNum);
return this;
}
@Override
public long maxManifestFileSize() {
assert(isInitialized());
assert(isOwningHandle());
return maxManifestFileSize(nativeHandle_);
}
@Override
public Options setMaxManifestFileSize(
final long maxManifestFileSize) {
assert(isInitialized());
assert(isOwningHandle());
setMaxManifestFileSize(nativeHandle_, maxManifestFileSize);
return this;
}
@ -441,7 +440,7 @@ public class Options extends RocksObject
public Options setMaxTableFilesSizeFIFO(
final long maxTableFilesSize) {
assert(maxTableFilesSize > 0); // unsigned native type
assert(isInitialized());
assert(isOwningHandle());
setMaxTableFilesSizeFIFO(nativeHandle_, maxTableFilesSize);
return this;
}
@ -453,118 +452,118 @@ public class Options extends RocksObject
@Override
public int tableCacheNumshardbits() {
assert(isInitialized());
assert(isOwningHandle());
return tableCacheNumshardbits(nativeHandle_);
}
@Override
public Options setTableCacheNumshardbits(
final int tableCacheNumshardbits) {
assert(isInitialized());
assert(isOwningHandle());
setTableCacheNumshardbits(nativeHandle_, tableCacheNumshardbits);
return this;
}
@Override
public long walTtlSeconds() {
assert(isInitialized());
assert(isOwningHandle());
return walTtlSeconds(nativeHandle_);
}
@Override
public Options setWalTtlSeconds(final long walTtlSeconds) {
assert(isInitialized());
assert(isOwningHandle());
setWalTtlSeconds(nativeHandle_, walTtlSeconds);
return this;
}
@Override
public long walSizeLimitMB() {
assert(isInitialized());
assert(isOwningHandle());
return walSizeLimitMB(nativeHandle_);
}
@Override
public Options setWalSizeLimitMB(final long sizeLimitMB) {
assert(isInitialized());
assert(isOwningHandle());
setWalSizeLimitMB(nativeHandle_, sizeLimitMB);
return this;
}
@Override
public long manifestPreallocationSize() {
assert(isInitialized());
assert(isOwningHandle());
return manifestPreallocationSize(nativeHandle_);
}
@Override
public Options setManifestPreallocationSize(final long size) {
assert(isInitialized());
assert(isOwningHandle());
setManifestPreallocationSize(nativeHandle_, size);
return this;
}
@Override
public boolean allowOsBuffer() {
assert(isInitialized());
assert(isOwningHandle());
return allowOsBuffer(nativeHandle_);
}
@Override
public Options setAllowOsBuffer(final boolean allowOsBuffer) {
assert(isInitialized());
assert(isOwningHandle());
setAllowOsBuffer(nativeHandle_, allowOsBuffer);
return this;
}
@Override
public boolean allowMmapReads() {
assert(isInitialized());
assert(isOwningHandle());
return allowMmapReads(nativeHandle_);
}
@Override
public Options setAllowMmapReads(final boolean allowMmapReads) {
assert(isInitialized());
assert(isOwningHandle());
setAllowMmapReads(nativeHandle_, allowMmapReads);
return this;
}
@Override
public boolean allowMmapWrites() {
assert(isInitialized());
assert(isOwningHandle());
return allowMmapWrites(nativeHandle_);
}
@Override
public Options setAllowMmapWrites(final boolean allowMmapWrites) {
assert(isInitialized());
assert(isOwningHandle());
setAllowMmapWrites(nativeHandle_, allowMmapWrites);
return this;
}
@Override
public boolean isFdCloseOnExec() {
assert(isInitialized());
assert(isOwningHandle());
return isFdCloseOnExec(nativeHandle_);
}
@Override
public Options setIsFdCloseOnExec(final boolean isFdCloseOnExec) {
assert(isInitialized());
assert(isOwningHandle());
setIsFdCloseOnExec(nativeHandle_, isFdCloseOnExec);
return this;
}
@Override
public int statsDumpPeriodSec() {
assert(isInitialized());
assert(isOwningHandle());
return statsDumpPeriodSec(nativeHandle_);
}
@Override
public Options setStatsDumpPeriodSec(final int statsDumpPeriodSec) {
assert(isInitialized());
assert(isOwningHandle());
setStatsDumpPeriodSec(nativeHandle_, statsDumpPeriodSec);
return this;
}
@ -576,20 +575,20 @@ public class Options extends RocksObject
@Override
public Options setAdviseRandomOnOpen(final boolean adviseRandomOnOpen) {
assert(isInitialized());
assert(isOwningHandle());
setAdviseRandomOnOpen(nativeHandle_, adviseRandomOnOpen);
return this;
}
@Override
public boolean useAdaptiveMutex() {
assert(isInitialized());
assert(isOwningHandle());
return useAdaptiveMutex(nativeHandle_);
}
@Override
public Options setUseAdaptiveMutex(final boolean useAdaptiveMutex) {
assert(isInitialized());
assert(isOwningHandle());
setUseAdaptiveMutex(nativeHandle_, useAdaptiveMutex);
return this;
}
@ -601,7 +600,7 @@ public class Options extends RocksObject
@Override
public Options setBytesPerSync(final long bytesPerSync) {
assert(isInitialized());
assert(isOwningHandle());
setBytesPerSync(nativeHandle_, bytesPerSync);
return this;
}
@ -622,28 +621,28 @@ public class Options extends RocksObject
@Override
public Options setLogger(final Logger logger) {
assert(isInitialized());
assert(isOwningHandle());
setLogger(nativeHandle_, logger.nativeHandle_);
return this;
}
@Override
public Options setInfoLogLevel(final InfoLogLevel infoLogLevel) {
assert(isInitialized());
assert(isOwningHandle());
setInfoLogLevel(nativeHandle_, infoLogLevel.getValue());
return this;
}
@Override
public InfoLogLevel infoLogLevel() {
assert(isInitialized());
assert(isOwningHandle());
return InfoLogLevel.getInfoLogLevel(
infoLogLevel(nativeHandle_));
}
@Override
public String memTableFactoryName() {
assert(isInitialized());
assert(isOwningHandle());
return memTableFactoryName(nativeHandle_);
}
@ -656,20 +655,20 @@ public class Options extends RocksObject
@Override
public String tableFactoryName() {
assert(isInitialized());
assert(isOwningHandle());
return tableFactoryName(nativeHandle_);
}
@Override
public Options useFixedLengthPrefixExtractor(final int n) {
assert(isInitialized());
assert(isOwningHandle());
useFixedLengthPrefixExtractor(nativeHandle_, n);
return this;
}
@Override
public Options useCappedPrefixExtractor(final int n) {
assert(isInitialized());
assert(isOwningHandle());
useCappedPrefixExtractor(nativeHandle_, n);
return this;
}
@ -680,11 +679,12 @@ public class Options extends RocksObject
}
@Override
public Options setCompressionPerLevel(final List<CompressionType> compressionLevels) {
final List<Byte> byteCompressionTypes = new ArrayList<>(
compressionLevels.size());
for (final CompressionType compressionLevel : compressionLevels) {
byteCompressionTypes.add(compressionLevel.getValue());
public Options setCompressionPerLevel(
final List<CompressionType> compressionLevels) {
final byte[] byteCompressionTypes = new byte[
compressionLevels.size()];
for (int i = 0; i < compressionLevels.size(); i++) {
byteCompressionTypes[i] = compressionLevels.get(i).getValue();
}
setCompressionPerLevel(nativeHandle_, byteCompressionTypes);
return this;
@ -692,7 +692,7 @@ public class Options extends RocksObject
@Override
public List<CompressionType> compressionPerLevel() {
final List<Byte> byteCompressionTypes =
final byte[] byteCompressionTypes =
compressionPerLevel(nativeHandle_);
final List<CompressionType> compressionLevels = new ArrayList<>();
for (final Byte byteCompressionType : byteCompressionTypes) {
@ -975,7 +975,8 @@ public class Options extends RocksObject
@Override
public Options setMaxSequentialSkipInIterations(
final long maxSequentialSkipInIterations) {
setMaxSequentialSkipInIterations(nativeHandle_, maxSequentialSkipInIterations);
setMaxSequentialSkipInIterations(nativeHandle_,
maxSequentialSkipInIterations);
return this;
}
@ -1085,19 +1086,10 @@ public class Options extends RocksObject
return optimizeFiltersForHits(nativeHandle_);
}
/**
* Release the memory allocated for the current instance
* in the c++ side.
*/
@Override protected void disposeInternal() {
assert(isInitialized());
disposeInternal(nativeHandle_);
}
private native void newOptions();
private native void newOptions(long dbOptHandle,
private native static long newOptions();
private native static long newOptions(long dbOptHandle,
long cfOptHandle);
private native void disposeInternal(long handle);
@Override protected final native void disposeInternal(final long handle);
private native void setEnv(long optHandle, long envHandle);
private native void prepareForBulkLoad(long handle);
@ -1200,7 +1192,8 @@ public class Options extends RocksObject
private native void optimizeUniversalStyleCompaction(long handle,
long memtableMemoryBudget);
private native void setComparatorHandle(long handle, int builtinComparator);
private native void setComparatorHandle(long optHandle, long comparatorHandle);
private native void setComparatorHandle(long optHandle,
long comparatorHandle);
private native void setMergeOperatorName(
long handle, String name);
private native void setMergeOperator(
@ -1217,8 +1210,8 @@ public class Options extends RocksObject
private native void setCompressionType(long handle, byte compressionType);
private native byte compressionType(long handle);
private native void setCompressionPerLevel(long handle,
List<Byte> compressionLevels);
private native List<Byte> compressionPerLevel(long handle);
byte[] compressionLevels);
private native byte[] compressionPerLevel(long handle);
private native void useFixedLengthPrefixExtractor(
long handle, int prefixLength);
private native void useCappedPrefixExtractor(

@ -13,10 +13,9 @@ package org.rocksdb;
*/
public class ReadOptions extends RocksObject {
public ReadOptions() {
super();
newReadOptions();
super(newReadOptions());
}
private native void newReadOptions();
private native static long newReadOptions();
/**
* If true, all data read from underlying storage will be
@ -26,7 +25,7 @@ public class ReadOptions extends RocksObject {
* @return true if checksum verification is on.
*/
public boolean verifyChecksums() {
assert(isInitialized());
assert(isOwningHandle());
return verifyChecksums(nativeHandle_);
}
private native boolean verifyChecksums(long handle);
@ -42,7 +41,7 @@ public class ReadOptions extends RocksObject {
*/
public ReadOptions setVerifyChecksums(
final boolean verifyChecksums) {
assert(isInitialized());
assert(isOwningHandle());
setVerifyChecksums(nativeHandle_, verifyChecksums);
return this;
}
@ -59,7 +58,7 @@ public class ReadOptions extends RocksObject {
* @return true if the fill-cache behavior is on.
*/
public boolean fillCache() {
assert(isInitialized());
assert(isOwningHandle());
return fillCache(nativeHandle_);
}
private native boolean fillCache(long handle);
@ -74,7 +73,7 @@ public class ReadOptions extends RocksObject {
* @return the reference to the current ReadOptions.
*/
public ReadOptions setFillCache(final boolean fillCache) {
assert(isInitialized());
assert(isOwningHandle());
setFillCache(nativeHandle_, fillCache);
return this;
}
@ -92,7 +91,7 @@ public class ReadOptions extends RocksObject {
* @return the reference to the current ReadOptions.
*/
public ReadOptions setSnapshot(final Snapshot snapshot) {
assert(isInitialized());
assert(isOwningHandle());
if (snapshot != null) {
setSnapshot(nativeHandle_, snapshot.nativeHandle_);
} else {
@ -109,7 +108,7 @@ public class ReadOptions extends RocksObject {
* is assigned null.
*/
public Snapshot snapshot() {
assert(isInitialized());
assert(isOwningHandle());
long snapshotHandle = snapshot(nativeHandle_);
if (snapshotHandle != 0) {
return new Snapshot(snapshotHandle);
@ -130,7 +129,7 @@ public class ReadOptions extends RocksObject {
* @return true if tailing iterator is enabled.
*/
public boolean tailing() {
assert(isInitialized());
assert(isOwningHandle());
return tailing(nativeHandle_);
}
private native boolean tailing(long handle);
@ -147,17 +146,13 @@ public class ReadOptions extends RocksObject {
* @return the reference to the current ReadOptions.
*/
public ReadOptions setTailing(final boolean tailing) {
assert(isInitialized());
assert(isOwningHandle());
setTailing(nativeHandle_, tailing);
return this;
}
private native void setTailing(
long handle, boolean tailing);
@Override protected void disposeInternal() {
disposeInternal(nativeHandle_);
}
private native void disposeInternal(long handle);
@Override protected final native void disposeInternal(final long handle);
}

@ -8,11 +8,11 @@ package org.rocksdb;
/**
* Just a Java wrapper around EmptyValueCompactionFilter implemented in C++
*/
public class RemoveEmptyValueCompactionFilter extends AbstractCompactionFilter<Slice> {
public class RemoveEmptyValueCompactionFilter
extends AbstractCompactionFilter<Slice> {
public RemoveEmptyValueCompactionFilter() {
super();
createNewRemoveEmptyValueCompactionFilter0();
super(createNewRemoveEmptyValueCompactionFilter0());
}
private native void createNewRemoveEmptyValueCompactionFilter0();
private native static long createNewRemoveEmptyValueCompactionFilter0();
}

@ -23,8 +23,7 @@ public class RestoreBackupableDB extends RocksObject {
* @param options {@link org.rocksdb.BackupableDBOptions} instance
*/
public RestoreBackupableDB(final BackupableDBOptions options) {
super();
nativeHandle_ = newRestoreBackupableDB(options.nativeHandle_);
super(newRestoreBackupableDB(options.nativeHandle_));
}
/**
@ -52,7 +51,7 @@ public class RestoreBackupableDB extends RocksObject {
public void restoreDBFromBackup(final long backupId, final String dbDir,
final String walDir, final RestoreOptions restoreOptions)
throws RocksDBException {
assert(isInitialized());
assert(isOwningHandle());
restoreDBFromBackup0(nativeHandle_, backupId, dbDir, walDir,
restoreOptions.nativeHandle_);
}
@ -70,7 +69,7 @@ public class RestoreBackupableDB extends RocksObject {
public void restoreDBFromLatestBackup(final String dbDir,
final String walDir, final RestoreOptions restoreOptions)
throws RocksDBException {
assert(isInitialized());
assert(isOwningHandle());
restoreDBFromLatestBackup0(nativeHandle_, dbDir, walDir,
restoreOptions.nativeHandle_);
}
@ -85,7 +84,7 @@ public class RestoreBackupableDB extends RocksObject {
*/
public void purgeOldBackups(final int numBackupsToKeep)
throws RocksDBException {
assert(isInitialized());
assert(isOwningHandle());
purgeOldBackups0(nativeHandle_, numBackupsToKeep);
}
@ -99,7 +98,7 @@ public class RestoreBackupableDB extends RocksObject {
*/
public void deleteBackup(final int backupId)
throws RocksDBException {
assert(isInitialized());
assert(isOwningHandle());
deleteBackup0(nativeHandle_, backupId);
}
@ -110,7 +109,7 @@ public class RestoreBackupableDB extends RocksObject {
* @return List of {@link BackupInfo} instances.
*/
public List<BackupInfo> getBackupInfos() {
assert(isInitialized());
assert(isOwningHandle());
return getBackupInfo(nativeHandle_);
}
@ -122,7 +121,7 @@ public class RestoreBackupableDB extends RocksObject {
* @return array of backup ids as int ids.
*/
public int[] getCorruptedBackups() {
assert(isInitialized());
assert(isOwningHandle());
return getCorruptedBackups(nativeHandle_);
}
@ -135,19 +134,11 @@ public class RestoreBackupableDB extends RocksObject {
* native library.
*/
public void garbageCollect() throws RocksDBException {
assert(isInitialized());
assert(isOwningHandle());
garbageCollect(nativeHandle_);
}
/**
* <p>Release the memory allocated for the current instance
* in the c++ side.</p>
*/
@Override public synchronized void disposeInternal() {
dispose(nativeHandle_);
}
private native long newRestoreBackupableDB(long options);
private native static long newRestoreBackupableDB(final long options);
private native void restoreDBFromBackup0(long nativeHandle, long backupId,
String dbDir, String walDir, long restoreOptions)
throws RocksDBException;
@ -162,5 +153,6 @@ public class RestoreBackupableDB extends RocksObject {
private native int[] getCorruptedBackups(long handle);
private native void garbageCollect(long handle)
throws RocksDBException;
private native void dispose(long nativeHandle);
@Override protected final native void disposeInternal(
final long nativeHandle);
}

@ -16,26 +16,17 @@ public class RestoreOptions extends RocksObject {
/**
* Constructor
*
* @param keepLogFiles If true, restore won't overwrite the existing log files in wal_dir. It
* will also move all log files from archive directory to wal_dir. Use this
* option in combination with BackupableDBOptions::backup_log_files = false
* for persisting in-memory databases.
* Default: false
* @param keepLogFiles If true, restore won't overwrite the existing log files
* in wal_dir. It will also move all log files from archive directory to
* wal_dir. Use this option in combination with
* BackupableDBOptions::backup_log_files = false for persisting in-memory
* databases.
* Default: false
*/
public RestoreOptions(final boolean keepLogFiles) {
super();
nativeHandle_ = newRestoreOptions(keepLogFiles);
super(newRestoreOptions(keepLogFiles));
}
/**
* Release the memory allocated for the current instance
* in the c++ side.
*/
@Override public synchronized void disposeInternal() {
assert(isInitialized());
dispose(nativeHandle_);
}
private native long newRestoreOptions(boolean keepLogFiles);
private native void dispose(long handle);
private native static long newRestoreOptions(boolean keepLogFiles);
@Override protected final native void disposeInternal(final long handle);
}

@ -48,7 +48,8 @@ public class RocksDB extends RocksObject {
}
catch (IOException e)
{
throw new RuntimeException("Unable to load the RocksDB shared library" + e);
throw new RuntimeException("Unable to load the RocksDB shared library"
+ e);
}
}
@ -78,7 +79,8 @@ public class RocksDB extends RocksObject {
UnsatisfiedLinkError err = null;
for (String path : paths) {
try {
System.load(path + "/" + Environment.getJniLibraryFileName("rocksdbjni"));
System.load(path + "/" +
Environment.getJniLibraryFileName("rocksdbjni"));
success = true;
break;
} catch (UnsatisfiedLinkError e) {
@ -116,8 +118,8 @@ public class RocksDB extends RocksObject {
* the path to the database using the specified options and db path and a list
* of column family names.
* <p>
* If opened in read write mode every existing column family name must be passed
* within the list to this method.</p>
* If opened in read write mode every existing column family name must be
* passed within the list to this method.</p>
* <p>
* If opened in read-only mode only a subset of existing column families must
* be passed to this method.</p>
@ -179,9 +181,7 @@ public class RocksDB extends RocksObject {
// when non-default Options is used, keeping an Options reference
// in RocksDB can prevent Java to GC during the life-time of
// the currently-created RocksDB.
RocksDB db = new RocksDB();
db.open(options.nativeHandle_, path);
final RocksDB db = new RocksDB(open(options.nativeHandle_, path));
db.storeOptionsInstance(options);
return db;
}
@ -191,8 +191,8 @@ public class RocksDB extends RocksObject {
* the path to the database using the specified options and db path and a list
* of column family names.
* <p>
* If opened in read write mode every existing column family name must be passed
* within the list to this method.</p>
* If opened in read write mode every existing column family name must be
* passed within the list to this method.</p>
* <p>
* If opened in read-only mode only a subset of existing column families must
* be passed to this method.</p>
@ -206,7 +206,8 @@ public class RocksDB extends RocksObject {
* with new Options instance as underlying native statistics instance does not
* use any locks to prevent concurrent updates.</p>
* <p>
* ColumnFamily handles are disposed when the RocksDB instance is disposed.</p>
* ColumnFamily handles are disposed when the RocksDB instance is disposed.
* </p>
*
* @param options {@link org.rocksdb.DBOptions} instance.
* @param path the path to the rocksdb.
@ -225,13 +226,25 @@ public class RocksDB extends RocksObject {
final List<ColumnFamilyDescriptor> columnFamilyDescriptors,
final List<ColumnFamilyHandle> columnFamilyHandles)
throws RocksDBException {
RocksDB db = new RocksDB();
List<Long> cfReferences = db.open(options.nativeHandle_, path,
columnFamilyDescriptors, columnFamilyDescriptors.size());
final byte[][] cfNames = new byte[columnFamilyDescriptors.size()][];
final long[] cfOptionHandles = new long[columnFamilyDescriptors.size()];
for (int i = 0; i < columnFamilyDescriptors.size(); i++) {
columnFamilyHandles.add(new ColumnFamilyHandle(db, cfReferences.get(i)));
final ColumnFamilyDescriptor cfDescriptor = columnFamilyDescriptors
.get(i);
cfNames[i] = cfDescriptor.columnFamilyName();
cfOptionHandles[i] = cfDescriptor.columnFamilyOptions().nativeHandle_;
}
final long[] handles = open(options.nativeHandle_, path, cfNames,
cfOptionHandles);
final RocksDB db = new RocksDB(handles[0]);
db.storeOptionsInstance(options);
for (int i = 1; i < handles.length; i++) {
columnFamilyHandles.add(new ColumnFamilyHandle(db, handles[i]));
}
return db;
}
@ -276,7 +289,7 @@ public class RocksDB extends RocksObject {
throws RocksDBException {
// This allows to use the rocksjni default Options instead of
// the c++ one.
DBOptions options = new DBOptions();
final DBOptions options = new DBOptions();
return openReadOnly(options, path, columnFamilyDescriptors,
columnFamilyHandles);
}
@ -303,9 +316,7 @@ public class RocksDB extends RocksObject {
// when non-default Options is used, keeping an Options reference
// in RocksDB can prevent Java to GC during the life-time of
// the currently-created RocksDB.
RocksDB db = new RocksDB();
db.openROnly(options.nativeHandle_, path);
final RocksDB db = new RocksDB(openROnly(options.nativeHandle_, path));
db.storeOptionsInstance(options);
return db;
}
@ -339,14 +350,25 @@ public class RocksDB extends RocksObject {
// when non-default Options is used, keeping an Options reference
// in RocksDB can prevent Java to GC during the life-time of
// the currently-created RocksDB.
RocksDB db = new RocksDB();
List<Long> cfReferences = db.openROnly(options.nativeHandle_, path,
columnFamilyDescriptors, columnFamilyDescriptors.size());
for (int i=0; i<columnFamilyDescriptors.size(); i++) {
columnFamilyHandles.add(new ColumnFamilyHandle(db, cfReferences.get(i)));
final byte[][] cfNames = new byte[columnFamilyDescriptors.size()][];
final long[] cfOptionHandles = new long[columnFamilyDescriptors.size()];
for (int i = 0; i < columnFamilyDescriptors.size(); i++) {
final ColumnFamilyDescriptor cfDescriptor = columnFamilyDescriptors
.get(i);
cfNames[i] = cfDescriptor.columnFamilyName();
cfOptionHandles[i] = cfDescriptor.columnFamilyOptions().nativeHandle_;
}
final long[] handles = openROnly(options.nativeHandle_, path, cfNames,
cfOptionHandles);
final RocksDB db = new RocksDB(handles[0]);
db.storeOptionsInstance(options);
for (int i = 1; i < handles.length; i++) {
columnFamilyHandles.add(new ColumnFamilyHandle(db, handles[i]));
}
return db;
}
/**
@ -362,28 +384,14 @@ public class RocksDB extends RocksObject {
*/
public static List<byte[]> listColumnFamilies(final Options options,
final String path) throws RocksDBException {
return RocksDB.listColumnFamilies(options.nativeHandle_, path);
return Arrays.asList(RocksDB.listColumnFamilies(options.nativeHandle_,
path));
}
private void storeOptionsInstance(DBOptionsInterface options) {
options_ = options;
}
@Override protected void disposeInternal() {
synchronized (this) {
assert (isInitialized());
disposeInternal(nativeHandle_);
}
}
/**
* Close the RocksDB instance.
* This function is equivalent to dispose().
*/
public void close() {
dispose();
}
/**
* Set the database entry for "key" to "value".
*
@ -393,7 +401,8 @@ public class RocksDB extends RocksObject {
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public void put(final byte[] key, final byte[] value) throws RocksDBException {
public void put(final byte[] key, final byte[] value)
throws RocksDBException {
put(nativeHandle_, key, key.length, value, value.length);
}
@ -452,8 +461,8 @@ public class RocksDB extends RocksObject {
public void put(final ColumnFamilyHandle columnFamilyHandle,
final WriteOptions writeOpts, final byte[] key,
final byte[] value) throws RocksDBException {
put(nativeHandle_, writeOpts.nativeHandle_, key, key.length, value, value.length,
columnFamilyHandle.nativeHandle_);
put(nativeHandle_, writeOpts.nativeHandle_, key, key.length, value,
value.length, columnFamilyHandle.nativeHandle_);
}
/**
@ -469,7 +478,7 @@ public class RocksDB extends RocksObject {
* @return boolean value indicating if key does not exist or might exist.
*/
public boolean keyMayExist(final byte[] key, final StringBuffer value){
return keyMayExist(key, key.length, value);
return keyMayExist(nativeHandle_, key, key.length, value);
}
/**
@ -487,8 +496,8 @@ public class RocksDB extends RocksObject {
*/
public boolean keyMayExist(final ColumnFamilyHandle columnFamilyHandle,
final byte[] key, final StringBuffer value){
return keyMayExist(key, key.length, columnFamilyHandle.nativeHandle_,
value);
return keyMayExist(nativeHandle_, key, key.length,
columnFamilyHandle.nativeHandle_, value);
}
/**
@ -506,7 +515,7 @@ public class RocksDB extends RocksObject {
*/
public boolean keyMayExist(final ReadOptions readOptions,
final byte[] key, final StringBuffer value){
return keyMayExist(readOptions.nativeHandle_,
return keyMayExist(nativeHandle_, readOptions.nativeHandle_,
key, key.length, value);
}
@ -527,7 +536,7 @@ public class RocksDB extends RocksObject {
public boolean keyMayExist(final ReadOptions readOptions,
final ColumnFamilyHandle columnFamilyHandle, final byte[] key,
final StringBuffer value){
return keyMayExist(readOptions.nativeHandle_,
return keyMayExist(nativeHandle_, readOptions.nativeHandle_,
key, key.length, columnFamilyHandle.nativeHandle_,
value);
}
@ -543,7 +552,7 @@ public class RocksDB extends RocksObject {
*/
public void write(final WriteOptions writeOpts, final WriteBatch updates)
throws RocksDBException {
write0(writeOpts.nativeHandle_, updates.nativeHandle_);
write0(nativeHandle_, writeOpts.nativeHandle_, updates.nativeHandle_);
}
/**
@ -557,7 +566,7 @@ public class RocksDB extends RocksObject {
*/
public void write(final WriteOptions writeOpts,
final WriteBatchWithIndex updates) throws RocksDBException {
write1(writeOpts.nativeHandle_, updates.nativeHandle_);
write1(nativeHandle_, writeOpts.nativeHandle_, updates.nativeHandle_);
}
/**
@ -570,7 +579,8 @@ public class RocksDB extends RocksObject {
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public void merge(final byte[] key, final byte[] value) throws RocksDBException {
public void merge(final byte[] key, final byte[] value)
throws RocksDBException {
merge(nativeHandle_, key, key.length, value, value.length);
}
@ -745,9 +755,10 @@ public class RocksDB extends RocksObject {
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public byte[] get(final ColumnFamilyHandle columnFamilyHandle, final byte[] key)
throws RocksDBException {
return get(nativeHandle_, key, key.length, columnFamilyHandle.nativeHandle_);
public byte[] get(final ColumnFamilyHandle columnFamilyHandle,
final byte[] key) throws RocksDBException {
return get(nativeHandle_, key, key.length,
columnFamilyHandle.nativeHandle_);
}
/**
@ -803,16 +814,16 @@ public class RocksDB extends RocksObject {
throws RocksDBException {
assert(keys.size() != 0);
List<byte[]> values = multiGet(
nativeHandle_, keys, keys.size());
final byte[][] values = multiGet(nativeHandle_,
keys.toArray(new byte[keys.size()][]));
Map<byte[], byte[]> keyValueMap = new HashMap<>();
for(int i = 0; i < values.size(); i++) {
if(values.get(i) == null) {
for(int i = 0; i < values.length; i++) {
if(values[i] == null) {
continue;
}
keyValueMap.put(keys.get(i), values.get(i));
keyValueMap.put(keys.get(i), values[i]);
}
return keyValueMap;
@ -836,24 +847,30 @@ public class RocksDB extends RocksObject {
* @throws IllegalArgumentException thrown if the size of passed keys is not
* equal to the amount of passed column family handles.
*/
public Map<byte[], byte[]> multiGet(final List<ColumnFamilyHandle> columnFamilyHandleList,
final List<byte[]> keys) throws RocksDBException, IllegalArgumentException {
public Map<byte[], byte[]> multiGet(
final List<ColumnFamilyHandle> columnFamilyHandleList,
final List<byte[]> keys) throws RocksDBException,
IllegalArgumentException {
assert(keys.size() != 0);
// Check if key size equals cfList size. If not a exception must be
// thrown. If not a Segmentation fault happens.
if (keys.size()!=columnFamilyHandleList.size()) {
if (keys.size() != columnFamilyHandleList.size()) {
throw new IllegalArgumentException(
"For each key there must be a ColumnFamilyHandle.");
}
List<byte[]> values = multiGet(nativeHandle_, keys, keys.size(),
columnFamilyHandleList);
final long[] cfHandles = new long[columnFamilyHandleList.size()];
for (int i = 0; i < columnFamilyHandleList.size(); i++) {
cfHandles[i] = columnFamilyHandleList.get(i).nativeHandle_;
}
final byte[][] values = multiGet(nativeHandle_,
keys.toArray(new byte[keys.size()][]), cfHandles);
Map<byte[], byte[]> keyValueMap = new HashMap<>();
for(int i = 0; i < values.size(); i++) {
if (values.get(i) == null) {
for(int i = 0; i < values.length; i++) {
if (values[i] == null) {
continue;
}
keyValueMap.put(keys.get(i), values.get(i));
keyValueMap.put(keys.get(i), values[i]);
}
return keyValueMap;
}
@ -873,16 +890,16 @@ public class RocksDB extends RocksObject {
final List<byte[]> keys) throws RocksDBException {
assert(keys.size() != 0);
List<byte[]> values = multiGet(
nativeHandle_, opt.nativeHandle_, keys, keys.size());
final byte[][] values = multiGet(nativeHandle_, opt.nativeHandle_,
keys.toArray(new byte[keys.size()][]));
Map<byte[], byte[]> keyValueMap = new HashMap<>();
for(int i = 0; i < values.size(); i++) {
if(values.get(i) == null) {
for(int i = 0; i < values.length; i++) {
if(values[i] == null) {
continue;
}
keyValueMap.put(keys.get(i), values.get(i));
keyValueMap.put(keys.get(i), values[i]);
}
return keyValueMap;
@ -917,16 +934,19 @@ public class RocksDB extends RocksObject {
throw new IllegalArgumentException(
"For each key there must be a ColumnFamilyHandle.");
}
List<byte[]> values = multiGet(nativeHandle_, opt.nativeHandle_,
keys, keys.size(), columnFamilyHandleList);
final long[] cfHandles = new long[columnFamilyHandleList.size()];
for (int i = 0; i < columnFamilyHandleList.size(); i++) {
cfHandles[i] = columnFamilyHandleList.get(i).nativeHandle_;
}
final byte[][] values = multiGet(nativeHandle_, opt.nativeHandle_,
keys.toArray(new byte[keys.size()][]), cfHandles);
Map<byte[], byte[]> keyValueMap = new HashMap<>();
for(int i = 0; i < values.size(); i++) {
if(values.get(i) == null) {
for(int i = 0; i < values.length; i++) {
if(values[i] == null) {
continue;
}
keyValueMap.put(keys.get(i), values.get(i));
keyValueMap.put(keys.get(i), values[i]);
}
return keyValueMap;
@ -958,8 +978,8 @@ public class RocksDB extends RocksObject {
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public void remove(final ColumnFamilyHandle columnFamilyHandle, final byte[] key)
throws RocksDBException {
public void remove(final ColumnFamilyHandle columnFamilyHandle,
final byte[] key) throws RocksDBException {
remove(nativeHandle_, key, key.length, columnFamilyHandle.nativeHandle_);
}
@ -1009,8 +1029,9 @@ public class RocksDB extends RocksObject {
*
* <p>Valid property names include:
* <ul>
* <li>"rocksdb.num-files-at-level&lt;N&gt;" - return the number of files at level &lt;N&gt;,
* where &lt;N&gt; is an ASCII representation of a level number (e.g. "0").</li>
* <li>"rocksdb.num-files-at-level&lt;N&gt;" - return the number of files at
* level &lt;N&gt;, where &lt;N&gt; is an ASCII representation of a level
* number (e.g. "0").</li>
* <li>"rocksdb.stats" - returns a multi-line string that describes statistics
* about the internal operation of the DB.</li>
* <li>"rocksdb.sstables" - returns a multi-line string that describes all
@ -1027,8 +1048,8 @@ public class RocksDB extends RocksObject {
*/
public String getProperty(final ColumnFamilyHandle columnFamilyHandle,
final String property) throws RocksDBException {
return getProperty0(nativeHandle_, columnFamilyHandle.nativeHandle_, property,
property.length());
return getProperty0(nativeHandle_, columnFamilyHandle.nativeHandle_,
property, property.length());
}
/**
@ -1039,8 +1060,9 @@ public class RocksDB extends RocksObject {
*
* <p>Valid property names include:
* <ul>
* <li>"rocksdb.num-files-at-level&lt;N&gt;" - return the number of files at level &lt;N&gt;,
* where &lt;N&gt; is an ASCII representation of a level number (e.g. "0").</li>
* <li>"rocksdb.num-files-at-level&lt;N&gt;" - return the number of files at
* level &lt;N&gt;, where &lt;N&gt; is an ASCII representation of a level
* number (e.g. "0").</li>
* <li>"rocksdb.stats" - returns a multi-line string that describes statistics
* about the internal operation of the DB.</li>
* <li>"rocksdb.sstables" - returns a multi-line string that describes all
@ -1058,8 +1080,8 @@ public class RocksDB extends RocksObject {
}
/**
* <p> Similar to GetProperty(), but only works for a subset of properties whose
* return value is a numerical value. Return the value as long.</p>
* <p> Similar to GetProperty(), but only works for a subset of properties
* whose return value is a numerical value. Return the value as long.</p>
*
* <p><strong>Note</strong>: As the returned property is of type
* {@code uint64_t} on C++ side the returning value can be negative
@ -1084,8 +1106,8 @@ public class RocksDB extends RocksObject {
}
/**
* <p> Similar to GetProperty(), but only works for a subset of properties whose
* return value is a numerical value. Return the value as long.</p>
* <p> Similar to GetProperty(), but only works for a subset of properties
* whose return value is a numerical value. Return the value as long.</p>
*
* <p><strong>Note</strong>: As the returned property is of type
* {@code uint64_t} on C++ side the returning value can be negative
@ -1109,8 +1131,8 @@ public class RocksDB extends RocksObject {
*/
public long getLongProperty(final ColumnFamilyHandle columnFamilyHandle,
final String property) throws RocksDBException {
return getLongProperty(nativeHandle_, columnFamilyHandle.nativeHandle_, property,
property.length());
return getLongProperty(nativeHandle_, columnFamilyHandle.nativeHandle_,
property, property.length());
}
/**
@ -1192,7 +1214,8 @@ public class RocksDB extends RocksObject {
* instance
* @return instance of iterator object.
*/
public RocksIterator newIterator(final ColumnFamilyHandle columnFamilyHandle) {
public RocksIterator newIterator(
final ColumnFamilyHandle columnFamilyHandle) {
return new RocksIterator(this, iteratorCF(nativeHandle_,
columnFamilyHandle.nativeHandle_));
}
@ -1232,7 +1255,8 @@ public class RocksDB extends RocksObject {
* native library.
*/
public List<RocksIterator> newIterators(
final List<ColumnFamilyHandle> columnFamilyHandleList) throws RocksDBException {
final List<ColumnFamilyHandle> columnFamilyHandleList)
throws RocksDBException {
return newIterators(columnFamilyHandleList, new ReadOptions());
}
@ -1253,11 +1277,17 @@ public class RocksDB extends RocksObject {
public List<RocksIterator> newIterators(
final List<ColumnFamilyHandle> columnFamilyHandleList,
final ReadOptions readOptions) throws RocksDBException {
List<RocksIterator> iterators =
new ArrayList<>(columnFamilyHandleList.size());
long[] iteratorRefs = iterators(nativeHandle_, columnFamilyHandleList,
final long[] columnFamilyHandles = new long[columnFamilyHandleList.size()];
for (int i = 0; i < columnFamilyHandleList.size(); i++) {
columnFamilyHandles[i] = columnFamilyHandleList.get(i).nativeHandle_;
}
final long[] iteratorRefs = iterators(nativeHandle_, columnFamilyHandles,
readOptions.nativeHandle_);
final List<RocksIterator> iterators = new ArrayList<>(
columnFamilyHandleList.size());
for (int i=0; i<columnFamilyHandleList.size(); i++){
iterators.add(new RocksIterator(this, iteratorRefs[i]));
}
@ -1291,7 +1321,8 @@ public class RocksDB extends RocksObject {
final ColumnFamilyDescriptor columnFamilyDescriptor)
throws RocksDBException {
return new ColumnFamilyHandle(this, createColumnFamily(nativeHandle_,
columnFamilyDescriptor));
columnFamilyDescriptor.columnFamilyName(),
columnFamilyDescriptor.columnFamilyOptions().nativeHandle_));
}
/**
@ -1310,7 +1341,7 @@ public class RocksDB extends RocksObject {
// throws RocksDBException if something goes wrong
dropColumnFamily(nativeHandle_, columnFamilyHandle.nativeHandle_);
// After the drop the native handle is not valid anymore
columnFamilyHandle.nativeHandle_ = 0;
columnFamilyHandle.disOwnNativeHandle();
}
/**
@ -1672,26 +1703,55 @@ public class RocksDB extends RocksObject {
/**
* Private constructor.
*
* @param nativeHandle The native handle of the C++ RocksDB object
*/
protected RocksDB() {
super();
protected RocksDB(final long nativeHandle) {
super(nativeHandle);
}
// native methods
protected native void open(
long optionsHandle, String path) throws RocksDBException;
protected native List<Long> open(long optionsHandle, String path,
List<ColumnFamilyDescriptor> columnFamilyDescriptors,
int columnFamilyDescriptorsLength)
throws RocksDBException;
protected native static List<byte[]> listColumnFamilies(
long optionsHandle, String path) throws RocksDBException;
protected native void openROnly(
protected native static long open(final long optionsHandle,
final String path) throws RocksDBException;
/**
* @param optionsHandle Native handle pointing to an Options object
* @param path The directory path for the database files
* @param columnFamilyNames An array of column family names
* @param columnFamilyOptions An array of native handles pointing to
* ColumnFamilyOptions objects
*
* @return An array of native handles; [0] is the handle of the RocksDB object,
* [1..n] are the handles of the ColumnFamilyHandle objects
*
* @throws RocksDBException thrown if the database could not be opened
*/
protected native static long[] open(final long optionsHandle,
final String path, final byte[][] columnFamilyNames,
final long[] columnFamilyOptions) throws RocksDBException;
protected native static long openROnly(final long optionsHandle,
final String path) throws RocksDBException;
/**
* @param optionsHandle Native handle pointing to an Options object
* @param path The directory path for the database files
* @param columnFamilyNames An array of column family names
* @param columnFamilyOptions An array of native handles pointing to
* ColumnFamilyOptions objects
*
* @return An array of native handles; [0] is the handle of the RocksDB object,
* [1..n] are the handles of the ColumnFamilyHandle objects
*
* @throws RocksDBException thrown if the database could not be opened
*/
protected native static long[] openROnly(final long optionsHandle,
final String path, final byte[][] columnFamilyNames,
final long[] columnFamilyOptions
) throws RocksDBException;
protected native static byte[][] listColumnFamilies(
long optionsHandle, String path) throws RocksDBException;
protected native List<Long> openROnly(
long optionsHandle, String path,
List<ColumnFamilyDescriptor> columnFamilyDescriptors,
int columnFamilyDescriptorsLength) throws RocksDBException;
protected native void put(
long handle, byte[] key, int keyLen,
byte[] value, int valueLen) throws RocksDBException;
@ -1706,18 +1766,20 @@ public class RocksDB extends RocksObject {
long handle, long writeOptHandle,
byte[] key, int keyLen,
byte[] value, int valueLen, long cfHandle) throws RocksDBException;
protected native void write0(
long writeOptHandle, long wbHandle) throws RocksDBException;
protected native void write1(
long writeOptHandle, long wbwiHandle) throws RocksDBException;
protected native boolean keyMayExist(byte[] key, int keyLen,
StringBuffer stringBuffer);
protected native boolean keyMayExist(byte[] key, int keyLen,
long cfHandle, StringBuffer stringBuffer);
protected native boolean keyMayExist(long optionsHandle, byte[] key, int keyLen,
StringBuffer stringBuffer);
protected native boolean keyMayExist(long optionsHandle, byte[] key, int keyLen,
long cfHandle, StringBuffer stringBuffer);
protected native void write0(final long handle, long writeOptHandle,
long wbHandle) throws RocksDBException;
protected native void write1(final long handle, long writeOptHandle,
long wbwiHandle) throws RocksDBException;
protected native boolean keyMayExist(final long handle, final byte[] key,
final int keyLen, final StringBuffer stringBuffer);
protected native boolean keyMayExist(final long handle, final byte[] key,
final int keyLen, final long cfHandle, final StringBuffer stringBuffer);
protected native boolean keyMayExist(final long handle,
final long optionsHandle, final byte[] key, final int keyLen,
final StringBuffer stringBuffer);
protected native boolean keyMayExist(final long handle,
final long optionsHandle, final byte[] key, final int keyLen,
final long cfHandle, final StringBuffer stringBuffer);
protected native void merge(
long handle, byte[] key, int keyLen,
byte[] value, int valueLen) throws RocksDBException;
@ -1744,20 +1806,18 @@ public class RocksDB extends RocksObject {
protected native int get(
long handle, long readOptHandle, byte[] key, int keyLen,
byte[] value, int valueLen, long cfHandle) throws RocksDBException;
protected native List<byte[]> multiGet(
long dbHandle, List<byte[]> keys, int keysCount);
protected native List<byte[]> multiGet(
long dbHandle, List<byte[]> keys, int keysCount, List<ColumnFamilyHandle>
cfHandles);
protected native List<byte[]> multiGet(
long dbHandle, long rOptHandle, List<byte[]> keys, int keysCount);
protected native List<byte[]> multiGet(
long dbHandle, long rOptHandle, List<byte[]> keys, int keysCount,
List<ColumnFamilyHandle> cfHandles);
protected native byte[][] multiGet(final long dbHandle, final byte[][] keys);
protected native byte[][] multiGet(final long dbHandle, final byte[][] keys,
final long[] columnFamilyHandles);
protected native byte[][] multiGet(final long dbHandle, final long rOptHandle,
final byte[][] keys);
protected native byte[][] multiGet(final long dbHandle, final long rOptHandle,
final byte[][] keys, final long[] columnFamilyHandles);
protected native byte[] get(
long handle, byte[] key, int keyLen) throws RocksDBException;
protected native byte[] get(
long handle, byte[] key, int keyLen, long cfHandle) throws RocksDBException;
long handle, byte[] key, int keyLen, long cfHandle)
throws RocksDBException;
protected native byte[] get(
long handle, long readOptHandle,
byte[] key, int keyLen) throws RocksDBException;
@ -1767,7 +1827,8 @@ public class RocksDB extends RocksObject {
protected native void remove(
long handle, byte[] key, int keyLen) throws RocksDBException;
protected native void remove(
long handle, byte[] key, int keyLen, long cfHandle) throws RocksDBException;
long handle, byte[] key, int keyLen, long cfHandle)
throws RocksDBException;
protected native void remove(
long handle, long writeOptHandle,
byte[] key, int keyLen) throws RocksDBException;
@ -1787,34 +1848,36 @@ public class RocksDB extends RocksObject {
protected native long iteratorCF(long handle, long cfHandle);
protected native long iteratorCF(long handle, long cfHandle,
long readOptHandle);
protected native long[] iterators(long handle,
List<ColumnFamilyHandle> columnFamilyNames, long readOptHandle)
protected native long[] iterators(final long handle,
final long[] columnFamilyHandles, final long readOptHandle)
throws RocksDBException;
protected native long getSnapshot(long nativeHandle);
protected native void releaseSnapshot(
long nativeHandle, long snapshotHandle);
private native void disposeInternal(long handle);
@Override protected final native void disposeInternal(final long handle);
private native long getDefaultColumnFamily(long handle);
private native long createColumnFamily(long handle,
ColumnFamilyDescriptor columnFamilyDescriptor) throws RocksDBException;
private native void dropColumnFamily(long handle, long cfHandle) throws RocksDBException;
private native long createColumnFamily(final long handle,
final byte[] columnFamilyName, final long columnFamilyOptions)
throws RocksDBException;
private native void dropColumnFamily(long handle, long cfHandle)
throws RocksDBException;
private native void flush(long handle, long flushOptHandle)
throws RocksDBException;
private native void flush(long handle, long flushOptHandle,
long cfHandle) throws RocksDBException;
private native void compactRange0(long handle, boolean reduce_level, int target_level,
private native void compactRange0(long handle, boolean reduce_level,
int target_level, int target_path_id) throws RocksDBException;
private native void compactRange0(long handle, byte[] begin, int beginLen,
byte[] end, int endLen, boolean reduce_level, int target_level,
int target_path_id) throws RocksDBException;
private native void compactRange0(long handle, byte[] begin, int beginLen, byte[] end,
int endLen, boolean reduce_level, int target_level, int target_path_id)
private native void compactRange(long handle, boolean reduce_level,
int target_level, int target_path_id, long cfHandle)
throws RocksDBException;
private native void compactRange(long handle, boolean reduce_level, int target_level,
private native void compactRange(long handle, byte[] begin, int beginLen,
byte[] end, int endLen, boolean reduce_level, int target_level,
int target_path_id, long cfHandle) throws RocksDBException;
private native void compactRange(long handle, byte[] begin, int beginLen, byte[] end,
int endLen, boolean reduce_level, int target_level, int target_path_id,
long cfHandle) throws RocksDBException;
private native long getLatestSequenceNumber(long handle);
private native void disableFileDeletions(long handle)
throws RocksDBException;
private native void disableFileDeletions(long handle) throws RocksDBException;
private native void enableFileDeletions(long handle,
boolean force) throws RocksDBException;
private native long getUpdatesSince(long handle, long sequenceNumber)

@ -24,8 +24,7 @@ public class RocksEnv extends Env {
* {@code dispose()} of the created RocksEnv will be a no-op.</p>
*/
RocksEnv(final long handle) {
super();
nativeHandle_ = handle;
super(handle);
disOwnNativeHandle();
}
@ -38,6 +37,7 @@ public class RocksEnv extends Env {
* RocksEnv with RocksJava. The default env allocation is managed
* by C++.</p>
*/
@Override protected void disposeInternal() {
@Override
protected final void disposeInternal(final long handle) {
}
}

@ -33,7 +33,7 @@ public class RocksIterator extends AbstractRocksIterator<RocksDB> {
* @return key for the current entry.
*/
public byte[] key() {
assert(isInitialized());
assert(isOwningHandle());
return key0(nativeHandle_);
}
@ -46,11 +46,11 @@ public class RocksIterator extends AbstractRocksIterator<RocksDB> {
* @return value for the current entry.
*/
public byte[] value() {
assert(isInitialized());
assert(isOwningHandle());
return value0(nativeHandle_);
}
@Override final native void disposeInternal(long handle);
@Override protected final native void disposeInternal(final long handle);
@Override final native boolean isValid0(long handle);
@Override final native void seekToFirst0(long handle);
@Override final native void seekToLast0(long handle);

@ -19,15 +19,9 @@ public class RocksMemEnv extends Env {
* <p>{@code *base_env} must remain live while the result is in use.</p>
*/
public RocksMemEnv() {
super();
nativeHandle_ = createMemEnv();
}
@Override
protected void disposeInternal() {
disposeInternal(nativeHandle_);
super(createMemEnv());
}
private static native long createMemEnv();
private native void disposeInternal(long handle);
@Override protected final native void disposeInternal(final long handle);
}

@ -0,0 +1,69 @@
// Copyright (c) 2016, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
/**
 * RocksMutableObject is an implementation of {@link AbstractNativeReference}
 * whose reference to the underlying native C++ object can change.
 *
 * <p>The use of {@code RocksMutableObject} should be kept to a minimum, as it
 * has synchronization overheads and introduces complexity. Instead it is
 * recommended to use {@link RocksObject} where possible.</p>
 */
public abstract class RocksMutableObject extends AbstractNativeReference {
/**
 * A mutable reference to the value of the C++ pointer pointing to some
 * underlying native RocksDB C++ object.
 */
private long nativeHandle_;
// True whilst this Java object is responsible for deleting the native
// object; cleared by close() or when ownership is relinquished through
// setNativeHandle(handle, false).
private boolean owningHandle_;
/**
 * Constructs a RocksMutableObject with no native handle assigned yet; a
 * handle must later be supplied via {@link #setNativeHandle(long, boolean)}
 * before {@link #getNativeHandle()} may be called.
 */
protected RocksMutableObject() {
}
/**
 * Constructs a RocksMutableObject that takes ownership of the given
 * native handle.
 *
 * @param nativeHandle the value of the C++ pointer to the underlying
 *     native RocksDB C++ object
 */
protected RocksMutableObject(final long nativeHandle) {
this.nativeHandle_ = nativeHandle;
this.owningHandle_ = true;
}
/**
 * Replaces the reference to the underlying native C++ object.
 *
 * @param nativeHandle the value of the C++ pointer to the underlying
 *     native RocksDB C++ object
 * @param owningNativeHandle true if this Java object should be responsible
 *     for disposing of the native object, false otherwise
 */
public synchronized void setNativeHandle(final long nativeHandle,
final boolean owningNativeHandle) {
this.nativeHandle_ = nativeHandle;
this.owningHandle_ = owningNativeHandle;
}
@Override
protected synchronized boolean isOwningHandle() {
return this.owningHandle_;
}
/**
 * Gets the value of the C++ pointer pointing to the underlying
 * native C++ object
 *
 * @return the pointer value for the native object
 */
protected synchronized long getNativeHandle() {
assert (this.nativeHandle_ != 0);
return this.nativeHandle_;
}
/**
 * Releases the native object if, and only if, this Java object currently
 * owns it. The handle and ownership flag are cleared afterwards, so the
 * native object is released at most once.
 */
@Override
public synchronized final void close() {
if (isOwningHandle()) {
disposeInternal();
this.owningHandle_ = false;
this.nativeHandle_ = 0;
}
}
// Forwards disposal to the subclass-specific native implementation using
// the currently held handle.
protected void disposeInternal() {
disposeInternal(nativeHandle_);
}
/**
 * Deletes the underlying native C++ object.
 *
 * @param handle the value of the C++ pointer to the native object
 */
protected abstract void disposeInternal(final long handle);
}

@ -6,120 +6,36 @@
package org.rocksdb;
/**
* RocksObject is the base-class of all RocksDB classes that has a pointer to
* some c++ {@code rocksdb} object.
*
* RocksObject is an implementation of {@link AbstractNativeReference} which
* has an immutable and therefore thread-safe reference to the underlying
* native C++ RocksDB object.
* <p>
* RocksObject has {@code dispose()} function, which releases its associated c++
* resource.</p>
* RocksObject is the base-class of almost all RocksDB classes that have a
* pointer to some underlying native C++ {@code rocksdb} object.</p>
* <p>
* This function can be either called manually, or being called automatically
* during the regular Java GC process. However, since Java may wrongly assume a
* RocksObject only contains a long member variable and think it is small in size,
* Java may give {@code RocksObject} low priority in the GC process. For this, it is
* suggested to call {@code dispose()} manually. However, it is safe to let
* {@code RocksObject} go out-of-scope without manually calling {@code dispose()}
* as {@code dispose()} will be called in the finalizer during the
* regular GC process.</p>
* The use of {@code RocksObject} should always be preferred over
* {@link RocksMutableObject}.</p>
*/
public abstract class RocksObject {
protected RocksObject() {
nativeHandle_ = 0;
owningHandle_ = true;
}
/**
* Release the c++ object manually pointed by the native handle.
* <p>
* Note that {@code dispose()} will also be called during the GC process
* if it was not called before its {@code RocksObject} went out-of-scope.
* However, since Java may wrongly wrongly assume those objects are
* small in that they seems to only hold a long variable. As a result,
* they might have low priority in the GC process. To prevent this,
* it is suggested to call {@code dispose()} manually.
* </p>
* <p>
* Note that once an instance of {@code RocksObject} has been disposed,
* calling its function will lead undefined behavior.
* </p>
*/
public final synchronized void dispose() {
if (isOwningNativeHandle() && isInitialized()) {
disposeInternal();
}
nativeHandle_ = 0;
disOwnNativeHandle();
}
/**
* The helper function of {@code dispose()} which all subclasses of
* {@code RocksObject} must implement to release their associated
* C++ resource.
*/
protected abstract void disposeInternal();
/**
* Revoke ownership of the native object.
* <p>
* This will prevent the object from attempting to delete the underlying
* native object in its finalizer. This must be used when another object
* takes over ownership of the native object or both will attempt to delete
* the underlying object when garbage collected.
* <p>
* When {@code disOwnNativeHandle()} is called, {@code dispose()} will simply set
* {@code nativeHandle_} to 0 without releasing its associated C++ resource.
* As a result, incorrectly use this function may cause memory leak, and this
* function call will not affect the return value of {@code isInitialized()}.
* </p>
* @see #dispose()
* @see #isInitialized()
*/
protected void disOwnNativeHandle() {
owningHandle_ = false;
}
public abstract class RocksObject extends AbstractImmutableNativeReference {
/**
* Returns true if the current {@code RocksObject} is responsible to release
* its native handle.
*
* @return true if the current {@code RocksObject} is responsible to release
* its native handle.
*
* @see #disOwnNativeHandle()
* @see #dispose()
* An immutable reference to the value of the C++ pointer pointing to some
* underlying native RocksDB C++ object.
*/
protected boolean isOwningNativeHandle() {
return owningHandle_;
}
protected final long nativeHandle_;
/**
* Returns true if the associated native handle has been initialized.
*
* @return true if the associated native handle has been initialized.
*
* @see #dispose()
*/
protected boolean isInitialized() {
return (nativeHandle_ != 0);
protected RocksObject(final long nativeHandle) {
super(true);
this.nativeHandle_ = nativeHandle;
}
/**
* Simply calls {@code dispose()} and release its c++ resource if it has not
* yet released.
* Deletes underlying C++ object pointer.
*/
@Override protected void finalize() throws Throwable {
dispose();
super.finalize();
@Override
protected void disposeInternal() {
disposeInternal(nativeHandle_);
}
/**
* A long variable holding c++ pointer pointing to some RocksDB C++ object.
*/
protected long nativeHandle_;
/**
* A flag indicating whether the current {@code RocksObject} is responsible to
* release the c++ object stored in its {@code nativeHandle_}.
*/
private boolean owningHandle_;
protected abstract void disposeInternal(final long handle);
}

@ -29,7 +29,6 @@ public class Slice extends AbstractSlice<byte[]> {
*/
private Slice() {
super();
disOwnNativeHandle();
}
/**
@ -39,8 +38,7 @@ public class Slice extends AbstractSlice<byte[]> {
* @param str String value.
*/
public Slice(final String str) {
super();
createNewSliceFromString(str);
super(createNewSliceFromString(str));
}
/**
@ -51,8 +49,7 @@ public class Slice extends AbstractSlice<byte[]> {
* @param offset offset within the byte array.
*/
public Slice(final byte[] data, final int offset) {
super();
createNewSlice0(data, offset);
super(createNewSlice0(data, offset));
}
/**
@ -62,8 +59,7 @@ public class Slice extends AbstractSlice<byte[]> {
* @param data byte array.
*/
public Slice(final byte[] data) {
super();
createNewSlice1(data);
super(createNewSlice1(data));
}
/**
@ -77,12 +73,14 @@ public class Slice extends AbstractSlice<byte[]> {
*/
@Override
protected void disposeInternal() {
disposeInternalBuf(nativeHandle_);
super.disposeInternal();
final long nativeHandle = getNativeHandle();
disposeInternalBuf(nativeHandle);
super.disposeInternal(nativeHandle);
}
@Override protected final native byte[] data0(long handle);
private native void createNewSlice0(byte[] data, int length);
private native void createNewSlice1(byte[] data);
private native void disposeInternalBuf(long handle);
private native static long createNewSlice0(final byte[] data,
final int length);
private native static long createNewSlice1(final byte[] data);
private native void disposeInternalBuf(final long handle);
}

@ -10,8 +10,7 @@ package org.rocksdb;
*/
public class Snapshot extends RocksObject {
Snapshot(final long nativeHandle) {
super();
nativeHandle_ = nativeHandle;
super(nativeHandle);
}
/**
@ -21,7 +20,7 @@ public class Snapshot extends RocksObject {
* this snapshot.
*/
public long getSequenceNumber() {
assert(isInitialized());
assert(isOwningHandle());
return getSequenceNumber(nativeHandle_);
}
@ -30,7 +29,8 @@ public class Snapshot extends RocksObject {
* to the snapshot is released by the database
* instance.
*/
@Override protected void disposeInternal() {
@Override
protected final void disposeInternal(final long handle) {
}
private native long getSequenceNumber(long handle);

@ -57,12 +57,7 @@ public class TransactionLogIterator extends RocksObject {
* @param nativeHandle address to native address.
*/
TransactionLogIterator(final long nativeHandle) {
super();
nativeHandle_ = nativeHandle;
}
@Override protected void disposeInternal() {
disposeInternal(nativeHandle_);
super(nativeHandle);
}
/**
@ -107,7 +102,7 @@ public class TransactionLogIterator extends RocksObject {
private final WriteBatch writeBatch_;
}
private native void disposeInternal(long handle);
@Override protected final native void disposeInternal(final long handle);
private native boolean isValid(long handle);
private native void next(long handle);
private native void status(long handle)

@ -84,9 +84,7 @@ public class TtlDB extends RocksDB {
*/
public static TtlDB open(final Options options, final String db_path,
final int ttl, final boolean readOnly) throws RocksDBException {
TtlDB ttldb = new TtlDB();
ttldb.open(options.nativeHandle_, db_path, ttl, readOnly);
return ttldb;
return new TtlDB(open(options.nativeHandle_, db_path, ttl, readOnly));
}
/**
@ -114,15 +112,29 @@ public class TtlDB extends RocksDB {
final List<Integer> ttlValues, final boolean readOnly)
throws RocksDBException {
if (columnFamilyDescriptors.size() != ttlValues.size()) {
throw new IllegalArgumentException("There must be a ttl value per column" +
"family handle.");
throw new IllegalArgumentException("There must be a ttl value per column"
+ "family handle.");
}
TtlDB ttlDB = new TtlDB();
List<Long> cfReferences = ttlDB.openCF(options.nativeHandle_, db_path,
columnFamilyDescriptors, columnFamilyDescriptors.size(),
ttlValues, readOnly);
for (int i=0; i<columnFamilyDescriptors.size(); i++) {
columnFamilyHandles.add(new ColumnFamilyHandle(ttlDB, cfReferences.get(i)));
final byte[][] cfNames = new byte[columnFamilyDescriptors.size()][];
final long[] cfOptionHandles = new long[columnFamilyDescriptors.size()];
for (int i = 0; i < columnFamilyDescriptors.size(); i++) {
final ColumnFamilyDescriptor cfDescriptor =
columnFamilyDescriptors.get(i);
cfNames[i] = cfDescriptor.columnFamilyName();
cfOptionHandles[i] = cfDescriptor.columnFamilyOptions().nativeHandle_;
}
final int ttlVals[] = new int[ttlValues.size()];
for(int i = 0; i < ttlValues.size(); i++) {
ttlVals[i] = ttlValues.get(i);
}
final long[] handles = openCF(options.nativeHandle_, db_path,
cfNames, cfOptionHandles, ttlVals, readOnly);
final TtlDB ttlDB = new TtlDB(handles[0]);
for (int i = 1; i < handles.length; i++) {
columnFamilyHandles.add(new ColumnFamilyHandle(ttlDB, handles[i]));
}
return ttlDB;
}
@ -146,10 +158,10 @@ public class TtlDB extends RocksDB {
public ColumnFamilyHandle createColumnFamilyWithTtl(
final ColumnFamilyDescriptor columnFamilyDescriptor,
final int ttl) throws RocksDBException {
assert(isInitialized());
return new ColumnFamilyHandle(this,
createColumnFamilyWithTtl(nativeHandle_,
columnFamilyDescriptor, ttl));
columnFamilyDescriptor.columnFamilyName(),
columnFamilyDescriptor.columnFamilyOptions().nativeHandle_, ttl));
}
/**
@ -161,10 +173,9 @@ public class TtlDB extends RocksDB {
* c++ {@code rocksdb::TtlDB} and should be transparent to
* Java developers.</p>
*/
@Override public synchronized void close() {
if (isInitialized()) {
@Override
public void close() {
super.close();
}
}
/**
@ -175,23 +186,26 @@ public class TtlDB extends RocksDB {
* {@link #open(DBOptions, String, java.util.List, java.util.List,
* java.util.List, boolean)}.
* </p>
*
* @param nativeHandle The native handle of the C++ TtlDB object
*/
protected TtlDB() {
super();
protected TtlDB(final long nativeHandle) {
super(nativeHandle);
}
@Override protected void finalize() throws Throwable {
close();
close(); //TODO(AR) revisit here when implementing AutoCloseable
super.finalize();
}
private native void open(long optionsHandle, String db_path, int ttl,
boolean readOnly) throws RocksDBException;
private native List<Long> openCF(long optionsHandle, String db_path,
List<ColumnFamilyDescriptor> columnFamilyDescriptors,
int columnFamilyDescriptorsLength, List<Integer> ttlValues,
boolean readOnly) throws RocksDBException;
private native long createColumnFamilyWithTtl(long handle,
ColumnFamilyDescriptor columnFamilyDescriptor, int ttl)
private native static long open(final long optionsHandle,
final String db_path, final int ttl, final boolean readOnly)
throws RocksDBException;
private native static long[] openCF(final long optionsHandle,
final String db_path, final byte[][] columnFamilyNames,
final long[] columnFamilyOptions, final int[] ttlValues,
final boolean readOnly) throws RocksDBException;
private native long createColumnFamilyWithTtl(final long handle,
final byte[] columnFamilyName, final long columnFamilyOptions, int ttl)
throws RocksDBException;
}

@ -5,10 +5,12 @@
package org.rocksdb;
public class WBWIRocksIterator extends AbstractRocksIterator<WriteBatchWithIndex> {
public class WBWIRocksIterator
extends AbstractRocksIterator<WriteBatchWithIndex> {
private final WriteEntry entry = new WriteEntry();
protected WBWIRocksIterator(final WriteBatchWithIndex wbwi, final long nativeHandle) {
protected WBWIRocksIterator(final WriteBatchWithIndex wbwi,
final long nativeHandle) {
super(wbwi, nativeHandle);
}
@ -20,16 +22,24 @@ public class WBWIRocksIterator extends AbstractRocksIterator<WriteBatchWithIndex
* If you want to keep the WriteEntry across iterator
* movements, you must make a copy of its data!
*
* Note - This method is not thread-safe with respect to the WriteEntry
* as it performs a non-atomic update across the fields of the WriteEntry
*
* @return The WriteEntry of the current entry
*/
public WriteEntry entry() {
assert(isInitialized());
assert(isOwningHandle());
assert(entry != null);
entry1(nativeHandle_, entry);
final long ptrs[] = entry1(nativeHandle_);
entry.type = WriteType.fromId((byte)ptrs[0]);
entry.key.setNativeHandle(ptrs[1], true);
entry.value.setNativeHandle(ptrs[2], ptrs[2] != 0);
return entry;
}
@Override final native void disposeInternal(long handle);
@Override protected final native void disposeInternal(final long handle);
@Override final native boolean isValid0(long handle);
@Override final native void seekToFirst0(long handle);
@Override final native void seekToLast0(long handle);
@ -38,17 +48,31 @@ public class WBWIRocksIterator extends AbstractRocksIterator<WriteBatchWithIndex
@Override final native void seek0(long handle, byte[] target, int targetLen);
@Override final native void status0(long handle) throws RocksDBException;
private native void entry1(long handle, WriteEntry entry);
private native long[] entry1(final long handle);
/**
* Enumeration of the Write operation
* that created the record in the Write Batch
*/
public enum WriteType {
PUT,
MERGE,
DELETE,
LOG
PUT((byte)0x1),
MERGE((byte)0x2),
DELETE((byte)0x4),
LOG((byte)0x8);
final byte id;
WriteType(final byte id) {
this.id = id;
}
public static WriteType fromId(final byte id) {
for(final WriteType wt : WriteType.values()) {
if(id == wt.id) {
return wt;
}
}
throw new IllegalArgumentException("No WriteType with id=" + id);
}
}
/**
@ -110,7 +134,7 @@ public class WBWIRocksIterator extends AbstractRocksIterator<WriteBatchWithIndex
* no value
*/
public DirectSlice getValue() {
if(!value.isInitialized()) {
if(!value.isOwningHandle()) {
return null; //TODO(AR) migrate to JDK8 java.util.Optional#empty()
} else {
return value;
@ -139,8 +163,7 @@ public class WBWIRocksIterator extends AbstractRocksIterator<WriteBatchWithIndex
final WriteEntry otherWriteEntry = (WriteEntry)other;
return type.equals(otherWriteEntry.type)
&& key.equals(otherWriteEntry.key)
&& (value.isInitialized() ? value.equals(otherWriteEntry.value)
: !otherWriteEntry.value.isInitialized());
&& value.equals(otherWriteEntry.value);
} else {
return false;
}

@ -27,8 +27,7 @@ public class WriteBatch extends AbstractWriteBatch {
* Constructs a WriteBatch instance.
*/
public WriteBatch() {
super();
newWriteBatch(0);
this(0);
}
/**
@ -37,8 +36,7 @@ public class WriteBatch extends AbstractWriteBatch {
* @param reserved_bytes reserved size for WriteBatch
*/
public WriteBatch(final int reserved_bytes) {
nativeHandle_ = 0;
newWriteBatch(reserved_bytes);
super(newWriteBatch(reserved_bytes));
}
/**
@ -50,7 +48,7 @@ public class WriteBatch extends AbstractWriteBatch {
* @throws RocksDBException If we cannot iterate over the batch
*/
public void iterate(final Handler handler) throws RocksDBException {
iterate(handler.nativeHandle_);
iterate(nativeHandle_, handler.nativeHandle_);
}
/**
@ -61,35 +59,44 @@ public class WriteBatch extends AbstractWriteBatch {
* @param nativeHandle address of native instance.
*/
WriteBatch(final long nativeHandle) {
super();
super(nativeHandle);
disOwnNativeHandle();
nativeHandle_ = nativeHandle;
}
@Override final native void disposeInternal(long handle);
@Override final native int count0();
@Override final native void put(byte[] key, int keyLen, byte[] value, int valueLen);
@Override final native void put(byte[] key, int keyLen, byte[] value, int valueLen,
long cfHandle);
@Override final native void merge(byte[] key, int keyLen, byte[] value, int valueLen);
@Override final native void merge(byte[] key, int keyLen, byte[] value, int valueLen,
long cfHandle);
@Override final native void remove(byte[] key, int keyLen);
@Override final native void remove(byte[] key, int keyLen, long cfHandle);
@Override final native void putLogData(byte[] blob, int blobLen);
@Override final native void clear0();
@Override protected final native void disposeInternal(final long handle);
@Override final native int count0(final long handle);
@Override final native void put(final long handle, final byte[] key,
final int keyLen, final byte[] value, final int valueLen);
@Override final native void put(final long handle, final byte[] key,
final int keyLen, final byte[] value, final int valueLen,
final long cfHandle);
@Override final native void merge(final long handle, final byte[] key,
final int keyLen, final byte[] value, final int valueLen);
@Override final native void merge(final long handle, final byte[] key,
final int keyLen, final byte[] value, final int valueLen,
final long cfHandle);
@Override final native void remove(final long handle, final byte[] key,
final int keyLen);
@Override final native void remove(final long handle, final byte[] key,
final int keyLen, final long cfHandle);
@Override final native void putLogData(final long handle,
final byte[] blob, final int blobLen);
@Override final native void clear0(final long handle);
private native void newWriteBatch(int reserved_bytes);
private native void iterate(long handlerHandle) throws RocksDBException;
private native static long newWriteBatch(final int reserved_bytes);
private native void iterate(final long handle, final long handlerHandle)
throws RocksDBException;
/**
* Handler callback for iterating over the contents of a batch.
*/
public static abstract class Handler extends RocksObject {
public static abstract class Handler
extends AbstractImmutableNativeReference {
private final long nativeHandle_;
public Handler() {
super();
createNewHandler0();
super(true);
this.nativeHandle_ = createNewHandler0();
}
public abstract void put(byte[] key, byte[] value);
@ -116,11 +123,10 @@ public class WriteBatch extends AbstractWriteBatch {
*/
@Override
protected void disposeInternal() {
assert(isInitialized());
disposeInternal(nativeHandle_);
}
private native void createNewHandler0();
private native void disposeInternal(long handle);
private native long createNewHandler0();
private native void disposeInternal(final long handle);
}
}

@ -12,10 +12,10 @@ package org.rocksdb;
* Calling put, merge, remove or putLogData calls the same function
* as with {@link org.rocksdb.WriteBatch} whilst also building an index.
*
* A user can call {@link org.rocksdb.WriteBatchWithIndex#newIterator() }to create an iterator
* over the write batch or
* {@link org.rocksdb.WriteBatchWithIndex#newIteratorWithBase(org.rocksdb.RocksIterator)} to
* get an iterator for the database with Read-Your-Own-Writes like capability
* A user can call {@link org.rocksdb.WriteBatchWithIndex#newIterator()} to
* create an iterator over the write batch or
* {@link org.rocksdb.WriteBatchWithIndex#newIteratorWithBase(org.rocksdb.RocksIterator)}
* to get an iterator for the database with Read-Your-Own-Writes like capability
*/
public class WriteBatchWithIndex extends AbstractWriteBatch {
/**
@ -25,8 +25,7 @@ public class WriteBatchWithIndex extends AbstractWriteBatch {
* and duplicate keys operations are retained
*/
public WriteBatchWithIndex() {
super();
newWriteBatchWithIndex();
super(newWriteBatchWithIndex());
}
@ -41,8 +40,7 @@ public class WriteBatchWithIndex extends AbstractWriteBatch {
* show two entries with the same key.
*/
public WriteBatchWithIndex(final boolean overwriteKey) {
super();
newWriteBatchWithIndex(overwriteKey);
super(newWriteBatchWithIndex(overwriteKey));
}
/**
@ -58,10 +56,12 @@ public class WriteBatchWithIndex extends AbstractWriteBatch {
* inserting a duplicate key, in this way an iterator will never
* show two entries with the same key.
*/
public WriteBatchWithIndex(final AbstractComparator<? extends AbstractSlice<?>>
fallbackIndexComparator, final int reservedBytes, final boolean overwriteKey) {
super();
newWriteBatchWithIndex(fallbackIndexComparator.nativeHandle_, reservedBytes, overwriteKey);
public WriteBatchWithIndex(
final AbstractComparator<? extends AbstractSlice<?>>
fallbackIndexComparator, final int reservedBytes,
final boolean overwriteKey) {
super(newWriteBatchWithIndex(fallbackIndexComparator.getNativeHandle(),
reservedBytes, overwriteKey));
}
/**
@ -73,10 +73,13 @@ public class WriteBatchWithIndex extends AbstractWriteBatch {
* time.
*
* @param columnFamilyHandle The column family to iterate over
* @return An iterator for the Write Batch contents, restricted to the column family
* @return An iterator for the Write Batch contents, restricted to the column
* family
*/
public WBWIRocksIterator newIterator(final ColumnFamilyHandle columnFamilyHandle) {
return new WBWIRocksIterator(this, iterator1(columnFamilyHandle.nativeHandle_));
public WBWIRocksIterator newIterator(
final ColumnFamilyHandle columnFamilyHandle) {
return new WBWIRocksIterator(this, iterator1(nativeHandle_,
columnFamilyHandle.nativeHandle_));
}
/**
@ -90,7 +93,7 @@ public class WriteBatchWithIndex extends AbstractWriteBatch {
* @return An iterator for the Write Batch contents
*/
public WBWIRocksIterator newIterator() {
return new WBWIRocksIterator(this, iterator0());
return new WBWIRocksIterator(this, iterator0(nativeHandle_));
}
/**
@ -99,15 +102,19 @@ public class WriteBatchWithIndex extends AbstractWriteBatch {
* as a delta and baseIterator as a base
*
* @param columnFamilyHandle The column family to iterate over
* @param baseIterator The base iterator, e.g. {@link org.rocksdb.RocksDB#newIterator()}
* @return An iterator which shows a view comprised of both the database point-in-time
* from baseIterator and modifications made in this write batch.
* @param baseIterator The base iterator,
* e.g. {@link org.rocksdb.RocksDB#newIterator()}
* @return An iterator which shows a view comprised of both the database
* point-in-time from baseIterator and modifications made in this write batch.
*/
public RocksIterator newIteratorWithBase(final ColumnFamilyHandle columnFamilyHandle,
public RocksIterator newIteratorWithBase(
final ColumnFamilyHandle columnFamilyHandle,
final RocksIterator baseIterator) {
RocksIterator iterator = new RocksIterator(
baseIterator.parent_,
iteratorWithBase(columnFamilyHandle.nativeHandle_, baseIterator.nativeHandle_));
iteratorWithBase(nativeHandle_,
columnFamilyHandle.nativeHandle_,
baseIterator.nativeHandle_));
//when the iterator is deleted it will also delete the baseIterator
baseIterator.disOwnNativeHandle();
return iterator;
@ -116,34 +123,46 @@ public class WriteBatchWithIndex extends AbstractWriteBatch {
/**
* Provides Read-Your-Own-Writes like functionality by
* creating a new Iterator that will use {@link org.rocksdb.WBWIRocksIterator}
* as a delta and baseIterator as a base. Operates on the default column family.
* as a delta and baseIterator as a base. Operates on the default column
* family.
*
* @param baseIterator The base iterator, e.g. {@link org.rocksdb.RocksDB#newIterator()}
* @return An iterator which shows a view comprised of both the database point-in-time
* from baseIterator and modifications made in this write batch.
* @param baseIterator The base iterator,
* e.g. {@link org.rocksdb.RocksDB#newIterator()}
* @return An iterator which shows a view comprised of both the database
* point-in-timefrom baseIterator and modifications made in this write batch.
*/
public RocksIterator newIteratorWithBase(final RocksIterator baseIterator) {
return newIteratorWithBase(baseIterator.parent_.getDefaultColumnFamily(), baseIterator);
return newIteratorWithBase(baseIterator.parent_.getDefaultColumnFamily(),
baseIterator);
}
@Override final native void disposeInternal(long handle);
@Override final native int count0();
@Override final native void put(byte[] key, int keyLen, byte[] value, int valueLen);
@Override final native void put(byte[] key, int keyLen, byte[] value, int valueLen,
long cfHandle);
@Override final native void merge(byte[] key, int keyLen, byte[] value, int valueLen);
@Override final native void merge(byte[] key, int keyLen, byte[] value, int valueLen,
long cfHandle);
@Override final native void remove(byte[] key, int keyLen);
@Override final native void remove(byte[] key, int keyLen, long cfHandle);
@Override final native void putLogData(byte[] blob, int blobLen);
@Override final native void clear0();
@Override protected final native void disposeInternal(final long handle);
@Override final native int count0(final long handle);
@Override final native void put(final long handle, final byte[] key,
final int keyLen, final byte[] value, final int valueLen);
@Override final native void put(final long handle, final byte[] key,
final int keyLen, final byte[] value, final int valueLen,
final long cfHandle);
@Override final native void merge(final long handle, final byte[] key,
final int keyLen, final byte[] value, final int valueLen);
@Override final native void merge(final long handle, final byte[] key,
final int keyLen, final byte[] value, final int valueLen,
final long cfHandle);
@Override final native void remove(final long handle, final byte[] key,
final int keyLen);
@Override final native void remove(final long handle, final byte[] key,
final int keyLen, final long cfHandle);
@Override final native void putLogData(final long handle, final byte[] blob,
final int blobLen);
@Override final native void clear0(final long handle);
private native void newWriteBatchWithIndex();
private native void newWriteBatchWithIndex(boolean overwriteKey);
private native void newWriteBatchWithIndex(long fallbackIndexComparatorHandle, int reservedBytes,
boolean overwriteKey);
private native long iterator0();
private native long iterator1(long cfHandle);
private native long iteratorWithBase(long baseIteratorHandle, long cfHandle);
private native static long newWriteBatchWithIndex();
private native static long newWriteBatchWithIndex(final boolean overwriteKey);
private native static long newWriteBatchWithIndex(
final long fallbackIndexComparatorHandle, final int reservedBytes,
final boolean overwriteKey);
private native long iterator0(final long handle);
private native long iterator1(final long handle, final long cfHandle);
private native long iteratorWithBase(final long handle,
final long baseIteratorHandle, final long cfHandle);
}

@ -16,13 +16,8 @@ public class WriteOptions extends RocksObject {
* Construct WriteOptions instance.
*/
public WriteOptions() {
super();
newWriteOptions();
}
super(newWriteOptions());
@Override protected void disposeInternal() {
assert(isInitialized());
disposeInternal(nativeHandle_);
}
/**
@ -97,10 +92,10 @@ public class WriteOptions extends RocksObject {
return disableWAL(nativeHandle_);
}
private native void newWriteOptions();
private native static long newWriteOptions();
private native void setSync(long handle, boolean flag);
private native boolean sync(long handle);
private native void setDisableWAL(long handle, boolean flag);
private native boolean disableWAL(long handle);
private native void disposeInternal(long handle);
@Override protected final native void disposeInternal(final long handle);
}

@ -8,6 +8,7 @@ package org.rocksdb;
import java.io.IOException;
import java.nio.file.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
@ -39,57 +40,43 @@ public abstract class AbstractComparatorTest {
*
* @throws java.io.IOException if IO error happens.
*/
public void testRoundtrip(final Path db_path) throws IOException, RocksDBException {
Options opt = null;
RocksDB db = null;
try {
opt = new Options();
opt.setCreateIfMissing(true);
opt.setComparator(getAscendingIntKeyComparator());
public void testRoundtrip(final Path db_path) throws IOException,
RocksDBException {
try (final AbstractComparator comparator = getAscendingIntKeyComparator();
final Options opt = new Options()
.setCreateIfMissing(true)
.setComparator(comparator)) {
// store 10,000 random integer keys
final int ITERATIONS = 10000;
db = RocksDB.open(opt, db_path.toString());
final Random random = new Random();
for (int i = 0; i < ITERATIONS; i++) {
final byte key[] = intToByte(random.nextInt());
if (i > 0 && db.get(key) != null) { // does key already exist (avoid duplicates)
i--; // generate a different key
} else {
db.put(key, "value".getBytes());
try (final RocksDB db = RocksDB.open(opt, db_path.toString())) {
final Random random = new Random();
for (int i = 0; i < ITERATIONS; i++) {
final byte key[] = intToByte(random.nextInt());
// does key already exist (avoid duplicates)
if (i > 0 && db.get(key) != null) {
i--; // generate a different key
} else {
db.put(key, "value".getBytes());
}
}
}
db.close();
// re-open db and read from start to end
// integer keys should be in ascending
// order as defined by SimpleIntComparator
db = RocksDB.open(opt, db_path.toString());
final RocksIterator it = db.newIterator();
it.seekToFirst();
int lastKey = Integer.MIN_VALUE;
int count = 0;
for (it.seekToFirst(); it.isValid(); it.next()) {
final int thisKey = byteToInt(it.key());
assertThat(thisKey).isGreaterThan(lastKey);
lastKey = thisKey;
count++;
}
it.dispose();
db.close();
assertThat(count).isEqualTo(ITERATIONS);
} finally {
if (db != null) {
db.close();
}
if (opt != null) {
opt.dispose();
try (final RocksDB db = RocksDB.open(opt, db_path.toString());
final RocksIterator it = db.newIterator()) {
it.seekToFirst();
int lastKey = Integer.MIN_VALUE;
int count = 0;
for (it.seekToFirst(); it.isValid(); it.next()) {
final int thisKey = byteToInt(it.key());
assertThat(thisKey).isGreaterThan(lastKey);
lastKey = thisKey;
count++;
}
assertThat(count).isEqualTo(ITERATIONS);
}
}
}
@ -109,80 +96,75 @@ public abstract class AbstractComparatorTest {
public void testRoundtripCf(final Path db_path) throws IOException,
RocksDBException {
DBOptions opt = null;
RocksDB db = null;
List<ColumnFamilyDescriptor> cfDescriptors =
new ArrayList<>();
cfDescriptors.add(new ColumnFamilyDescriptor(
RocksDB.DEFAULT_COLUMN_FAMILY));
cfDescriptors.add(new ColumnFamilyDescriptor("new_cf".getBytes(),
new ColumnFamilyOptions().setComparator(
getAscendingIntKeyComparator())));
List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
try {
opt = new DBOptions().
setCreateIfMissing(true).
setCreateMissingColumnFamilies(true);
// store 10,000 random integer keys
final int ITERATIONS = 10000;
try(final AbstractComparator comparator = getAscendingIntKeyComparator()) {
final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
new ColumnFamilyDescriptor("new_cf".getBytes(),
new ColumnFamilyOptions().setComparator(comparator))
);
db = RocksDB.open(opt, db_path.toString(), cfDescriptors, cfHandles);
assertThat(cfDescriptors.size()).isEqualTo(2);
assertThat(cfHandles.size()).isEqualTo(2);
final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
final Random random = new Random();
for (int i = 0; i < ITERATIONS; i++) {
final byte key[] = intToByte(random.nextInt());
if (i > 0 && db.get(cfHandles.get(1), key) != null) {
// does key already exist (avoid duplicates)
i--; // generate a different key
} else {
db.put(cfHandles.get(1), key, "value".getBytes());
try (final DBOptions opt = new DBOptions().
setCreateIfMissing(true).
setCreateMissingColumnFamilies(true)) {
// store 10,000 random integer keys
final int ITERATIONS = 10000;
try (final RocksDB db = RocksDB.open(opt, db_path.toString(),
cfDescriptors, cfHandles)) {
try {
assertThat(cfDescriptors.size()).isEqualTo(2);
assertThat(cfHandles.size()).isEqualTo(2);
final Random random = new Random();
for (int i = 0; i < ITERATIONS; i++) {
final byte key[] = intToByte(random.nextInt());
if (i > 0 && db.get(cfHandles.get(1), key) != null) {
// does key already exist (avoid duplicates)
i--; // generate a different key
} else {
db.put(cfHandles.get(1), key, "value".getBytes());
}
}
} finally {
for (final ColumnFamilyHandle handle : cfHandles) {
handle.close();
}
}
cfHandles.clear();
}
}
for (ColumnFamilyHandle handle : cfHandles) {
handle.dispose();
}
cfHandles.clear();
db.close();
// re-open db and read from start to end
// integer keys should be in ascending
// order as defined by SimpleIntComparator
db = RocksDB.open(opt, db_path.toString(), cfDescriptors, cfHandles);
assertThat(cfDescriptors.size()).isEqualTo(2);
assertThat(cfHandles.size()).isEqualTo(2);
final RocksIterator it = db.newIterator(cfHandles.get(1));
it.seekToFirst();
int lastKey = Integer.MIN_VALUE;
int count = 0;
for (it.seekToFirst(); it.isValid(); it.next()) {
final int thisKey = byteToInt(it.key());
assertThat(thisKey).isGreaterThan(lastKey);
lastKey = thisKey;
count++;
}
it.dispose();
for (ColumnFamilyHandle handle : cfHandles) {
handle.dispose();
}
cfHandles.clear();
db.close();
assertThat(count).isEqualTo(ITERATIONS);
} finally {
for (ColumnFamilyHandle handle : cfHandles) {
handle.dispose();
}
if (db != null) {
db.close();
}
if (opt != null) {
opt.dispose();
// re-open db and read from start to end
// integer keys should be in ascending
// order as defined by SimpleIntComparator
try (final RocksDB db = RocksDB.open(opt, db_path.toString(),
cfDescriptors, cfHandles);
final RocksIterator it = db.newIterator(cfHandles.get(1))) {
try {
assertThat(cfDescriptors.size()).isEqualTo(2);
assertThat(cfHandles.size()).isEqualTo(2);
it.seekToFirst();
int lastKey = Integer.MIN_VALUE;
int count = 0;
for (it.seekToFirst(); it.isValid(); it.next()) {
final int thisKey = byteToInt(it.key());
assertThat(thisKey).isGreaterThan(lastKey);
lastKey = thisKey;
count++;
}
assertThat(count).isEqualTo(ITERATIONS);
} finally {
for (final ColumnFamilyHandle handle : cfHandles) {
handle.close();
}
}
cfHandles.clear();
}
}
}
}

@ -28,148 +28,96 @@ public class BackupEngineTest {
@Test
public void backupDb() throws RocksDBException {
Options opt = null;
RocksDB db = null;
try {
opt = new Options().setCreateIfMissing(true);
// Open empty database.
db = RocksDB.open(opt,
dbFolder.getRoot().getAbsolutePath());
// Open empty database.
try(final Options opt = new Options().setCreateIfMissing(true);
final RocksDB db = RocksDB.open(opt,
dbFolder.getRoot().getAbsolutePath())) {
// Fill database with some test values
prepareDatabase(db);
// Create two backups
BackupableDBOptions bopt = null;
try {
bopt = new BackupableDBOptions(
try(final BackupableDBOptions bopt = new BackupableDBOptions(
backupFolder.getRoot().getAbsolutePath());
try(final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) {
be.createNewBackup(db, false);
be.createNewBackup(db, true);
verifyNumberOfValidBackups(be, 2);
}
} finally {
if(bopt != null) {
bopt.dispose();
}
}
} finally {
if (db != null) {
db.close();
}
if (opt != null) {
opt.dispose();
final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) {
be.createNewBackup(db, false);
be.createNewBackup(db, true);
verifyNumberOfValidBackups(be, 2);
}
}
}
@Test
public void deleteBackup() throws RocksDBException {
Options opt = null;
RocksDB db = null;
try {
opt = new Options().setCreateIfMissing(true);
// Open empty database.
db = RocksDB.open(opt,
dbFolder.getRoot().getAbsolutePath());
// Open empty database.
try(final Options opt = new Options().setCreateIfMissing(true);
final RocksDB db = RocksDB.open(opt,
dbFolder.getRoot().getAbsolutePath())) {
// Fill database with some test values
prepareDatabase(db);
// Create two backups
BackupableDBOptions bopt = null;
try {
bopt = new BackupableDBOptions(
backupFolder.getRoot().getAbsolutePath());
try(final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) {
be.createNewBackup(db, false);
be.createNewBackup(db, true);
final List<BackupInfo> backupInfo =
verifyNumberOfValidBackups(be, 2);
// Delete the first backup
be.deleteBackup(backupInfo.get(0).backupId());
final List<BackupInfo> newBackupInfo =
verifyNumberOfValidBackups(be, 1);
// The second backup must remain.
assertThat(newBackupInfo.get(0).backupId()).
isEqualTo(backupInfo.get(1).backupId());
}
} finally {
if(bopt != null) {
bopt.dispose();
}
}
} finally {
if (db != null) {
db.close();
}
if (opt != null) {
opt.dispose();
try(final BackupableDBOptions bopt = new BackupableDBOptions(
backupFolder.getRoot().getAbsolutePath());
final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) {
be.createNewBackup(db, false);
be.createNewBackup(db, true);
final List<BackupInfo> backupInfo =
verifyNumberOfValidBackups(be, 2);
// Delete the first backup
be.deleteBackup(backupInfo.get(0).backupId());
final List<BackupInfo> newBackupInfo =
verifyNumberOfValidBackups(be, 1);
// The second backup must remain.
assertThat(newBackupInfo.get(0).backupId()).
isEqualTo(backupInfo.get(1).backupId());
}
}
}
@Test
public void purgeOldBackups() throws RocksDBException {
Options opt = null;
RocksDB db = null;
try {
opt = new Options().setCreateIfMissing(true);
// Open empty database.
db = RocksDB.open(opt,
dbFolder.getRoot().getAbsolutePath());
// Open empty database.
try(final Options opt = new Options().setCreateIfMissing(true);
final RocksDB db = RocksDB.open(opt,
dbFolder.getRoot().getAbsolutePath())) {
// Fill database with some test values
prepareDatabase(db);
// Create four backups
BackupableDBOptions bopt = null;
try {
bopt = new BackupableDBOptions(
backupFolder.getRoot().getAbsolutePath());
try(final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) {
be.createNewBackup(db, false);
be.createNewBackup(db, true);
be.createNewBackup(db, true);
be.createNewBackup(db, true);
final List<BackupInfo> backupInfo =
verifyNumberOfValidBackups(be, 4);
// Delete everything except the latest backup
be.purgeOldBackups(1);
final List<BackupInfo> newBackupInfo =
verifyNumberOfValidBackups(be, 1);
// The latest backup must remain.
assertThat(newBackupInfo.get(0).backupId()).
isEqualTo(backupInfo.get(3).backupId());
}
} finally {
if(bopt != null) {
bopt.dispose();
}
}
} finally {
if (db != null) {
db.close();
}
if (opt != null) {
opt.dispose();
try(final BackupableDBOptions bopt = new BackupableDBOptions(
backupFolder.getRoot().getAbsolutePath());
final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) {
be.createNewBackup(db, false);
be.createNewBackup(db, true);
be.createNewBackup(db, true);
be.createNewBackup(db, true);
final List<BackupInfo> backupInfo =
verifyNumberOfValidBackups(be, 4);
// Delete everything except the latest backup
be.purgeOldBackups(1);
final List<BackupInfo> newBackupInfo =
verifyNumberOfValidBackups(be, 1);
// The latest backup must remain.
assertThat(newBackupInfo.get(0).backupId()).
isEqualTo(backupInfo.get(3).backupId());
}
}
}
@Test
public void restoreLatestBackup()
throws RocksDBException {
Options opt = null;
RocksDB db = null;
try {
opt = new Options().setCreateIfMissing(true);
public void restoreLatestBackup() throws RocksDBException {
try(final Options opt = new Options().setCreateIfMissing(true)) {
// Open empty database.
db = RocksDB.open(opt,
dbFolder.getRoot().getAbsolutePath());
// Fill database with some test values
prepareDatabase(db);
BackupableDBOptions bopt = null;
RocksDB db = null;
try {
bopt = new BackupableDBOptions(
db = RocksDB.open(opt,
dbFolder.getRoot().getAbsolutePath());
// Fill database with some test values
prepareDatabase(db);
try (final BackupableDBOptions bopt = new BackupableDBOptions(
backupFolder.getRoot().getAbsolutePath());
try (final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) {
final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) {
be.createNewBackup(db, true);
verifyNumberOfValidBackups(be, 1);
db.put("key1".getBytes(), "valueV2".getBytes());
@ -182,51 +130,44 @@ public class BackupEngineTest {
assertThat(new String(db.get("key2".getBytes()))).endsWith("V3");
db.close();
db = null;
verifyNumberOfValidBackups(be, 2);
// restore db from latest backup
be.restoreDbFromLatestBackup(dbFolder.getRoot().getAbsolutePath(),
dbFolder.getRoot().getAbsolutePath(),
new RestoreOptions(false));
try(final RestoreOptions ropts = new RestoreOptions(false)) {
be.restoreDbFromLatestBackup(dbFolder.getRoot().getAbsolutePath(),
dbFolder.getRoot().getAbsolutePath(), ropts);
}
// Open database again.
db = RocksDB.open(opt,
dbFolder.getRoot().getAbsolutePath());
db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath());
// Values must have suffix V2 because of restoring latest backup.
assertThat(new String(db.get("key1".getBytes()))).endsWith("V2");
assertThat(new String(db.get("key2".getBytes()))).endsWith("V2");
}
} finally {
if(bopt != null) {
bopt.dispose();
if(db != null) {
db.close();
}
}
} finally {
if (db != null) {
db.close();
}
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void restoreFromBackup()
throws RocksDBException {
Options opt = null;
RocksDB db = null;
try {
opt = new Options().setCreateIfMissing(true);
// Open empty database.
db = RocksDB.open(opt,
dbFolder.getRoot().getAbsolutePath());
// Fill database with some test values
prepareDatabase(db);
BackupableDBOptions bopt = null;
try(final Options opt = new Options().setCreateIfMissing(true)) {
RocksDB db = null;
try {
bopt = new BackupableDBOptions(
// Open empty database.
db = RocksDB.open(opt,
dbFolder.getRoot().getAbsolutePath());
// Fill database with some test values
prepareDatabase(db);
try (final BackupableDBOptions bopt = new BackupableDBOptions(
backupFolder.getRoot().getAbsolutePath());
try (final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) {
final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) {
be.createNewBackup(db, true);
verifyNumberOfValidBackups(be, 1);
db.put("key1".getBytes(), "valueV2".getBytes());
@ -240,9 +181,10 @@ public class BackupEngineTest {
//close the database
db.close();
db = null;
//restore the backup
List<BackupInfo> backupInfo = verifyNumberOfValidBackups(be, 2);
final List<BackupInfo> backupInfo = verifyNumberOfValidBackups(be, 2);
// restore db from first backup
be.restoreDbFromBackup(backupInfo.get(0).backupId(),
dbFolder.getRoot().getAbsolutePath(),
@ -256,17 +198,10 @@ public class BackupEngineTest {
assertThat(new String(db.get("key2".getBytes()))).endsWith("V1");
}
} finally {
if(bopt != null) {
bopt.dispose();
if(db != null) {
db.close();
}
}
} finally {
if (db != null) {
db.close();
}
if (opt != null) {
opt.dispose();
}
}
}

@ -16,7 +16,8 @@ import org.junit.rules.ExpectedException;
public class BackupableDBOptionsTest {
private final static String ARBITRARY_PATH = System.getProperty("java.io.tmpdir");
private final static String ARBITRARY_PATH =
System.getProperty("java.io.tmpdir");
@ClassRule
public static final RocksMemoryResource rocksMemoryResource =
@ -30,87 +31,61 @@ public class BackupableDBOptionsTest {
@Test
public void backupDir() {
BackupableDBOptions backupableDBOptions = null;
try {
backupableDBOptions = new BackupableDBOptions(ARBITRARY_PATH);
try (final BackupableDBOptions backupableDBOptions =
new BackupableDBOptions(ARBITRARY_PATH)) {
assertThat(backupableDBOptions.backupDir()).
isEqualTo(ARBITRARY_PATH);
} finally {
if (backupableDBOptions != null) {
backupableDBOptions.dispose();
}
}
}
@Test
public void shareTableFiles() {
BackupableDBOptions backupableDBOptions = null;
try {
backupableDBOptions = new BackupableDBOptions(ARBITRARY_PATH);
boolean value = rand.nextBoolean();
try (final BackupableDBOptions backupableDBOptions =
new BackupableDBOptions(ARBITRARY_PATH)) {
final boolean value = rand.nextBoolean();
backupableDBOptions.setShareTableFiles(value);
assertThat(backupableDBOptions.shareTableFiles()).
isEqualTo(value);
} finally {
if (backupableDBOptions != null) {
backupableDBOptions.dispose();
}
}
}
@Test
public void sync() {
BackupableDBOptions backupableDBOptions = null;
try {
backupableDBOptions = new BackupableDBOptions(ARBITRARY_PATH);
boolean value = rand.nextBoolean();
try (final BackupableDBOptions backupableDBOptions =
new BackupableDBOptions(ARBITRARY_PATH)) {
final boolean value = rand.nextBoolean();
backupableDBOptions.setSync(value);
assertThat(backupableDBOptions.sync()).isEqualTo(value);
} finally {
if (backupableDBOptions != null) {
backupableDBOptions.dispose();
}
}
}
@Test
public void destroyOldData() {
BackupableDBOptions backupableDBOptions = null;
try {
backupableDBOptions = new BackupableDBOptions(ARBITRARY_PATH);
boolean value = rand.nextBoolean();
try (final BackupableDBOptions backupableDBOptions =
new BackupableDBOptions(ARBITRARY_PATH);) {
final boolean value = rand.nextBoolean();
backupableDBOptions.setDestroyOldData(value);
assertThat(backupableDBOptions.destroyOldData()).
isEqualTo(value);
} finally {
if (backupableDBOptions != null) {
backupableDBOptions.dispose();
}
}
}
@Test
public void backupLogFiles() {
BackupableDBOptions backupableDBOptions = null;
try {
backupableDBOptions = new BackupableDBOptions(ARBITRARY_PATH);
boolean value = rand.nextBoolean();
try (final BackupableDBOptions backupableDBOptions =
new BackupableDBOptions(ARBITRARY_PATH)) {
final boolean value = rand.nextBoolean();
backupableDBOptions.setBackupLogFiles(value);
assertThat(backupableDBOptions.backupLogFiles()).
isEqualTo(value);
} finally {
if (backupableDBOptions != null) {
backupableDBOptions.dispose();
}
}
}
@Test
public void backupRateLimit() {
BackupableDBOptions backupableDBOptions = null;
try {
backupableDBOptions = new BackupableDBOptions(ARBITRARY_PATH);
long value = Math.abs(rand.nextLong());
try (final BackupableDBOptions backupableDBOptions =
new BackupableDBOptions(ARBITRARY_PATH)) {
final long value = Math.abs(rand.nextLong());
backupableDBOptions.setBackupRateLimit(value);
assertThat(backupableDBOptions.backupRateLimit()).
isEqualTo(value);
@ -118,19 +93,14 @@ public class BackupableDBOptionsTest {
backupableDBOptions.setBackupRateLimit(-1);
assertThat(backupableDBOptions.backupRateLimit()).
isEqualTo(0);
} finally {
if (backupableDBOptions != null) {
backupableDBOptions.dispose();
}
}
}
@Test
public void restoreRateLimit() {
BackupableDBOptions backupableDBOptions = null;
try {
backupableDBOptions = new BackupableDBOptions(ARBITRARY_PATH);
long value = Math.abs(rand.nextLong());
try (final BackupableDBOptions backupableDBOptions =
new BackupableDBOptions(ARBITRARY_PATH)) {
final long value = Math.abs(rand.nextLong());
backupableDBOptions.setRestoreRateLimit(value);
assertThat(backupableDBOptions.restoreRateLimit()).
isEqualTo(value);
@ -138,145 +108,153 @@ public class BackupableDBOptionsTest {
backupableDBOptions.setRestoreRateLimit(-1);
assertThat(backupableDBOptions.restoreRateLimit()).
isEqualTo(0);
} finally {
if (backupableDBOptions != null) {
backupableDBOptions.dispose();
}
}
}
@Test
public void shareFilesWithChecksum() {
BackupableDBOptions backupableDBOptions = null;
try {
backupableDBOptions = new BackupableDBOptions(ARBITRARY_PATH);
try (final BackupableDBOptions backupableDBOptions =
new BackupableDBOptions(ARBITRARY_PATH)) {
boolean value = rand.nextBoolean();
backupableDBOptions.setShareFilesWithChecksum(value);
assertThat(backupableDBOptions.shareFilesWithChecksum()).
isEqualTo(value);
} finally {
if (backupableDBOptions != null) {
backupableDBOptions.dispose();
}
}
}
@Test
public void failBackupDirIsNull() {
  // A null backup directory must be rejected by the constructor.
  exception.expect(IllegalArgumentException.class);
  try (final BackupableDBOptions opts = new BackupableDBOptions(null)) {
    //no-op
  }
}
@Test
public void failBackupDirIfDisposed(){
BackupableDBOptions options = setupUninitializedBackupableDBOptions(
exception);
options.backupDir();
public void failBackupDirIfDisposed() {
try (final BackupableDBOptions options =
setupUninitializedBackupableDBOptions(exception)) {
options.backupDir();
}
}
@Test
public void failSetShareTableFilesIfDisposed(){
BackupableDBOptions options = setupUninitializedBackupableDBOptions(
exception);
options.setShareTableFiles(true);
public void failSetShareTableFilesIfDisposed() {
try (final BackupableDBOptions options =
setupUninitializedBackupableDBOptions(exception)) {
options.setShareTableFiles(true);
}
}
@Test
public void failShareTableFilesIfDisposed(){
BackupableDBOptions options = setupUninitializedBackupableDBOptions(
exception);
options.shareTableFiles();
public void failShareTableFilesIfDisposed() {
try (BackupableDBOptions options =
setupUninitializedBackupableDBOptions(exception)) {
options.shareTableFiles();
}
}
@Test
public void failSetSyncIfDisposed(){
BackupableDBOptions options = setupUninitializedBackupableDBOptions(
exception);
options.setSync(true);
public void failSetSyncIfDisposed() {
try (final BackupableDBOptions options =
setupUninitializedBackupableDBOptions(exception)) {
options.setSync(true);
}
}
@Test
public void failSyncIfDisposed(){
BackupableDBOptions options = setupUninitializedBackupableDBOptions(
exception);
options.sync();
public void failSyncIfDisposed() {
try (final BackupableDBOptions options =
setupUninitializedBackupableDBOptions(exception)) {
options.sync();
}
}
@Test
public void failSetDestroyOldDataIfDisposed(){
BackupableDBOptions options = setupUninitializedBackupableDBOptions(
exception);
options.setDestroyOldData(true);
public void failSetDestroyOldDataIfDisposed() {
try (final BackupableDBOptions options =
setupUninitializedBackupableDBOptions(exception)) {
options.setDestroyOldData(true);
}
}
@Test
public void failDestroyOldDataIfDisposed(){
BackupableDBOptions options = setupUninitializedBackupableDBOptions(
exception);
options.destroyOldData();
public void failDestroyOldDataIfDisposed() {
try (final BackupableDBOptions options =
setupUninitializedBackupableDBOptions(exception)) {
options.destroyOldData();
}
}
@Test
public void failSetBackupLogFilesIfDisposed(){
BackupableDBOptions options = setupUninitializedBackupableDBOptions(
exception);
options.setBackupLogFiles(true);
public void failSetBackupLogFilesIfDisposed() {
try (final BackupableDBOptions options =
setupUninitializedBackupableDBOptions(exception)) {
options.setBackupLogFiles(true);
}
}
@Test
public void failBackupLogFilesIfDisposed(){
BackupableDBOptions options = setupUninitializedBackupableDBOptions(
exception);
options.backupLogFiles();
public void failBackupLogFilesIfDisposed() {
try (final BackupableDBOptions options =
setupUninitializedBackupableDBOptions(exception)) {
options.backupLogFiles();
}
}
@Test
public void failSetBackupRateLimitIfDisposed(){
BackupableDBOptions options = setupUninitializedBackupableDBOptions(
exception);
options.setBackupRateLimit(1);
public void failSetBackupRateLimitIfDisposed() {
try (final BackupableDBOptions options =
setupUninitializedBackupableDBOptions(exception)) {
options.setBackupRateLimit(1);
}
}
@Test
public void failBackupRateLimitIfDisposed(){
BackupableDBOptions options = setupUninitializedBackupableDBOptions(
exception);
options.backupRateLimit();
public void failBackupRateLimitIfDisposed() {
try (final BackupableDBOptions options =
setupUninitializedBackupableDBOptions(exception)) {
options.backupRateLimit();
}
}
@Test
public void failSetRestoreRateLimitIfDisposed(){
BackupableDBOptions options = setupUninitializedBackupableDBOptions(
exception);
options.setRestoreRateLimit(1);
public void failSetRestoreRateLimitIfDisposed() {
try (final BackupableDBOptions options =
setupUninitializedBackupableDBOptions(exception)) {
options.setRestoreRateLimit(1);
}
}
@Test
public void failRestoreRateLimitIfDisposed(){
BackupableDBOptions options = setupUninitializedBackupableDBOptions(
exception);
options.restoreRateLimit();
public void failRestoreRateLimitIfDisposed() {
try (final BackupableDBOptions options =
setupUninitializedBackupableDBOptions(exception)) {
options.restoreRateLimit();
}
}
@Test
public void failSetShareFilesWithChecksumIfDisposed(){
BackupableDBOptions options = setupUninitializedBackupableDBOptions(
exception);
options.setShareFilesWithChecksum(true);
public void failSetShareFilesWithChecksumIfDisposed() {
try (final BackupableDBOptions options =
setupUninitializedBackupableDBOptions(exception)) {
options.setShareFilesWithChecksum(true);
}
}
@Test
public void failShareFilesWithChecksumIfDisposed(){
BackupableDBOptions options = setupUninitializedBackupableDBOptions(
exception);
options.shareFilesWithChecksum();
public void failShareFilesWithChecksumIfDisposed() {
try (final BackupableDBOptions options =
setupUninitializedBackupableDBOptions(exception)) {
options.shareFilesWithChecksum();
}
}
private BackupableDBOptions setupUninitializedBackupableDBOptions(
ExpectedException exception) {
BackupableDBOptions backupableDBOptions =
final BackupableDBOptions backupableDBOptions =
new BackupableDBOptions(ARBITRARY_PATH);
backupableDBOptions.dispose();
backupableDBOptions.close();
exception.expect(AssertionError.class);
return backupableDBOptions;
}

@ -28,74 +28,48 @@ public class BackupableDBTest {
@Test
public void backupDb() throws RocksDBException {
Options opt = null;
BackupableDBOptions bopt = null;
BackupableDB bdb = null;
try {
opt = new Options().setCreateIfMissing(true);
bopt = new BackupableDBOptions(
backupFolder.getRoot().getAbsolutePath());
try (final Options opt = new Options().setCreateIfMissing(true);
final BackupableDBOptions bopt = new BackupableDBOptions(
backupFolder.getRoot().getAbsolutePath())) {
assertThat(bopt.backupDir()).isEqualTo(
backupFolder.getRoot().getAbsolutePath());
// Open empty database.
bdb = BackupableDB.open(opt, bopt,
dbFolder.getRoot().getAbsolutePath());
// Fill database with some test values
prepareDatabase(bdb);
// Create two backups
bdb.createNewBackup(false);
bdb.createNewBackup(true);
verifyNumberOfValidBackups(bdb, 2);
} finally {
if (bdb != null) {
bdb.close();
}
if (bopt != null) {
bopt.dispose();
}
if (opt != null) {
opt.dispose();
try (final BackupableDB bdb = BackupableDB.open(opt, bopt,
dbFolder.getRoot().getAbsolutePath())) {
// Fill database with some test values
prepareDatabase(bdb);
// Create two backups
bdb.createNewBackup(false);
bdb.createNewBackup(true);
verifyNumberOfValidBackups(bdb, 2);
}
}
}
@Test
public void deleteBackup() throws RocksDBException {
Options opt = null;
BackupableDBOptions bopt = null;
BackupableDB bdb = null;
try {
opt = new Options().setCreateIfMissing(true);
bopt = new BackupableDBOptions(
backupFolder.getRoot().getAbsolutePath());
try (final Options opt = new Options().setCreateIfMissing(true);
final BackupableDBOptions bopt = new BackupableDBOptions(
backupFolder.getRoot().getAbsolutePath())) {
assertThat(bopt.backupDir()).isEqualTo(
backupFolder.getRoot().getAbsolutePath());
// Open empty database.
bdb = BackupableDB.open(opt, bopt,
dbFolder.getRoot().getAbsolutePath());
// Fill database with some test values
prepareDatabase(bdb);
// Create two backups
bdb.createNewBackup(false);
bdb.createNewBackup(true);
List<BackupInfo> backupInfo =
verifyNumberOfValidBackups(bdb, 2);
// Delete the first backup
bdb.deleteBackup(backupInfo.get(0).backupId());
List<BackupInfo> newBackupInfo =
verifyNumberOfValidBackups(bdb, 1);
// The second backup must remain.
assertThat(newBackupInfo.get(0).backupId()).
isEqualTo(backupInfo.get(1).backupId());
} finally {
if (bdb != null) {
bdb.close();
}
if (bopt != null) {
bopt.dispose();
}
if (opt != null) {
opt.dispose();
try (final BackupableDB bdb = BackupableDB.open(opt, bopt,
dbFolder.getRoot().getAbsolutePath())) {
// Fill database with some test values
prepareDatabase(bdb);
// Create two backups
bdb.createNewBackup(false);
bdb.createNewBackup(true);
List<BackupInfo> backupInfo =
verifyNumberOfValidBackups(bdb, 2);
// Delete the first backup
bdb.deleteBackup(backupInfo.get(0).backupId());
final List<BackupInfo> newBackupInfo =
verifyNumberOfValidBackups(bdb, 1);
// The second backup must remain.
assertThat(newBackupInfo.get(0).backupId()).
isEqualTo(backupInfo.get(1).backupId());
}
}
}
@ -103,90 +77,61 @@ public class BackupableDBTest {
@Test
public void deleteBackupWithRestoreBackupableDB()
throws RocksDBException {
Options opt = null;
BackupableDBOptions bopt = null;
BackupableDB bdb = null;
RestoreBackupableDB rdb = null;
try {
opt = new Options().setCreateIfMissing(true);
bopt = new BackupableDBOptions(
backupFolder.getRoot().getAbsolutePath());
try (final Options opt = new Options().setCreateIfMissing(true);
final BackupableDBOptions bopt = new BackupableDBOptions(
backupFolder.getRoot().getAbsolutePath())) {
assertThat(bopt.backupDir()).isEqualTo(
backupFolder.getRoot().getAbsolutePath());
// Open empty database.
bdb = BackupableDB.open(opt, bopt,
dbFolder.getRoot().getAbsolutePath());
// Fill database with some test values
prepareDatabase(bdb);
// Create two backups
bdb.createNewBackup(false);
bdb.createNewBackup(true);
List<BackupInfo> backupInfo =
verifyNumberOfValidBackups(bdb, 2);
// init RestoreBackupableDB
rdb = new RestoreBackupableDB(bopt);
// Delete the first backup
rdb.deleteBackup(backupInfo.get(0).backupId());
// Fetch backup info using RestoreBackupableDB
List<BackupInfo> newBackupInfo = verifyNumberOfValidBackups(rdb, 1);
// The second backup must remain.
assertThat(newBackupInfo.get(0).backupId()).
isEqualTo(backupInfo.get(1).backupId());
} finally {
if (bdb != null) {
bdb.close();
}
if (rdb != null) {
rdb.dispose();
}
if (bopt != null) {
bopt.dispose();
}
if (opt != null) {
opt.dispose();
try (final BackupableDB bdb = BackupableDB.open(opt, bopt,
dbFolder.getRoot().getAbsolutePath())) {
// Fill database with some test values
prepareDatabase(bdb);
// Create two backups
bdb.createNewBackup(false);
bdb.createNewBackup(true);
final List<BackupInfo> backupInfo =
verifyNumberOfValidBackups(bdb, 2);
// init RestoreBackupableDB
try (final RestoreBackupableDB rdb = new RestoreBackupableDB(bopt)) {
// Delete the first backup
rdb.deleteBackup(backupInfo.get(0).backupId());
// Fetch backup info using RestoreBackupableDB
List<BackupInfo> newBackupInfo = verifyNumberOfValidBackups(rdb, 1);
// The second backup must remain.
assertThat(newBackupInfo.get(0).backupId()).
isEqualTo(backupInfo.get(1).backupId());
}
}
}
}
@Test
public void purgeOldBackups() throws RocksDBException {
Options opt = null;
BackupableDBOptions bopt = null;
BackupableDB bdb = null;
try {
opt = new Options().setCreateIfMissing(true);
bopt = new BackupableDBOptions(
backupFolder.getRoot().getAbsolutePath());
try (final Options opt = new Options().setCreateIfMissing(true);
final BackupableDBOptions bopt = new BackupableDBOptions(
backupFolder.getRoot().getAbsolutePath())) {
assertThat(bopt.backupDir()).isEqualTo(
backupFolder.getRoot().getAbsolutePath());
// Open empty database.
bdb = BackupableDB.open(opt, bopt,
dbFolder.getRoot().getAbsolutePath());
// Fill database with some test values
prepareDatabase(bdb);
// Create two backups
bdb.createNewBackup(false);
bdb.createNewBackup(true);
bdb.createNewBackup(true);
bdb.createNewBackup(true);
List<BackupInfo> backupInfo =
verifyNumberOfValidBackups(bdb, 4);
// Delete everything except the latest backup
bdb.purgeOldBackups(1);
List<BackupInfo> newBackupInfo =
verifyNumberOfValidBackups(bdb, 1);
// The latest backup must remain.
assertThat(newBackupInfo.get(0).backupId()).
isEqualTo(backupInfo.get(3).backupId());
} finally {
if (bdb != null) {
bdb.close();
}
if (bopt != null) {
bopt.dispose();
}
if (opt != null) {
opt.dispose();
try (final BackupableDB bdb = BackupableDB.open(opt, bopt,
dbFolder.getRoot().getAbsolutePath())) {
// Fill database with some test values
prepareDatabase(bdb);
// Create two backups
bdb.createNewBackup(false);
bdb.createNewBackup(true);
bdb.createNewBackup(true);
bdb.createNewBackup(true);
final List<BackupInfo> backupInfo =
verifyNumberOfValidBackups(bdb, 4);
// Delete everything except the latest backup
bdb.purgeOldBackups(1);
final List<BackupInfo> newBackupInfo =
verifyNumberOfValidBackups(bdb, 1);
// The latest backup must remain.
assertThat(newBackupInfo.get(0).backupId()).
isEqualTo(backupInfo.get(3).backupId());
}
}
}
@ -194,58 +139,43 @@ public class BackupableDBTest {
@Test
public void purgeOldBackupsWithRestoreBackupableDb()
throws RocksDBException {
Options opt = null;
BackupableDBOptions bopt = null;
BackupableDB bdb = null;
RestoreBackupableDB rdb = null;
try {
opt = new Options().setCreateIfMissing(true);
bopt = new BackupableDBOptions(
backupFolder.getRoot().getAbsolutePath());
try (final Options opt = new Options().setCreateIfMissing(true);
final BackupableDBOptions bopt =
new BackupableDBOptions(backupFolder.getRoot().getAbsolutePath())
) {
assertThat(bopt.backupDir()).isEqualTo(
backupFolder.getRoot().getAbsolutePath());
// Open empty database.
bdb = BackupableDB.open(opt, bopt,
dbFolder.getRoot().getAbsolutePath());
// Fill database with some test values
prepareDatabase(bdb);
// Create two backups
bdb.createNewBackup(false);
bdb.createNewBackup(true);
bdb.createNewBackup(true);
bdb.createNewBackup(true);
List<BackupInfo> infos = verifyNumberOfValidBackups(bdb, 4);
assertThat(infos.get(1).size()).
isEqualTo(infos.get(2).size());
assertThat(infos.get(1).numberFiles()).
isEqualTo(infos.get(2).numberFiles());
long maxTimeBeforePurge = Long.MIN_VALUE;
for (BackupInfo backupInfo : infos) {
if (maxTimeBeforePurge < backupInfo.timestamp()) {
maxTimeBeforePurge = backupInfo.timestamp();
try (final BackupableDB bdb = BackupableDB.open(opt, bopt,
dbFolder.getRoot().getAbsolutePath())) {
// Fill database with some test values
prepareDatabase(bdb);
// Create two backups
bdb.createNewBackup(false);
bdb.createNewBackup(true);
bdb.createNewBackup(true);
bdb.createNewBackup(true);
List<BackupInfo> infos = verifyNumberOfValidBackups(bdb, 4);
assertThat(infos.get(1).size()).
isEqualTo(infos.get(2).size());
assertThat(infos.get(1).numberFiles()).
isEqualTo(infos.get(2).numberFiles());
long maxTimeBeforePurge = Long.MIN_VALUE;
for (BackupInfo backupInfo : infos) {
if (maxTimeBeforePurge < backupInfo.timestamp()) {
maxTimeBeforePurge = backupInfo.timestamp();
}
}
// init RestoreBackupableDB
try (final RestoreBackupableDB rdb = new RestoreBackupableDB(bopt)) {
// the same number of backups must
// exist using RestoreBackupableDB.
verifyNumberOfValidBackups(rdb, 4);
rdb.purgeOldBackups(1);
infos = verifyNumberOfValidBackups(rdb, 1);
assertThat(infos.get(0).timestamp()).
isEqualTo(maxTimeBeforePurge);
}
}
// init RestoreBackupableDB
rdb = new RestoreBackupableDB(bopt);
// the same number of backups must
// exist using RestoreBackupableDB.
verifyNumberOfValidBackups(rdb, 4);
rdb.purgeOldBackups(1);
infos = verifyNumberOfValidBackups(rdb, 1);
assertThat(infos.get(0).timestamp()).
isEqualTo(maxTimeBeforePurge);
} finally {
if (bdb != null) {
bdb.close();
}
if (rdb != null) {
rdb.dispose();
}
if (bopt != null) {
bopt.dispose();
}
if (opt != null) {
opt.dispose();
}
}
}
@ -253,58 +183,44 @@ public class BackupableDBTest {
@Test
public void restoreLatestBackup()
throws RocksDBException {
Options opt = null;
BackupableDBOptions bopt = null;
BackupableDB bdb = null;
RestoreBackupableDB rdb = null;
try {
opt = new Options().setCreateIfMissing(true);
bopt = new BackupableDBOptions(
backupFolder.getRoot().getAbsolutePath());
try (final Options opt = new Options().setCreateIfMissing(true);
final BackupableDBOptions bopt =
new BackupableDBOptions(
backupFolder.getRoot().getAbsolutePath())) {
assertThat(bopt.backupDir()).isEqualTo(
backupFolder.getRoot().getAbsolutePath());
// Open empty database.
bdb = BackupableDB.open(opt, bopt,
dbFolder.getRoot().getAbsolutePath());
// Fill database with some test values
prepareDatabase(bdb);
bdb.createNewBackup(true);
verifyNumberOfValidBackups(bdb, 1);
bdb.put("key1".getBytes(), "valueV2".getBytes());
bdb.put("key2".getBytes(), "valueV2".getBytes());
bdb.createNewBackup(true);
verifyNumberOfValidBackups(bdb, 2);
bdb.put("key1".getBytes(), "valueV3".getBytes());
bdb.put("key2".getBytes(), "valueV3".getBytes());
assertThat(new String(bdb.get("key1".getBytes()))).endsWith("V3");
assertThat(new String(bdb.get("key2".getBytes()))).endsWith("V3");
bdb.close();
try (final BackupableDB bdb = BackupableDB.open(opt, bopt,
dbFolder.getRoot().getAbsolutePath())) {
// Fill database with some test values
prepareDatabase(bdb);
bdb.createNewBackup(true);
verifyNumberOfValidBackups(bdb, 1);
bdb.put("key1".getBytes(), "valueV2".getBytes());
bdb.put("key2".getBytes(), "valueV2".getBytes());
bdb.createNewBackup(true);
verifyNumberOfValidBackups(bdb, 2);
bdb.put("key1".getBytes(), "valueV3".getBytes());
bdb.put("key2".getBytes(), "valueV3".getBytes());
assertThat(new String(bdb.get("key1".getBytes()))).endsWith("V3");
assertThat(new String(bdb.get("key2".getBytes()))).endsWith("V3");
}
// init RestoreBackupableDB
rdb = new RestoreBackupableDB(bopt);
verifyNumberOfValidBackups(rdb, 2);
// restore db from latest backup
rdb.restoreDBFromLatestBackup(dbFolder.getRoot().getAbsolutePath(),
dbFolder.getRoot().getAbsolutePath(),
new RestoreOptions(false));
// Open database again.
bdb = BackupableDB.open(opt, bopt,
dbFolder.getRoot().getAbsolutePath());
// Values must have suffix V2 because of restoring latest backup.
assertThat(new String(bdb.get("key1".getBytes()))).endsWith("V2");
assertThat(new String(bdb.get("key2".getBytes()))).endsWith("V2");
} finally {
if (bdb != null) {
bdb.close();
}
if (rdb != null) {
rdb.dispose();
}
if (bopt != null) {
bopt.dispose();
try (final RestoreBackupableDB rdb = new RestoreBackupableDB(bopt)) {
verifyNumberOfValidBackups(rdb, 2);
// restore db from latest backup
rdb.restoreDBFromLatestBackup(dbFolder.getRoot().getAbsolutePath(),
dbFolder.getRoot().getAbsolutePath(),
new RestoreOptions(false));
}
if (opt != null) {
opt.dispose();
// Open database again.
try (final BackupableDB bdb = BackupableDB.open(opt, bopt,
dbFolder.getRoot().getAbsolutePath())) {
// Values must have suffix V2 because of restoring latest backup.
assertThat(new String(bdb.get("key1".getBytes()))).endsWith("V2");
assertThat(new String(bdb.get("key2".getBytes()))).endsWith("V2");
}
}
}
@ -312,59 +228,44 @@ public class BackupableDBTest {
@Test
public void restoreFromBackup()
throws RocksDBException {
Options opt = null;
BackupableDBOptions bopt = null;
BackupableDB bdb = null;
RestoreBackupableDB rdb = null;
try {
opt = new Options().setCreateIfMissing(true);
bopt = new BackupableDBOptions(
backupFolder.getRoot().getAbsolutePath());
try (final Options opt = new Options().setCreateIfMissing(true);
final BackupableDBOptions bopt = new BackupableDBOptions(
backupFolder.getRoot().getAbsolutePath())) {
assertThat(bopt.backupDir()).isEqualTo(
backupFolder.getRoot().getAbsolutePath());
// Open empty database.
bdb = BackupableDB.open(opt, bopt,
dbFolder.getRoot().getAbsolutePath());
// Fill database with some test values
prepareDatabase(bdb);
bdb.createNewBackup(true);
verifyNumberOfValidBackups(bdb, 1);
bdb.put("key1".getBytes(), "valueV2".getBytes());
bdb.put("key2".getBytes(), "valueV2".getBytes());
bdb.createNewBackup(true);
verifyNumberOfValidBackups(bdb, 2);
bdb.put("key1".getBytes(), "valueV3".getBytes());
bdb.put("key2".getBytes(), "valueV3".getBytes());
assertThat(new String(bdb.get("key1".getBytes()))).endsWith("V3");
assertThat(new String(bdb.get("key2".getBytes()))).endsWith("V3");
bdb.close();
try (final BackupableDB bdb = BackupableDB.open(opt, bopt,
dbFolder.getRoot().getAbsolutePath())) {
// Fill database with some test values
prepareDatabase(bdb);
bdb.createNewBackup(true);
verifyNumberOfValidBackups(bdb, 1);
bdb.put("key1".getBytes(), "valueV2".getBytes());
bdb.put("key2".getBytes(), "valueV2".getBytes());
bdb.createNewBackup(true);
verifyNumberOfValidBackups(bdb, 2);
bdb.put("key1".getBytes(), "valueV3".getBytes());
bdb.put("key2".getBytes(), "valueV3".getBytes());
assertThat(new String(bdb.get("key1".getBytes()))).endsWith("V3");
assertThat(new String(bdb.get("key2".getBytes()))).endsWith("V3");
}
// init RestoreBackupableDB
rdb = new RestoreBackupableDB(bopt);
List<BackupInfo> backupInfo = verifyNumberOfValidBackups(rdb, 2);
// restore db from first backup
rdb.restoreDBFromBackup(backupInfo.get(0).backupId(),
dbFolder.getRoot().getAbsolutePath(),
dbFolder.getRoot().getAbsolutePath(),
new RestoreOptions(false));
// Open database again.
bdb = BackupableDB.open(opt, bopt,
dbFolder.getRoot().getAbsolutePath());
// Values must have suffix V2 because of restoring latest backup.
assertThat(new String(bdb.get("key1".getBytes()))).endsWith("V1");
assertThat(new String(bdb.get("key2".getBytes()))).endsWith("V1");
} finally {
if (bdb != null) {
bdb.close();
try (final RestoreBackupableDB rdb = new RestoreBackupableDB(bopt)) {
final List<BackupInfo> backupInfo = verifyNumberOfValidBackups(rdb, 2);
// restore db from first backup
rdb.restoreDBFromBackup(backupInfo.get(0).backupId(),
dbFolder.getRoot().getAbsolutePath(),
dbFolder.getRoot().getAbsolutePath(),
new RestoreOptions(false));
}
if (rdb != null) {
rdb.dispose();
}
if (bopt != null) {
bopt.dispose();
}
if (opt != null) {
opt.dispose();
// Open database again.
try (final BackupableDB bdb = BackupableDB.open(opt, bopt,
dbFolder.getRoot().getAbsolutePath())) {
// Values must have suffix V2 because of restoring latest backup.
assertThat(new String(bdb.get("key1".getBytes()))).endsWith("V1");
assertThat(new String(bdb.get("key2".getBytes()))).endsWith("V1");
}
}
}
@ -372,13 +273,13 @@ public class BackupableDBTest {
/**
* Verify backups.
*
* @param bdb {@link BackupableDB} instance.
* @param bdb {@link BackupableDB} instance.
* @param expectedNumberOfBackups numerical value
* @throws RocksDBException thrown if an error occurs within the native
* part of the library.
* part of the library.
*/
private List<BackupInfo> verifyNumberOfValidBackups(BackupableDB bdb,
int expectedNumberOfBackups) throws RocksDBException {
private List<BackupInfo> verifyNumberOfValidBackups(final BackupableDB bdb,
final int expectedNumberOfBackups) throws RocksDBException {
// Verify that backups exist
assertThat(bdb.getCorruptedBackups().length).
isEqualTo(0);
@ -392,13 +293,13 @@ public class BackupableDBTest {
/**
* Verify backups.
*
* @param rdb {@link RestoreBackupableDB} instance.
* @param rdb {@link RestoreBackupableDB} instance.
* @param expectedNumberOfBackups numerical value
* @throws RocksDBException thrown if an error occurs within the native
* part of the library.
* part of the library.
*/
private List<BackupInfo> verifyNumberOfValidBackups(
RestoreBackupableDB rdb, int expectedNumberOfBackups)
final RestoreBackupableDB rdb, final int expectedNumberOfBackups)
throws RocksDBException {
// Verify that backups exist
assertThat(rdb.getCorruptedBackups().length).
@ -415,9 +316,9 @@ public class BackupableDBTest {
*
* @param db {@link RocksDB} instance.
* @throws RocksDBException thrown if an error occurs within the native
* part of the library.
* part of the library.
*/
private void prepareDatabase(RocksDB db)
private void prepareDatabase(final RocksDB db)
throws RocksDBException {
db.put("key1".getBytes(), "valueV1".getBytes());
db.put("key2".getBytes(), "valueV1".getBytes());

@ -131,34 +131,20 @@ public class BlockBasedTableConfigTest {
@Test
public void blockBasedTableWithFilter() {
Options options = null;
try {
options = new Options();
options.setTableFormatConfig(
new BlockBasedTableConfig().setFilter(
new BloomFilter(10)));
try(final Options options = new Options()
.setTableFormatConfig(new BlockBasedTableConfig()
.setFilter(new BloomFilter(10)))) {
assertThat(options.tableFactoryName()).
isEqualTo("BlockBasedTable");
} finally {
if (options != null) {
options.dispose();
}
}
}
@Test
public void blockBasedTableWithoutFilter() {
Options options = null;
try {
options = new Options();
options.setTableFormatConfig(
new BlockBasedTableConfig().setFilter(null));
try(final Options options = new Options().setTableFormatConfig(
new BlockBasedTableConfig().setFilter(null))) {
assertThat(options.tableFactoryName()).
isEqualTo("BlockBasedTable");
} finally {
if (options != null) {
options.dispose();
}
}
}

@ -22,76 +22,61 @@ public class CheckPointTest {
@Test
public void checkPoint() throws RocksDBException {
RocksDB db = null;
Options options = null;
Checkpoint checkpoint = null;
try {
options = new Options().
setCreateIfMissing(true);
db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath());
db.put("key".getBytes(), "value".getBytes());
checkpoint = Checkpoint.create(db);
checkpoint.createCheckpoint(checkpointFolder.
getRoot().getAbsolutePath() + "/snapshot1");
db.put("key2".getBytes(), "value2".getBytes());
checkpoint.createCheckpoint(checkpointFolder.
getRoot().getAbsolutePath() + "/snapshot2");
db.close();
db = RocksDB.open(options,
checkpointFolder.getRoot().getAbsolutePath() +
"/snapshot1");
assertThat(new String(db.get("key".getBytes()))).
isEqualTo("value");
assertThat(db.get("key2".getBytes())).isNull();
db.close();
db = RocksDB.open(options,
checkpointFolder.getRoot().getAbsolutePath() +
"/snapshot2");
assertThat(new String(db.get("key".getBytes()))).
isEqualTo("value");
assertThat(new String(db.get("key2".getBytes()))).
isEqualTo("value2");
} finally {
if (db != null) {
db.close();
try (final Options options = new Options().
setCreateIfMissing(true)) {
try (final RocksDB db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath())) {
db.put("key".getBytes(), "value".getBytes());
try (final Checkpoint checkpoint = Checkpoint.create(db)) {
checkpoint.createCheckpoint(checkpointFolder.
getRoot().getAbsolutePath() + "/snapshot1");
db.put("key2".getBytes(), "value2".getBytes());
checkpoint.createCheckpoint(checkpointFolder.
getRoot().getAbsolutePath() + "/snapshot2");
}
}
if (options != null) {
options.dispose();
try (final RocksDB db = RocksDB.open(options,
checkpointFolder.getRoot().getAbsolutePath() +
"/snapshot1")) {
assertThat(new String(db.get("key".getBytes()))).
isEqualTo("value");
assertThat(db.get("key2".getBytes())).isNull();
}
if (checkpoint != null) {
checkpoint.dispose();
try (final RocksDB db = RocksDB.open(options,
checkpointFolder.getRoot().getAbsolutePath() +
"/snapshot2")) {
assertThat(new String(db.get("key".getBytes()))).
isEqualTo("value");
assertThat(new String(db.get("key2".getBytes()))).
isEqualTo("value2");
}
}
}
@Test(expected = IllegalArgumentException.class)
public void failIfDbIsNull() {
  // Checkpoint.create must reject a null database handle.
  try (final Checkpoint checkpoint = Checkpoint.create(null)) {
    //no-op
  }
}
@Test(expected = IllegalStateException.class)
public void failIfDbNotInitialized() throws RocksDBException {
  // Creating a checkpoint from an already-closed database must fail.
  try (final RocksDB db = RocksDB.open(
      dbFolder.getRoot().getAbsolutePath())) {
    db.close();
    Checkpoint.create(db);
  }
}
@Test(expected = RocksDBException.class)
public void failWithIllegalPath() throws RocksDBException {
RocksDB db = null;
Checkpoint checkpoint = null;
try {
db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
checkpoint = Checkpoint.create(db);
try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
final Checkpoint checkpoint = Checkpoint.create(db)) {
checkpoint.createCheckpoint("/Z:///:\\C:\\TZ/-");
} finally {
if (db != null) {
db.close();
}
if (checkpoint != null) {
checkpoint.dispose();
}
}
}
}

@ -26,616 +26,386 @@ public class ColumnFamilyOptionsTest {
@Test
public void getColumnFamilyOptionsFromProps() {
ColumnFamilyOptions opt = null;
try {
Properties properties = new Properties();
properties.put("write_buffer_size", "112");
properties.put("max_write_buffer_number", "13");
try (final ColumnFamilyOptions opt = ColumnFamilyOptions.
getColumnFamilyOptionsFromProps(properties)) {
// setup sample properties
Properties properties = new Properties();
properties.put("write_buffer_size", "112");
properties.put("max_write_buffer_number", "13");
opt = ColumnFamilyOptions.
getColumnFamilyOptionsFromProps(properties);
assertThat(opt).isNotNull();
assertThat(String.valueOf(opt.writeBufferSize())).
isEqualTo(properties.get("write_buffer_size"));
assertThat(String.valueOf(opt.maxWriteBufferNumber())).
isEqualTo(properties.get("max_write_buffer_number"));
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void failColumnFamilyOptionsFromPropsWithIllegalValue() {
ColumnFamilyOptions opt = null;
try {
// setup sample properties
Properties properties = new Properties();
properties.put("tomato", "1024");
properties.put("burger", "2");
opt = ColumnFamilyOptions.
getColumnFamilyOptionsFromProps(properties);
// setup sample properties
final Properties properties = new Properties();
properties.put("tomato", "1024");
properties.put("burger", "2");
try (final ColumnFamilyOptions opt =
ColumnFamilyOptions.getColumnFamilyOptionsFromProps(properties)) {
assertThat(opt).isNull();
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test(expected = IllegalArgumentException.class)
public void failColumnFamilyOptionsFromPropsWithNullValue() {
  // A null Properties object must be rejected.
  try (final ColumnFamilyOptions opt =
      ColumnFamilyOptions.getColumnFamilyOptionsFromProps(null)) {
    //no-op
  }
}
@Test(expected = IllegalArgumentException.class)
public void failColumnFamilyOptionsFromPropsWithEmptyProps() {
  // An empty Properties object must be rejected.
  try (final ColumnFamilyOptions opt =
      ColumnFamilyOptions.getColumnFamilyOptionsFromProps(
          new Properties())) {
    //no-op
  }
}
@Test
public void writeBufferSize() throws RocksDBException {
ColumnFamilyOptions opt = null;
try {
opt = new ColumnFamilyOptions();
long longValue = rand.nextLong();
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final long longValue = rand.nextLong();
opt.setWriteBufferSize(longValue);
assertThat(opt.writeBufferSize()).isEqualTo(longValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void maxWriteBufferNumber() {
ColumnFamilyOptions opt = null;
try {
opt = new ColumnFamilyOptions();
int intValue = rand.nextInt();
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final int intValue = rand.nextInt();
opt.setMaxWriteBufferNumber(intValue);
assertThat(opt.maxWriteBufferNumber()).isEqualTo(intValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void minWriteBufferNumberToMerge() {
ColumnFamilyOptions opt = null;
try {
opt = new ColumnFamilyOptions();
int intValue = rand.nextInt();
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final int intValue = rand.nextInt();
opt.setMinWriteBufferNumberToMerge(intValue);
assertThat(opt.minWriteBufferNumberToMerge()).isEqualTo(intValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void numLevels() {
ColumnFamilyOptions opt = null;
try {
opt = new ColumnFamilyOptions();
int intValue = rand.nextInt();
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final int intValue = rand.nextInt();
opt.setNumLevels(intValue);
assertThat(opt.numLevels()).isEqualTo(intValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void levelZeroFileNumCompactionTrigger() {
ColumnFamilyOptions opt = null;
try {
opt = new ColumnFamilyOptions();
int intValue = rand.nextInt();
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final int intValue = rand.nextInt();
opt.setLevelZeroFileNumCompactionTrigger(intValue);
assertThat(opt.levelZeroFileNumCompactionTrigger()).isEqualTo(intValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void levelZeroSlowdownWritesTrigger() {
ColumnFamilyOptions opt = null;
try {
opt = new ColumnFamilyOptions();
int intValue = rand.nextInt();
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final int intValue = rand.nextInt();
opt.setLevelZeroSlowdownWritesTrigger(intValue);
assertThat(opt.levelZeroSlowdownWritesTrigger()).isEqualTo(intValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void levelZeroStopWritesTrigger() {
ColumnFamilyOptions opt = null;
try {
opt = new ColumnFamilyOptions();
int intValue = rand.nextInt();
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final int intValue = rand.nextInt();
opt.setLevelZeroStopWritesTrigger(intValue);
assertThat(opt.levelZeroStopWritesTrigger()).isEqualTo(intValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void targetFileSizeBase() {
ColumnFamilyOptions opt = null;
try {
opt = new ColumnFamilyOptions();
long longValue = rand.nextLong();
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final long longValue = rand.nextLong();
opt.setTargetFileSizeBase(longValue);
assertThat(opt.targetFileSizeBase()).isEqualTo(longValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void targetFileSizeMultiplier() {
ColumnFamilyOptions opt = null;
try {
opt = new ColumnFamilyOptions();
int intValue = rand.nextInt();
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final int intValue = rand.nextInt();
opt.setTargetFileSizeMultiplier(intValue);
assertThat(opt.targetFileSizeMultiplier()).isEqualTo(intValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void maxBytesForLevelBase() {
ColumnFamilyOptions opt = null;
try {
opt = new ColumnFamilyOptions();
long longValue = rand.nextLong();
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final long longValue = rand.nextLong();
opt.setMaxBytesForLevelBase(longValue);
assertThat(opt.maxBytesForLevelBase()).isEqualTo(longValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void levelCompactionDynamicLevelBytes() {
ColumnFamilyOptions opt = null;
try {
opt = new ColumnFamilyOptions();
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final boolean boolValue = rand.nextBoolean();
opt.setLevelCompactionDynamicLevelBytes(boolValue);
assertThat(opt.levelCompactionDynamicLevelBytes())
.isEqualTo(boolValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void maxBytesForLevelMultiplier() {
ColumnFamilyOptions opt = null;
try {
opt = new ColumnFamilyOptions();
int intValue = rand.nextInt();
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final int intValue = rand.nextInt();
opt.setMaxBytesForLevelMultiplier(intValue);
assertThat(opt.maxBytesForLevelMultiplier()).isEqualTo(intValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void expandedCompactionFactor() {
ColumnFamilyOptions opt = null;
try {
opt = new ColumnFamilyOptions();
int intValue = rand.nextInt();
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final int intValue = rand.nextInt();
opt.setExpandedCompactionFactor(intValue);
assertThat(opt.expandedCompactionFactor()).isEqualTo(intValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void sourceCompactionFactor() {
ColumnFamilyOptions opt = null;
try {
opt = new ColumnFamilyOptions();
int intValue = rand.nextInt();
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final int intValue = rand.nextInt();
opt.setSourceCompactionFactor(intValue);
assertThat(opt.sourceCompactionFactor()).isEqualTo(intValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void maxGrandparentOverlapFactor() {
ColumnFamilyOptions opt = null;
try {
opt = new ColumnFamilyOptions();
int intValue = rand.nextInt();
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final int intValue = rand.nextInt();
opt.setMaxGrandparentOverlapFactor(intValue);
assertThat(opt.maxGrandparentOverlapFactor()).isEqualTo(intValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void softRateLimit() {
ColumnFamilyOptions opt = null;
try {
opt = new ColumnFamilyOptions();
double doubleValue = rand.nextDouble();
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final double doubleValue = rand.nextDouble();
opt.setSoftRateLimit(doubleValue);
assertThat(opt.softRateLimit()).isEqualTo(doubleValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void hardRateLimit() {
ColumnFamilyOptions opt = null;
try {
opt = new ColumnFamilyOptions();
double doubleValue = rand.nextDouble();
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final double doubleValue = rand.nextDouble();
opt.setHardRateLimit(doubleValue);
assertThat(opt.hardRateLimit()).isEqualTo(doubleValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void rateLimitDelayMaxMilliseconds() {
ColumnFamilyOptions opt = null;
try {
opt = new ColumnFamilyOptions();
int intValue = rand.nextInt();
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final int intValue = rand.nextInt();
opt.setRateLimitDelayMaxMilliseconds(intValue);
assertThat(opt.rateLimitDelayMaxMilliseconds()).isEqualTo(intValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void arenaBlockSize() throws RocksDBException {
ColumnFamilyOptions opt = null;
try {
opt = new ColumnFamilyOptions();
long longValue = rand.nextLong();
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final long longValue = rand.nextLong();
opt.setArenaBlockSize(longValue);
assertThat(opt.arenaBlockSize()).isEqualTo(longValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void disableAutoCompactions() {
ColumnFamilyOptions opt = null;
try {
opt = new ColumnFamilyOptions();
boolean boolValue = rand.nextBoolean();
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final boolean boolValue = rand.nextBoolean();
opt.setDisableAutoCompactions(boolValue);
assertThat(opt.disableAutoCompactions()).isEqualTo(boolValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void purgeRedundantKvsWhileFlush() {
ColumnFamilyOptions opt = null;
try {
opt = new ColumnFamilyOptions();
boolean boolValue = rand.nextBoolean();
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final boolean boolValue = rand.nextBoolean();
opt.setPurgeRedundantKvsWhileFlush(boolValue);
assertThat(opt.purgeRedundantKvsWhileFlush()).isEqualTo(boolValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void verifyChecksumsInCompaction() {
ColumnFamilyOptions opt = null;
try {
opt = new ColumnFamilyOptions();
boolean boolValue = rand.nextBoolean();
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final boolean boolValue = rand.nextBoolean();
opt.setVerifyChecksumsInCompaction(boolValue);
assertThat(opt.verifyChecksumsInCompaction()).isEqualTo(boolValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void filterDeletes() {
ColumnFamilyOptions opt = null;
try {
opt = new ColumnFamilyOptions();
boolean boolValue = rand.nextBoolean();
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final boolean boolValue = rand.nextBoolean();
opt.setFilterDeletes(boolValue);
assertThat(opt.filterDeletes()).isEqualTo(boolValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void maxSequentialSkipInIterations() {
ColumnFamilyOptions opt = null;
try {
opt = new ColumnFamilyOptions();
long longValue = rand.nextLong();
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final long longValue = rand.nextLong();
opt.setMaxSequentialSkipInIterations(longValue);
assertThat(opt.maxSequentialSkipInIterations()).isEqualTo(longValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void inplaceUpdateSupport() {
ColumnFamilyOptions opt = null;
try {
opt = new ColumnFamilyOptions();
boolean boolValue = rand.nextBoolean();
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final boolean boolValue = rand.nextBoolean();
opt.setInplaceUpdateSupport(boolValue);
assertThat(opt.inplaceUpdateSupport()).isEqualTo(boolValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void inplaceUpdateNumLocks() throws RocksDBException {
ColumnFamilyOptions opt = null;
try {
opt = new ColumnFamilyOptions();
long longValue = rand.nextLong();
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final long longValue = rand.nextLong();
opt.setInplaceUpdateNumLocks(longValue);
assertThat(opt.inplaceUpdateNumLocks()).isEqualTo(longValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void memtablePrefixBloomBits() {
ColumnFamilyOptions opt = null;
try {
opt = new ColumnFamilyOptions();
int intValue = rand.nextInt();
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final int intValue = rand.nextInt();
opt.setMemtablePrefixBloomBits(intValue);
assertThat(opt.memtablePrefixBloomBits()).isEqualTo(intValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void memtablePrefixBloomProbes() {
ColumnFamilyOptions opt = null;
try {
int intValue = rand.nextInt();
opt = new ColumnFamilyOptions();
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final int intValue = rand.nextInt();
opt.setMemtablePrefixBloomProbes(intValue);
assertThat(opt.memtablePrefixBloomProbes()).isEqualTo(intValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void bloomLocality() {
ColumnFamilyOptions opt = null;
try {
int intValue = rand.nextInt();
opt = new ColumnFamilyOptions();
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final int intValue = rand.nextInt();
opt.setBloomLocality(intValue);
assertThat(opt.bloomLocality()).isEqualTo(intValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void maxSuccessiveMerges() throws RocksDBException {
ColumnFamilyOptions opt = null;
try {
long longValue = rand.nextLong();
opt = new ColumnFamilyOptions();
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final long longValue = rand.nextLong();
opt.setMaxSuccessiveMerges(longValue);
assertThat(opt.maxSuccessiveMerges()).isEqualTo(longValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void minPartialMergeOperands() {
ColumnFamilyOptions opt = null;
try {
int intValue = rand.nextInt();
opt = new ColumnFamilyOptions();
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final int intValue = rand.nextInt();
opt.setMinPartialMergeOperands(intValue);
assertThat(opt.minPartialMergeOperands()).isEqualTo(intValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void optimizeFiltersForHits() {
ColumnFamilyOptions opt = null;
try {
boolean aBoolean = rand.nextBoolean();
opt = new ColumnFamilyOptions();
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final boolean aBoolean = rand.nextBoolean();
opt.setOptimizeFiltersForHits(aBoolean);
assertThat(opt.optimizeFiltersForHits()).isEqualTo(aBoolean);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void memTable() throws RocksDBException {
ColumnFamilyOptions opt = null;
try {
opt = new ColumnFamilyOptions();
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
opt.setMemTableConfig(new HashLinkedListMemTableConfig());
assertThat(opt.memTableFactoryName()).
isEqualTo("HashLinkedListRepFactory");
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void comparator() throws RocksDBException {
ColumnFamilyOptions opt = null;
try {
opt = new ColumnFamilyOptions();
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
opt.setComparator(BuiltinComparator.BYTEWISE_COMPARATOR);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void linkageOfPrepMethods() {
ColumnFamilyOptions options = null;
try {
options = new ColumnFamilyOptions();
try (final ColumnFamilyOptions options = new ColumnFamilyOptions()) {
options.optimizeUniversalStyleCompaction();
options.optimizeUniversalStyleCompaction(4000);
options.optimizeLevelStyleCompaction();
options.optimizeLevelStyleCompaction(3000);
options.optimizeForPointLookup(10);
} finally {
if (options != null) {
options.dispose();
}
}
}
@Test
public void shouldSetTestPrefixExtractor() {
ColumnFamilyOptions options = null;
try {
options = new ColumnFamilyOptions();
try (final ColumnFamilyOptions options = new ColumnFamilyOptions()) {
options.useFixedLengthPrefixExtractor(100);
options.useFixedLengthPrefixExtractor(10);
} finally {
if (options != null) {
options.dispose();
}
}
}
@Test
public void shouldSetTestCappedPrefixExtractor() {
ColumnFamilyOptions options = null;
try {
options = new ColumnFamilyOptions();
try (final ColumnFamilyOptions options = new ColumnFamilyOptions()) {
options.useCappedPrefixExtractor(100);
options.useCappedPrefixExtractor(10);
} finally {
if (options != null) {
options.dispose();
}
}
}
@Test
public void compressionTypes() {
ColumnFamilyOptions columnFamilyOptions = null;
try {
columnFamilyOptions = new ColumnFamilyOptions();
for (CompressionType compressionType :
try (final ColumnFamilyOptions columnFamilyOptions
= new ColumnFamilyOptions()) {
for (final CompressionType compressionType :
CompressionType.values()) {
columnFamilyOptions.setCompressionType(compressionType);
assertThat(columnFamilyOptions.compressionType()).
@ -643,21 +413,16 @@ public class ColumnFamilyOptionsTest {
assertThat(CompressionType.valueOf("NO_COMPRESSION")).
isEqualTo(CompressionType.NO_COMPRESSION);
}
} finally {
if (columnFamilyOptions != null) {
columnFamilyOptions.dispose();
}
}
}
@Test
public void compressionPerLevel() {
ColumnFamilyOptions columnFamilyOptions = null;
try {
columnFamilyOptions = new ColumnFamilyOptions();
try (final ColumnFamilyOptions columnFamilyOptions
= new ColumnFamilyOptions()) {
assertThat(columnFamilyOptions.compressionPerLevel()).isEmpty();
List<CompressionType> compressionTypeList = new ArrayList<>();
for (int i=0; i < columnFamilyOptions.numLevels(); i++) {
for (int i = 0; i < columnFamilyOptions.numLevels(); i++) {
compressionTypeList.add(CompressionType.NO_COMPRESSION);
}
columnFamilyOptions.setCompressionPerLevel(compressionTypeList);
@ -666,18 +431,13 @@ public class ColumnFamilyOptionsTest {
assertThat(compressionType).isEqualTo(
CompressionType.NO_COMPRESSION);
}
} finally {
if (columnFamilyOptions != null) {
columnFamilyOptions.dispose();
}
}
}
@Test
public void differentCompressionsPerLevel() {
ColumnFamilyOptions columnFamilyOptions = null;
try {
columnFamilyOptions = new ColumnFamilyOptions();
try (final ColumnFamilyOptions columnFamilyOptions
= new ColumnFamilyOptions()) {
columnFamilyOptions.setNumLevels(3);
assertThat(columnFamilyOptions.compressionPerLevel()).isEmpty();
@ -697,38 +457,27 @@ public class ColumnFamilyOptionsTest {
CompressionType.SNAPPY_COMPRESSION,
CompressionType.LZ4_COMPRESSION);
} finally {
if (columnFamilyOptions != null) {
columnFamilyOptions.dispose();
}
}
}
@Test
public void compactionStyles() {
ColumnFamilyOptions ColumnFamilyOptions = null;
try {
ColumnFamilyOptions = new ColumnFamilyOptions();
for (CompactionStyle compactionStyle :
try (final ColumnFamilyOptions columnFamilyOptions
= new ColumnFamilyOptions()) {
for (final CompactionStyle compactionStyle :
CompactionStyle.values()) {
ColumnFamilyOptions.setCompactionStyle(compactionStyle);
assertThat(ColumnFamilyOptions.compactionStyle()).
columnFamilyOptions.setCompactionStyle(compactionStyle);
assertThat(columnFamilyOptions.compactionStyle()).
isEqualTo(compactionStyle);
assertThat(CompactionStyle.valueOf("FIFO")).
isEqualTo(CompactionStyle.FIFO);
}
} finally {
if (ColumnFamilyOptions != null) {
ColumnFamilyOptions.dispose();
}
}
}
@Test
public void maxTableFilesSizeFIFO() {
ColumnFamilyOptions opt = null;
try {
opt = new ColumnFamilyOptions();
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
long longValue = rand.nextLong();
// Size has to be positive
longValue = (longValue < 0) ? -longValue : longValue;
@ -736,10 +485,6 @@ public class ColumnFamilyOptionsTest {
opt.setMaxTableFilesSizeFIFO(longValue);
assertThat(opt.maxTableFilesSizeFIFO()).
isEqualTo(longValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
}

File diff suppressed because it is too large Load Diff

@ -18,18 +18,15 @@ public class ComparatorOptionsTest {
@Test
public void comparatorOptions() {
  // ComparatorOptions must construct and toggle the adaptive-mutex flag.
  try (final ComparatorOptions copt = new ComparatorOptions()) {
    assertThat(copt).isNotNull();
    // UseAdaptiveMutex test
    copt.setUseAdaptiveMutex(true);
    assertThat(copt.useAdaptiveMutex()).isTrue();
    copt.setUseAdaptiveMutex(false);
    assertThat(copt.useAdaptiveMutex()).isFalse();
  }
}

@ -79,66 +79,52 @@ public class ComparatorTest {
@Test
public void builtinForwardComparator()
throws RocksDBException {
Options options = null;
RocksDB rocksDB = null;
RocksIterator rocksIterator = null;
try {
options = new Options();
options.setCreateIfMissing(true);
options.setComparator(BuiltinComparator.BYTEWISE_COMPARATOR);
rocksDB = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath());
rocksDB.put("abc1".getBytes(), "abc1".getBytes());
rocksDB.put("abc2".getBytes(), "abc2".getBytes());
rocksDB.put("abc3".getBytes(), "abc3".getBytes());
rocksIterator = rocksDB.newIterator();
// Iterate over keys using a iterator
rocksIterator.seekToFirst();
assertThat(rocksIterator.isValid()).isTrue();
assertThat(rocksIterator.key()).isEqualTo(
"abc1".getBytes());
assertThat(rocksIterator.value()).isEqualTo(
"abc1".getBytes());
rocksIterator.next();
assertThat(rocksIterator.isValid()).isTrue();
assertThat(rocksIterator.key()).isEqualTo(
"abc2".getBytes());
assertThat(rocksIterator.value()).isEqualTo(
"abc2".getBytes());
rocksIterator.next();
assertThat(rocksIterator.isValid()).isTrue();
assertThat(rocksIterator.key()).isEqualTo(
"abc3".getBytes());
assertThat(rocksIterator.value()).isEqualTo(
"abc3".getBytes());
rocksIterator.next();
assertThat(rocksIterator.isValid()).isFalse();
// Get last one
rocksIterator.seekToLast();
assertThat(rocksIterator.isValid()).isTrue();
assertThat(rocksIterator.key()).isEqualTo(
"abc3".getBytes());
assertThat(rocksIterator.value()).isEqualTo(
"abc3".getBytes());
// Seek for abc
rocksIterator.seek("abc".getBytes());
assertThat(rocksIterator.isValid()).isTrue();
assertThat(rocksIterator.key()).isEqualTo(
"abc1".getBytes());
assertThat(rocksIterator.value()).isEqualTo(
"abc1".getBytes());
} finally {
if (rocksIterator != null) {
rocksIterator.dispose();
}
if (rocksDB != null) {
rocksDB.close();
}
if (options != null) {
options.dispose();
try (final Options options = new Options()
.setCreateIfMissing(true)
.setComparator(BuiltinComparator.BYTEWISE_COMPARATOR);
final RocksDB rocksDb = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath())
) {
rocksDb.put("abc1".getBytes(), "abc1".getBytes());
rocksDb.put("abc2".getBytes(), "abc2".getBytes());
rocksDb.put("abc3".getBytes(), "abc3".getBytes());
try(final RocksIterator rocksIterator = rocksDb.newIterator()) {
// Iterate over keys using a iterator
rocksIterator.seekToFirst();
assertThat(rocksIterator.isValid()).isTrue();
assertThat(rocksIterator.key()).isEqualTo(
"abc1".getBytes());
assertThat(rocksIterator.value()).isEqualTo(
"abc1".getBytes());
rocksIterator.next();
assertThat(rocksIterator.isValid()).isTrue();
assertThat(rocksIterator.key()).isEqualTo(
"abc2".getBytes());
assertThat(rocksIterator.value()).isEqualTo(
"abc2".getBytes());
rocksIterator.next();
assertThat(rocksIterator.isValid()).isTrue();
assertThat(rocksIterator.key()).isEqualTo(
"abc3".getBytes());
assertThat(rocksIterator.value()).isEqualTo(
"abc3".getBytes());
rocksIterator.next();
assertThat(rocksIterator.isValid()).isFalse();
// Get last one
rocksIterator.seekToLast();
assertThat(rocksIterator.isValid()).isTrue();
assertThat(rocksIterator.key()).isEqualTo(
"abc3".getBytes());
assertThat(rocksIterator.value()).isEqualTo(
"abc3".getBytes());
// Seek for abc
rocksIterator.seek("abc".getBytes());
assertThat(rocksIterator.isValid()).isTrue();
assertThat(rocksIterator.key()).isEqualTo(
"abc1".getBytes());
assertThat(rocksIterator.value()).isEqualTo(
"abc1".getBytes());
}
}
}
@ -146,69 +132,56 @@ public class ComparatorTest {
@Test
public void builtinReverseComparator()
throws RocksDBException {
Options options = null;
RocksDB rocksDB = null;
RocksIterator rocksIterator = null;
try {
options = new Options();
options.setCreateIfMissing(true);
options.setComparator(
BuiltinComparator.REVERSE_BYTEWISE_COMPARATOR);
rocksDB = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath());
rocksDB.put("abc1".getBytes(), "abc1".getBytes());
rocksDB.put("abc2".getBytes(), "abc2".getBytes());
rocksDB.put("abc3".getBytes(), "abc3".getBytes());
rocksIterator = rocksDB.newIterator();
// Iterate over keys using a iterator
rocksIterator.seekToFirst();
assertThat(rocksIterator.isValid()).isTrue();
assertThat(rocksIterator.key()).isEqualTo(
"abc3".getBytes());
assertThat(rocksIterator.value()).isEqualTo(
"abc3".getBytes());
rocksIterator.next();
assertThat(rocksIterator.isValid()).isTrue();
assertThat(rocksIterator.key()).isEqualTo(
"abc2".getBytes());
assertThat(rocksIterator.value()).isEqualTo(
"abc2".getBytes());
rocksIterator.next();
assertThat(rocksIterator.isValid()).isTrue();
assertThat(rocksIterator.key()).isEqualTo(
"abc1".getBytes());
assertThat(rocksIterator.value()).isEqualTo(
"abc1".getBytes());
rocksIterator.next();
assertThat(rocksIterator.isValid()).isFalse();
// Get last one
rocksIterator.seekToLast();
assertThat(rocksIterator.isValid()).isTrue();
assertThat(rocksIterator.key()).isEqualTo(
"abc1".getBytes());
assertThat(rocksIterator.value()).isEqualTo(
"abc1".getBytes());
// Will be invalid because abc is after abc1
rocksIterator.seek("abc".getBytes());
assertThat(rocksIterator.isValid()).isFalse();
// Will be abc3 because the next one after abc999
// is abc3
rocksIterator.seek("abc999".getBytes());
assertThat(rocksIterator.key()).isEqualTo(
"abc3".getBytes());
assertThat(rocksIterator.value()).isEqualTo(
"abc3".getBytes());
} finally {
if (rocksIterator != null) {
rocksIterator.dispose();
}
if (rocksDB != null) {
rocksDB.close();
}
if (options != null) {
options.dispose();
try (final Options options = new Options()
.setCreateIfMissing(true)
.setComparator(BuiltinComparator.REVERSE_BYTEWISE_COMPARATOR);
final RocksDB rocksDb = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath())
) {
rocksDb.put("abc1".getBytes(), "abc1".getBytes());
rocksDb.put("abc2".getBytes(), "abc2".getBytes());
rocksDb.put("abc3".getBytes(), "abc3".getBytes());
try (final RocksIterator rocksIterator = rocksDb.newIterator()) {
// Iterate over keys using a iterator
rocksIterator.seekToFirst();
assertThat(rocksIterator.isValid()).isTrue();
assertThat(rocksIterator.key()).isEqualTo(
"abc3".getBytes());
assertThat(rocksIterator.value()).isEqualTo(
"abc3".getBytes());
rocksIterator.next();
assertThat(rocksIterator.isValid()).isTrue();
assertThat(rocksIterator.key()).isEqualTo(
"abc2".getBytes());
assertThat(rocksIterator.value()).isEqualTo(
"abc2".getBytes());
rocksIterator.next();
assertThat(rocksIterator.isValid()).isTrue();
assertThat(rocksIterator.key()).isEqualTo(
"abc1".getBytes());
assertThat(rocksIterator.value()).isEqualTo(
"abc1".getBytes());
rocksIterator.next();
assertThat(rocksIterator.isValid()).isFalse();
// Get last one
rocksIterator.seekToLast();
assertThat(rocksIterator.isValid()).isTrue();
assertThat(rocksIterator.key()).isEqualTo(
"abc1".getBytes());
assertThat(rocksIterator.value()).isEqualTo(
"abc1".getBytes());
// Will be invalid because abc is after abc1
rocksIterator.seek("abc".getBytes());
assertThat(rocksIterator.isValid()).isFalse();
// Will be abc3 because the next one after abc999
// is abc3
rocksIterator.seek("abc999".getBytes());
assertThat(rocksIterator.key()).isEqualTo(
"abc3".getBytes());
assertThat(rocksIterator.value()).isEqualTo(
"abc3".getBytes());
}
}
}

@ -8,11 +8,10 @@ package org.rocksdb;
import org.junit.Test;
public class CompressionOptionsTest
{
public class CompressionOptionsTest {
@Test
public void getCompressionType() {
for (CompressionType compressionType : CompressionType.values()) {
for (final CompressionType compressionType : CompressionType.values()) {
String libraryName = compressionType.getLibraryName();
compressionType.equals(CompressionType.getCompressionType(
libraryName));

@ -24,547 +24,339 @@ public class DBOptionsTest {
@Test
public void getDBOptionsFromProps() {
DBOptions opt = null;
try {
// setup sample properties
Properties properties = new Properties();
properties.put("allow_mmap_reads", "true");
properties.put("bytes_per_sync", "13");
opt = DBOptions.getDBOptionsFromProps(properties);
// setup sample properties
final Properties properties = new Properties();
properties.put("allow_mmap_reads", "true");
properties.put("bytes_per_sync", "13");
try(final DBOptions opt = DBOptions.getDBOptionsFromProps(properties)) {
assertThat(opt).isNotNull();
assertThat(String.valueOf(opt.allowMmapReads())).
isEqualTo(properties.get("allow_mmap_reads"));
assertThat(String.valueOf(opt.bytesPerSync())).
isEqualTo(properties.get("bytes_per_sync"));
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void failDBOptionsFromPropsWithIllegalValue() {
DBOptions opt = null;
try {
// setup sample properties
Properties properties = new Properties();
properties.put("tomato", "1024");
properties.put("burger", "2");
opt = DBOptions.
getDBOptionsFromProps(properties);
// setup sample properties
final Properties properties = new Properties();
properties.put("tomato", "1024");
properties.put("burger", "2");
try(final DBOptions opt = DBOptions.getDBOptionsFromProps(properties)) {
assertThat(opt).isNull();
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test(expected = IllegalArgumentException.class)
public void failDBOptionsFromPropsWithNullValue() {
  // A null Properties argument must be rejected before any native object exists.
  try (final DBOptions opt = DBOptions.getDBOptionsFromProps(null)) {
    //no-op: the factory call above is expected to throw
  }
}
@Test(expected = IllegalArgumentException.class)
public void failDBOptionsFromPropsWithEmptyProps() {
  // An empty Properties object is invalid input and must throw.
  try (final DBOptions opt = DBOptions.getDBOptionsFromProps(
      new Properties())) {
    //no-op: the factory call above is expected to throw
  }
}
@Test
public void setIncreaseParallelism() {
DBOptions opt = null;
try {
opt = new DBOptions();
try(final DBOptions opt = new DBOptions()) {
final int threads = Runtime.getRuntime().availableProcessors() * 2;
opt.setIncreaseParallelism(threads);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void createIfMissing() {
DBOptions opt = null;
try {
opt = new DBOptions();
boolean boolValue = rand.nextBoolean();
try(final DBOptions opt = new DBOptions()) {
final boolean boolValue = rand.nextBoolean();
opt.setCreateIfMissing(boolValue);
assertThat(opt.createIfMissing()).
isEqualTo(boolValue);
} finally {
if (opt != null) {
opt.dispose();
}
assertThat(opt.createIfMissing()).isEqualTo(boolValue);
}
}
@Test
public void createMissingColumnFamilies() {
DBOptions opt = null;
try {
opt = new DBOptions();
boolean boolValue = rand.nextBoolean();
try(final DBOptions opt = new DBOptions()) {
final boolean boolValue = rand.nextBoolean();
opt.setCreateMissingColumnFamilies(boolValue);
assertThat(opt.createMissingColumnFamilies()).
isEqualTo(boolValue);
} finally {
if (opt != null) {
opt.dispose();
}
assertThat(opt.createMissingColumnFamilies()).isEqualTo(boolValue);
}
}
@Test
public void errorIfExists() {
DBOptions opt = null;
try {
opt = new DBOptions();
boolean boolValue = rand.nextBoolean();
try(final DBOptions opt = new DBOptions()) {
final boolean boolValue = rand.nextBoolean();
opt.setErrorIfExists(boolValue);
assertThat(opt.errorIfExists()).isEqualTo(boolValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void paranoidChecks() {
DBOptions opt = null;
try {
opt = new DBOptions();
boolean boolValue = rand.nextBoolean();
try(final DBOptions opt = new DBOptions()) {
final boolean boolValue = rand.nextBoolean();
opt.setParanoidChecks(boolValue);
assertThat(opt.paranoidChecks()).
isEqualTo(boolValue);
} finally {
if (opt != null) {
opt.dispose();
}
assertThat(opt.paranoidChecks()).isEqualTo(boolValue);
}
}
@Test
public void maxTotalWalSize() {
DBOptions opt = null;
try {
opt = new DBOptions();
long longValue = rand.nextLong();
try(final DBOptions opt = new DBOptions()) {
final long longValue = rand.nextLong();
opt.setMaxTotalWalSize(longValue);
assertThat(opt.maxTotalWalSize()).
isEqualTo(longValue);
} finally {
if (opt != null) {
opt.dispose();
}
assertThat(opt.maxTotalWalSize()).isEqualTo(longValue);
}
}
@Test
public void maxOpenFiles() {
DBOptions opt = null;
try {
opt = new DBOptions();
int intValue = rand.nextInt();
try(final DBOptions opt = new DBOptions()) {
final int intValue = rand.nextInt();
opt.setMaxOpenFiles(intValue);
assertThat(opt.maxOpenFiles()).isEqualTo(intValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void disableDataSync() {
DBOptions opt = null;
try {
opt = new DBOptions();
boolean boolValue = rand.nextBoolean();
try(final DBOptions opt = new DBOptions()) {
final boolean boolValue = rand.nextBoolean();
opt.setDisableDataSync(boolValue);
assertThat(opt.disableDataSync()).
isEqualTo(boolValue);
} finally {
if (opt != null) {
opt.dispose();
}
assertThat(opt.disableDataSync()).isEqualTo(boolValue);
}
}
@Test
public void useFsync() {
DBOptions opt = null;
try {
opt = new DBOptions();
boolean boolValue = rand.nextBoolean();
try(final DBOptions opt = new DBOptions()) {
final boolean boolValue = rand.nextBoolean();
opt.setUseFsync(boolValue);
assertThat(opt.useFsync()).isEqualTo(boolValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void dbLogDir() {
DBOptions opt = null;
try {
opt = new DBOptions();
String str = "path/to/DbLogDir";
try(final DBOptions opt = new DBOptions()) {
final String str = "path/to/DbLogDir";
opt.setDbLogDir(str);
assertThat(opt.dbLogDir()).isEqualTo(str);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void walDir() {
DBOptions opt = null;
try {
opt = new DBOptions();
String str = "path/to/WalDir";
try(final DBOptions opt = new DBOptions()) {
final String str = "path/to/WalDir";
opt.setWalDir(str);
assertThat(opt.walDir()).isEqualTo(str);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void deleteObsoleteFilesPeriodMicros() {
DBOptions opt = null;
try {
opt = new DBOptions();
long longValue = rand.nextLong();
try(final DBOptions opt = new DBOptions()) {
final long longValue = rand.nextLong();
opt.setDeleteObsoleteFilesPeriodMicros(longValue);
assertThat(opt.deleteObsoleteFilesPeriodMicros()).
isEqualTo(longValue);
} finally {
if (opt != null) {
opt.dispose();
}
assertThat(opt.deleteObsoleteFilesPeriodMicros()).isEqualTo(longValue);
}
}
@Test
public void maxBackgroundCompactions() {
DBOptions opt = null;
try {
opt = new DBOptions();
int intValue = rand.nextInt();
try(final DBOptions opt = new DBOptions()) {
final int intValue = rand.nextInt();
opt.setMaxBackgroundCompactions(intValue);
assertThat(opt.maxBackgroundCompactions()).
isEqualTo(intValue);
} finally {
if (opt != null) {
opt.dispose();
}
assertThat(opt.maxBackgroundCompactions()).isEqualTo(intValue);
}
}
@Test
public void maxBackgroundFlushes() {
DBOptions opt = null;
try {
opt = new DBOptions();
int intValue = rand.nextInt();
try(final DBOptions opt = new DBOptions()) {
final int intValue = rand.nextInt();
opt.setMaxBackgroundFlushes(intValue);
assertThat(opt.maxBackgroundFlushes()).
isEqualTo(intValue);
} finally {
if (opt != null) {
opt.dispose();
}
assertThat(opt.maxBackgroundFlushes()).isEqualTo(intValue);
}
}
@Test
public void maxLogFileSize() throws RocksDBException {
DBOptions opt = null;
try {
opt = new DBOptions();
long longValue = rand.nextLong();
try(final DBOptions opt = new DBOptions()) {
final long longValue = rand.nextLong();
opt.setMaxLogFileSize(longValue);
assertThat(opt.maxLogFileSize()).isEqualTo(longValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void logFileTimeToRoll() throws RocksDBException {
DBOptions opt = null;
try {
opt = new DBOptions();
long longValue = rand.nextLong();
try(final DBOptions opt = new DBOptions()) {
final long longValue = rand.nextLong();
opt.setLogFileTimeToRoll(longValue);
assertThat(opt.logFileTimeToRoll()).
isEqualTo(longValue);
} finally {
if (opt != null) {
opt.dispose();
}
assertThat(opt.logFileTimeToRoll()).isEqualTo(longValue);
}
}
@Test
public void keepLogFileNum() throws RocksDBException {
DBOptions opt = null;
try {
opt = new DBOptions();
long longValue = rand.nextLong();
try(final DBOptions opt = new DBOptions()) {
final long longValue = rand.nextLong();
opt.setKeepLogFileNum(longValue);
assertThat(opt.keepLogFileNum()).isEqualTo(longValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void maxManifestFileSize() {
DBOptions opt = null;
try {
opt = new DBOptions();
long longValue = rand.nextLong();
try(final DBOptions opt = new DBOptions()) {
final long longValue = rand.nextLong();
opt.setMaxManifestFileSize(longValue);
assertThat(opt.maxManifestFileSize()).
isEqualTo(longValue);
} finally {
if (opt != null) {
opt.dispose();
}
assertThat(opt.maxManifestFileSize()).isEqualTo(longValue);
}
}
@Test
public void tableCacheNumshardbits() {
DBOptions opt = null;
try {
opt = new DBOptions();
int intValue = rand.nextInt();
try(final DBOptions opt = new DBOptions()) {
final int intValue = rand.nextInt();
opt.setTableCacheNumshardbits(intValue);
assertThat(opt.tableCacheNumshardbits()).
isEqualTo(intValue);
} finally {
if (opt != null) {
opt.dispose();
}
assertThat(opt.tableCacheNumshardbits()).isEqualTo(intValue);
}
}
@Test
public void walSizeLimitMB() {
DBOptions opt = null;
try {
opt = new DBOptions();
long longValue = rand.nextLong();
try(final DBOptions opt = new DBOptions()) {
final long longValue = rand.nextLong();
opt.setWalSizeLimitMB(longValue);
assertThat(opt.walSizeLimitMB()).isEqualTo(longValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void walTtlSeconds() {
DBOptions opt = null;
try {
opt = new DBOptions();
long longValue = rand.nextLong();
try(final DBOptions opt = new DBOptions()) {
final long longValue = rand.nextLong();
opt.setWalTtlSeconds(longValue);
assertThat(opt.walTtlSeconds()).isEqualTo(longValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void manifestPreallocationSize() throws RocksDBException {
DBOptions opt = null;
try {
opt = new DBOptions();
long longValue = rand.nextLong();
try(final DBOptions opt = new DBOptions()) {
final long longValue = rand.nextLong();
opt.setManifestPreallocationSize(longValue);
assertThat(opt.manifestPreallocationSize()).
isEqualTo(longValue);
} finally {
if (opt != null) {
opt.dispose();
}
assertThat(opt.manifestPreallocationSize()).isEqualTo(longValue);
}
}
@Test
public void allowOsBuffer() {
DBOptions opt = null;
try {
opt = new DBOptions();
boolean boolValue = rand.nextBoolean();
try(final DBOptions opt = new DBOptions()) {
final boolean boolValue = rand.nextBoolean();
opt.setAllowOsBuffer(boolValue);
assertThat(opt.allowOsBuffer()).isEqualTo(boolValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void allowMmapReads() {
DBOptions opt = null;
try {
opt = new DBOptions();
boolean boolValue = rand.nextBoolean();
try(final DBOptions opt = new DBOptions()) {
final boolean boolValue = rand.nextBoolean();
opt.setAllowMmapReads(boolValue);
assertThat(opt.allowMmapReads()).isEqualTo(boolValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void allowMmapWrites() {
DBOptions opt = null;
try {
opt = new DBOptions();
boolean boolValue = rand.nextBoolean();
try(final DBOptions opt = new DBOptions()) {
final boolean boolValue = rand.nextBoolean();
opt.setAllowMmapWrites(boolValue);
assertThat(opt.allowMmapWrites()).isEqualTo(boolValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void isFdCloseOnExec() {
DBOptions opt = null;
try {
opt = new DBOptions();
boolean boolValue = rand.nextBoolean();
try(final DBOptions opt = new DBOptions()) {
final boolean boolValue = rand.nextBoolean();
opt.setIsFdCloseOnExec(boolValue);
assertThat(opt.isFdCloseOnExec()).isEqualTo(boolValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void statsDumpPeriodSec() {
DBOptions opt = null;
try {
opt = new DBOptions();
int intValue = rand.nextInt();
try(final DBOptions opt = new DBOptions()) {
final int intValue = rand.nextInt();
opt.setStatsDumpPeriodSec(intValue);
assertThat(opt.statsDumpPeriodSec()).isEqualTo(intValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void adviseRandomOnOpen() {
DBOptions opt = null;
try {
opt = new DBOptions();
boolean boolValue = rand.nextBoolean();
try(final DBOptions opt = new DBOptions()) {
final boolean boolValue = rand.nextBoolean();
opt.setAdviseRandomOnOpen(boolValue);
assertThat(opt.adviseRandomOnOpen()).isEqualTo(boolValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void useAdaptiveMutex() {
DBOptions opt = null;
try {
opt = new DBOptions();
boolean boolValue = rand.nextBoolean();
try(final DBOptions opt = new DBOptions()) {
final boolean boolValue = rand.nextBoolean();
opt.setUseAdaptiveMutex(boolValue);
assertThat(opt.useAdaptiveMutex()).isEqualTo(boolValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void bytesPerSync() {
DBOptions opt = null;
try {
opt = new DBOptions();
long longValue = rand.nextLong();
try(final DBOptions opt = new DBOptions()) {
final long longValue = rand.nextLong();
opt.setBytesPerSync(longValue);
assertThat(opt.bytesPerSync()).isEqualTo(longValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void rateLimiterConfig() {
DBOptions options = null;
DBOptions anotherOptions = null;
try {
options = new DBOptions();
RateLimiterConfig rateLimiterConfig =
try(final DBOptions options = new DBOptions();
final DBOptions anotherOptions = new DBOptions()) {
final RateLimiterConfig rateLimiterConfig =
new GenericRateLimiterConfig(1000, 100 * 1000, 1);
options.setRateLimiterConfig(rateLimiterConfig);
// Test with parameter initialization
anotherOptions = new DBOptions();
anotherOptions.setRateLimiterConfig(
new GenericRateLimiterConfig(1000));
} finally {
if (options != null) {
options.dispose();
}
if (anotherOptions != null) {
anotherOptions.dispose();
}
}
}
@Test
public void statistics() {
  // createStatistics() must yield a non-null Statistics handle, and a
  // second DBOptions instance must expose its own non-null pointer.
  try (final DBOptions options = new DBOptions()) {
    Statistics statistics = options.createStatistics().
        statisticsPtr();
    assertThat(statistics).isNotNull();

    try (final DBOptions anotherOptions = new DBOptions()) {
      statistics = anotherOptions.statisticsPtr();
      assertThat(statistics).isNotNull();
    }
  }
}
}

@ -18,11 +18,8 @@ public class DirectSliceTest {
@Test
public void directSlice() {
DirectSlice directSlice = null;
DirectSlice otherSlice = null;
try {
directSlice = new DirectSlice("abc");
otherSlice = new DirectSlice("abc");
try(final DirectSlice directSlice = new DirectSlice("abc");
final DirectSlice otherSlice = new DirectSlice("abc")) {
assertThat(directSlice.toString()).isEqualTo("abc");
// clear first slice
directSlice.clear();
@ -32,75 +29,46 @@ public class DirectSliceTest {
// remove prefix
otherSlice.removePrefix(1);
assertThat(otherSlice.toString()).isEqualTo("bc");
} finally {
if (directSlice != null) {
directSlice.dispose();
}
if (otherSlice != null) {
otherSlice.dispose();
}
}
}
@Test
public void directSliceWithByteBuffer() {
DirectSlice directSlice = null;
try {
byte[] data = "Some text".getBytes();
ByteBuffer buffer = ByteBuffer.allocateDirect(data.length + 1);
buffer.put(data);
buffer.put(data.length, (byte)0);
final byte[] data = "Some text".getBytes();
final ByteBuffer buffer = ByteBuffer.allocateDirect(data.length + 1);
buffer.put(data);
buffer.put(data.length, (byte)0);
directSlice = new DirectSlice(buffer);
try(final DirectSlice directSlice = new DirectSlice(buffer)) {
assertThat(directSlice.toString()).isEqualTo("Some text");
} finally {
if (directSlice != null) {
directSlice.dispose();
}
}
}
@Test
public void directSliceWithByteBufferAndLength() {
DirectSlice directSlice = null;
try {
byte[] data = "Some text".getBytes();
ByteBuffer buffer = ByteBuffer.allocateDirect(data.length);
buffer.put(data);
directSlice = new DirectSlice(buffer, 4);
final byte[] data = "Some text".getBytes();
final ByteBuffer buffer = ByteBuffer.allocateDirect(data.length);
buffer.put(data);
try(final DirectSlice directSlice = new DirectSlice(buffer, 4)) {
assertThat(directSlice.toString()).isEqualTo("Some");
} finally {
if (directSlice != null) {
directSlice.dispose();
}
}
}
@Test(expected = AssertionError.class)
public void directSliceInitWithoutDirectAllocation() {
DirectSlice directSlice = null;
try {
byte[] data = "Some text".getBytes();
ByteBuffer buffer = ByteBuffer.wrap(data);
directSlice = new DirectSlice(buffer);
} finally {
if (directSlice != null) {
directSlice.dispose();
}
final byte[] data = "Some text".getBytes();
final ByteBuffer buffer = ByteBuffer.wrap(data);
try(final DirectSlice directSlice = new DirectSlice(buffer)) {
//no-op
}
}
@Test(expected = AssertionError.class)
public void directSlicePrefixInitWithoutDirectAllocation() {
DirectSlice directSlice = null;
try {
byte[] data = "Some text".getBytes();
ByteBuffer buffer = ByteBuffer.wrap(data);
directSlice = new DirectSlice(buffer, 4);
} finally {
if (directSlice != null) {
directSlice.dispose();
}
final byte[] data = "Some text".getBytes();
final ByteBuffer buffer = ByteBuffer.wrap(data);
try(final DirectSlice directSlice = new DirectSlice(buffer, 4)) {
//no-op
}
}
}

@ -16,31 +16,23 @@ public class FilterTest {
@Test
public void filter() {
Options options = null;
try {
options = new Options();
// test table config
options.setTableFormatConfig(new BlockBasedTableConfig().
setFilter(new BloomFilter()));
options.dispose();
System.gc();
System.runFinalization();
// new Bloom filter
options = new Options();
BlockBasedTableConfig blockConfig = new BlockBasedTableConfig();
blockConfig.setFilter(new BloomFilter());
options.setTableFormatConfig(blockConfig);
BloomFilter bloomFilter = new BloomFilter(10);
blockConfig.setFilter(bloomFilter);
options.setTableFormatConfig(blockConfig);
System.gc();
System.runFinalization();
blockConfig.setFilter(new BloomFilter(10, false));
options.setTableFormatConfig(blockConfig);
} finally {
if (options != null) {
options.dispose();
// new Bloom filter
final BlockBasedTableConfig blockConfig = new BlockBasedTableConfig();
try(final Options options = new Options()) {
try(final Filter bloomFilter = new BloomFilter()) {
blockConfig.setFilter(bloomFilter);
options.setTableFormatConfig(blockConfig);
}
try(final Filter bloomFilter = new BloomFilter(10)) {
blockConfig.setFilter(bloomFilter);
options.setTableFormatConfig(blockConfig);
}
try(final Filter bloomFilter = new BloomFilter(10, false)) {
blockConfig.setFilter(bloomFilter);
options.setTableFormatConfig(blockConfig);
}
}
}

@ -22,44 +22,28 @@ public class FlushTest {
@Test
public void flush() throws RocksDBException {
RocksDB db = null;
Options options = null;
WriteOptions wOpt = null;
FlushOptions flushOptions = null;
try {
options = new Options();
// Setup options
options.setCreateIfMissing(true);
options.setMaxWriteBufferNumber(10);
options.setMinWriteBufferNumberToMerge(10);
wOpt = new WriteOptions();
flushOptions = new FlushOptions();
flushOptions.setWaitForFlush(true);
try(final Options options = new Options()
.setCreateIfMissing(true)
.setMaxWriteBufferNumber(10)
.setMinWriteBufferNumberToMerge(10);
final WriteOptions wOpt = new WriteOptions()
.setDisableWAL(true);
final FlushOptions flushOptions = new FlushOptions()
.setWaitForFlush(true)) {
assertThat(flushOptions.waitForFlush()).isTrue();
wOpt.setDisableWAL(true);
db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath());
db.put(wOpt, "key1".getBytes(), "value1".getBytes());
db.put(wOpt, "key2".getBytes(), "value2".getBytes());
db.put(wOpt, "key3".getBytes(), "value3".getBytes());
db.put(wOpt, "key4".getBytes(), "value4".getBytes());
assertThat(db.getProperty("rocksdb.num-entries-active-mem-table")).isEqualTo("4");
db.flush(flushOptions);
assertThat(db.getProperty("rocksdb.num-entries-active-mem-table")).
isEqualTo("0");
} finally {
if (flushOptions != null) {
flushOptions.dispose();
}
if (db != null) {
db.close();
}
if (options != null) {
options.dispose();
}
if (wOpt != null) {
wOpt.dispose();
}
try(final RocksDB db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath())) {
db.put(wOpt, "key1".getBytes(), "value1".getBytes());
db.put(wOpt, "key2".getBytes(), "value2".getBytes());
db.put(wOpt, "key3".getBytes(), "value3".getBytes());
db.put(wOpt, "key4".getBytes(), "value4".getBytes());
assertThat(db.getProperty("rocksdb.num-entries-active-mem-table"))
.isEqualTo("4");
db.flush(flushOptions);
assertThat(db.getProperty("rocksdb.num-entries-active-mem-table"))
.isEqualTo("0");
}
}
}
}

@ -24,81 +24,52 @@ public class InfoLogLevelTest {
@Test
public void testInfoLogLevel() throws RocksDBException,
IOException {
RocksDB db = null;
try {
db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
try (final RocksDB db =
RocksDB.open(dbFolder.getRoot().getAbsolutePath())) {
db.put("key".getBytes(), "value".getBytes());
assertThat(getLogContentsWithoutHeader()).isNotEmpty();
} finally {
if (db != null) {
db.close();
}
}
}
@Test
public void testFatalLogLevel() throws RocksDBException,
public void testFatalLogLevel() throws RocksDBException,
IOException {
RocksDB db = null;
Options options = null;
try {
options = new Options().
setCreateIfMissing(true).
setInfoLogLevel(InfoLogLevel.FATAL_LEVEL);
try (final Options options = new Options().
setCreateIfMissing(true).
setInfoLogLevel(InfoLogLevel.FATAL_LEVEL);
final RocksDB db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath())) {
assertThat(options.infoLogLevel()).
isEqualTo(InfoLogLevel.FATAL_LEVEL);
db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath());
db.put("key".getBytes(), "value".getBytes());
// As InfoLogLevel is set to FATAL_LEVEL, here we expect the log
// content to be empty.
assertThat(getLogContentsWithoutHeader()).isEmpty();
} finally {
if (db != null) {
db.close();
}
if (options != null) {
options.dispose();
}
}
}
@Test
public void testFatalLogLevelWithDBOptions()
throws RocksDBException, IOException {
RocksDB db = null;
Options options = null;
DBOptions dbOptions = null;
try {
dbOptions = new DBOptions().
setInfoLogLevel(InfoLogLevel.FATAL_LEVEL);
options = new Options(dbOptions,
new ColumnFamilyOptions()).
setCreateIfMissing(true);
try (final DBOptions dbOptions = new DBOptions().
setInfoLogLevel(InfoLogLevel.FATAL_LEVEL);
final Options options = new Options(dbOptions,
new ColumnFamilyOptions()).
setCreateIfMissing(true);
final RocksDB db =
RocksDB.open(options, dbFolder.getRoot().getAbsolutePath())) {
assertThat(dbOptions.infoLogLevel()).
isEqualTo(InfoLogLevel.FATAL_LEVEL);
assertThat(options.infoLogLevel()).
isEqualTo(InfoLogLevel.FATAL_LEVEL);
db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath());
db.put("key".getBytes(), "value".getBytes());
assertThat(getLogContentsWithoutHeader()).isEmpty();
} finally {
if (db != null) {
db.close();
}
if (options != null) {
options.dispose();
}
if (dbOptions != null) {
dbOptions.dispose();
}
}
}
@Test(expected = IllegalArgumentException.class)
public void failIfIllegalByteValueProvided() {
  // -1 is not a valid InfoLogLevel byte encoding.
  InfoLogLevel.getInfoLogLevel((byte) -1);
}
@Test
@ -114,9 +85,10 @@ public class InfoLogLevelTest {
* @throws IOException if file is not found.
*/
private String getLogContentsWithoutHeader() throws IOException {
final String separator = Environment.isWindows() ? "\n" : System.getProperty("line.separator");
final String separator = Environment.isWindows() ?
"\n" : System.getProperty("line.separator");
final String[] lines = new String(readAllBytes(get(
dbFolder.getRoot().getAbsolutePath()+ "/LOG"))).split(separator);
dbFolder.getRoot().getAbsolutePath() + "/LOG"))).split(separator);
int first_non_header = lines.length;
// Identify the last line of the header

@ -10,6 +10,7 @@ import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import static org.assertj.core.api.Assertions.assertThat;
@ -25,70 +26,61 @@ public class KeyMayExistTest {
@Test
public void keyMayExist() throws RocksDBException {
RocksDB db = null;
DBOptions options = null;
List<ColumnFamilyDescriptor> cfDescriptors =
new ArrayList<>();
List<ColumnFamilyHandle> columnFamilyHandleList =
new ArrayList<>();
try {
options = new DBOptions();
options.setCreateIfMissing(true)
.setCreateMissingColumnFamilies(true);
// open database using cf names
final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
new ColumnFamilyDescriptor("new_cf".getBytes())
);
cfDescriptors.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY));
cfDescriptors.add(new ColumnFamilyDescriptor("new_cf".getBytes()));
db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath(),
cfDescriptors, columnFamilyHandleList);
assertThat(columnFamilyHandleList.size()).
isEqualTo(2);
db.put("key".getBytes(), "value".getBytes());
// Test without column family
StringBuffer retValue = new StringBuffer();
boolean exists = db.keyMayExist("key".getBytes(), retValue);
assertThat(exists).isTrue();
assertThat(retValue.toString()).
isEqualTo("value");
final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
try (final DBOptions options = new DBOptions()
.setCreateIfMissing(true)
.setCreateMissingColumnFamilies(true);
final RocksDB db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath(),
cfDescriptors, columnFamilyHandleList)) {
try {
assertThat(columnFamilyHandleList.size()).
isEqualTo(2);
db.put("key".getBytes(), "value".getBytes());
// Test without column family
StringBuffer retValue = new StringBuffer();
boolean exists = db.keyMayExist("key".getBytes(), retValue);
assertThat(exists).isTrue();
assertThat(retValue.toString()).isEqualTo("value");
// Test without column family but with readOptions
retValue = new StringBuffer();
exists = db.keyMayExist(new ReadOptions(), "key".getBytes(),
retValue);
assertThat(exists).isTrue();
assertThat(retValue.toString()).
isEqualTo("value");
// Test without column family but with readOptions
try (final ReadOptions readOptions = new ReadOptions()) {
retValue = new StringBuffer();
exists = db.keyMayExist(readOptions, "key".getBytes(), retValue);
assertThat(exists).isTrue();
assertThat(retValue.toString()).isEqualTo("value");
}
// Test with column family
retValue = new StringBuffer();
exists = db.keyMayExist(columnFamilyHandleList.get(0), "key".getBytes(),
retValue);
assertThat(exists).isTrue();
assertThat(retValue.toString()).
isEqualTo("value");
// Test with column family
retValue = new StringBuffer();
exists = db.keyMayExist(columnFamilyHandleList.get(0), "key".getBytes(),
retValue);
assertThat(exists).isTrue();
assertThat(retValue.toString()).isEqualTo("value");
// Test with column family and readOptions
retValue = new StringBuffer();
exists = db.keyMayExist(new ReadOptions(),
columnFamilyHandleList.get(0), "key".getBytes(),
retValue);
assertThat(exists).isTrue();
assertThat(retValue.toString()).
isEqualTo("value");
// Test with column family and readOptions
try (final ReadOptions readOptions = new ReadOptions()) {
retValue = new StringBuffer();
exists = db.keyMayExist(readOptions,
columnFamilyHandleList.get(0), "key".getBytes(),
retValue);
assertThat(exists).isTrue();
assertThat(retValue.toString()).isEqualTo("value");
}
// KeyMayExist in CF1 must return false
assertThat(db.keyMayExist(columnFamilyHandleList.get(1),
"key".getBytes(), retValue)).isFalse();
} finally {
for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandleList) {
columnFamilyHandle.dispose();
}
if (db != null) {
db.close();
}
if (options != null) {
options.dispose();
// KeyMayExist in CF1 must return false
assertThat(db.keyMayExist(columnFamilyHandleList.get(1),
"key".getBytes(), retValue)).isFalse();
} finally {
for (final ColumnFamilyHandle columnFamilyHandle :
columnFamilyHandleList) {
columnFamilyHandle.close();
}
}
}
}

@ -6,6 +6,7 @@ import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
@ -19,202 +20,165 @@ public class LoggerTest {
@Rule
public TemporaryFolder dbFolder = new TemporaryFolder();
private AtomicInteger logMessageCounter = new AtomicInteger();
@Test
public void customLogger() throws RocksDBException {
RocksDB db = null;
logMessageCounter.set(0);
try {
// Setup options
final Options options = new Options().
setInfoLogLevel(InfoLogLevel.DEBUG_LEVEL).
setCreateIfMissing(true);
// Create new logger with max log level passed by options
Logger logger = new Logger(options) {
@Override
protected void log(InfoLogLevel infoLogLevel, String logMsg) {
assertThat(logMsg).isNotNull();
assertThat(logMsg.length()).isGreaterThan(0);
logMessageCounter.incrementAndGet();
}
};
final AtomicInteger logMessageCounter = new AtomicInteger();
try (final Options options = new Options().
setInfoLogLevel(InfoLogLevel.DEBUG_LEVEL).
setCreateIfMissing(true);
final Logger logger = new Logger(options) {
// Create new logger with max log level passed by options
@Override
protected void log(InfoLogLevel infoLogLevel, String logMsg) {
assertThat(logMsg).isNotNull();
assertThat(logMsg.length()).isGreaterThan(0);
logMessageCounter.incrementAndGet();
}
}
) {
// Set custom logger to options
options.setLogger(logger);
db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath());
// there should be more than zero received log messages in
// debug level.
assertThat(logMessageCounter.get()).isGreaterThan(0);
} finally {
if (db != null) {
db.close();
try (final RocksDB db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath())) {
// there should be more than zero received log messages in
// debug level.
assertThat(logMessageCounter.get()).isGreaterThan(0);
}
}
logMessageCounter.set(0);
}
@Test
public void fatalLogger() throws RocksDBException {
RocksDB db = null;
logMessageCounter.set(0);
try {
// Setup options
final Options options = new Options().
setInfoLogLevel(InfoLogLevel.FATAL_LEVEL).
setCreateIfMissing(true);
// Create new logger with max log level passed by options
Logger logger = new Logger(options) {
@Override
protected void log(InfoLogLevel infoLogLevel, String logMsg) {
assertThat(logMsg).isNotNull();
assertThat(logMsg.length()).isGreaterThan(0);
logMessageCounter.incrementAndGet();
}
};
final AtomicInteger logMessageCounter = new AtomicInteger();
try (final Options options = new Options().
setInfoLogLevel(InfoLogLevel.FATAL_LEVEL).
setCreateIfMissing(true);
final Logger logger = new Logger(options) {
// Create new logger with max log level passed by options
@Override
protected void log(InfoLogLevel infoLogLevel, String logMsg) {
assertThat(logMsg).isNotNull();
assertThat(logMsg.length()).isGreaterThan(0);
logMessageCounter.incrementAndGet();
}
}
) {
// Set custom logger to options
options.setLogger(logger);
db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath());
// there should be zero messages
// using fatal level as log level.
assertThat(logMessageCounter.get()).isEqualTo(0);
} finally {
if (db != null) {
db.close();
try (final RocksDB db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath())) {
// there should be zero messages
// using fatal level as log level.
assertThat(logMessageCounter.get()).isEqualTo(0);
}
}
logMessageCounter.set(0);
}
@Test
public void dbOptionsLogger() throws RocksDBException {
RocksDB db = null;
Logger logger = null;
List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
List<ColumnFamilyDescriptor> cfDescriptors = new ArrayList<>();
cfDescriptors.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY));
logMessageCounter.set(0);
try {
// Setup options
final DBOptions options = new DBOptions().
setInfoLogLevel(InfoLogLevel.FATAL_LEVEL).
setCreateIfMissing(true);
// Create new logger with max log level passed by options
logger = new Logger(options) {
@Override
protected void log(InfoLogLevel infoLogLevel, String logMsg) {
assertThat(logMsg).isNotNull();
assertThat(logMsg.length()).isGreaterThan(0);
logMessageCounter.incrementAndGet();
}
};
final AtomicInteger logMessageCounter = new AtomicInteger();
try (final DBOptions options = new DBOptions().
setInfoLogLevel(InfoLogLevel.FATAL_LEVEL).
setCreateIfMissing(true);
final Logger logger = new Logger(options) {
// Create new logger with max log level passed by options
@Override
protected void log(InfoLogLevel infoLogLevel, String logMsg) {
assertThat(logMsg).isNotNull();
assertThat(logMsg.length()).isGreaterThan(0);
logMessageCounter.incrementAndGet();
}
}
) {
// Set custom logger to options
options.setLogger(logger);
db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath(),
cfDescriptors, cfHandles);
// there should be zero messages
// using fatal level as log level.
assertThat(logMessageCounter.get()).isEqualTo(0);
logMessageCounter.set(0);
} finally {
for (ColumnFamilyHandle columnFamilyHandle : cfHandles) {
columnFamilyHandle.dispose();
}
if (db != null) {
db.close();
}
if (logger != null) {
logger.dispose();
final List<ColumnFamilyDescriptor> cfDescriptors =
Arrays.asList(
new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY));
final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
try (final RocksDB db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath(),
cfDescriptors, cfHandles)) {
try {
// there should be zero messages
// using fatal level as log level.
assertThat(logMessageCounter.get()).isEqualTo(0);
} finally {
for (final ColumnFamilyHandle columnFamilyHandle : cfHandles) {
columnFamilyHandle.close();
}
}
}
}
}
@Test
public void setInfoLogLevel() {
Logger logger = null;
try {
// Setup options
final Options options = new Options().
setInfoLogLevel(InfoLogLevel.FATAL_LEVEL).
setCreateIfMissing(true);
// Create new logger with max log level passed by options
logger = new Logger(options) {
@Override
protected void log(InfoLogLevel infoLogLevel, String logMsg) {
assertThat(logMsg).isNotNull();
assertThat(logMsg.length()).isGreaterThan(0);
logMessageCounter.incrementAndGet();
}
};
final AtomicInteger logMessageCounter = new AtomicInteger();
try (final Options options = new Options().
setInfoLogLevel(InfoLogLevel.FATAL_LEVEL).
setCreateIfMissing(true);
final Logger logger = new Logger(options) {
// Create new logger with max log level passed by options
@Override
protected void log(InfoLogLevel infoLogLevel, String logMsg) {
assertThat(logMsg).isNotNull();
assertThat(logMsg.length()).isGreaterThan(0);
logMessageCounter.incrementAndGet();
}
}
) {
assertThat(logger.infoLogLevel()).
isEqualTo(InfoLogLevel.FATAL_LEVEL);
logger.setInfoLogLevel(InfoLogLevel.DEBUG_LEVEL);
assertThat(logger.infoLogLevel()).
isEqualTo(InfoLogLevel.DEBUG_LEVEL);
} finally {
if (logger != null) {
logger.dispose();
}
}
}
@Test
public void changeLogLevelAtRuntime() throws RocksDBException {
RocksDB db = null;
logMessageCounter.set(0);
try {
// Setup options
final Options options = new Options().
setInfoLogLevel(InfoLogLevel.FATAL_LEVEL).
setCreateIfMissing(true);
// Create new logger with max log level passed by options
Logger logger = new Logger(options) {
@Override
protected void log(InfoLogLevel infoLogLevel, String logMsg) {
assertThat(logMsg).isNotNull();
assertThat(logMsg.length()).isGreaterThan(0);
logMessageCounter.incrementAndGet();
}
};
final AtomicInteger logMessageCounter = new AtomicInteger();
try (final Options options = new Options().
setInfoLogLevel(InfoLogLevel.FATAL_LEVEL).
setCreateIfMissing(true);
// Create new logger with max log level passed by options
final Logger logger = new Logger(options) {
@Override
protected void log(InfoLogLevel infoLogLevel, String logMsg) {
assertThat(logMsg).isNotNull();
assertThat(logMsg.length()).isGreaterThan(0);
logMessageCounter.incrementAndGet();
}
}
) {
// Set custom logger to options
options.setLogger(logger);
db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath());
// there should be zero messages
// using fatal level as log level.
assertThat(logMessageCounter.get()).isEqualTo(0);
try (final RocksDB db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath())) {
// change log level to debug level
logger.setInfoLogLevel(InfoLogLevel.DEBUG_LEVEL);
// there should be zero messages
// using fatal level as log level.
assertThat(logMessageCounter.get()).isEqualTo(0);
db.put("key".getBytes(), "value".getBytes());
db.flush(new FlushOptions().setWaitForFlush(true));
// change log level to debug level
logger.setInfoLogLevel(InfoLogLevel.DEBUG_LEVEL);
// messages shall be received due to previous actions.
assertThat(logMessageCounter.get()).isNotEqualTo(0);
db.put("key".getBytes(), "value".getBytes());
db.flush(new FlushOptions().setWaitForFlush(true));
} finally {
if (db != null) {
db.close();
// messages shall be received due to previous actions.
assertThat(logMessageCounter.get()).isNotEqualTo(0);
}
}
logMessageCounter.set(0);
}
}

@ -18,9 +18,7 @@ public class MemTableTest {
@Test
public void hashSkipListMemTable() throws RocksDBException {
Options options = null;
try {
options = new Options();
try(final Options options = new Options()) {
// Test HashSkipListMemTableConfig
HashSkipListMemTableConfig memTableConfig =
new HashSkipListMemTableConfig();
@ -40,18 +38,12 @@ public class MemTableTest {
assertThat(memTableConfig.branchingFactor()).
isEqualTo(6);
options.setMemTableConfig(memTableConfig);
} finally {
if (options != null) {
options.dispose();
}
}
}
@Test
public void skipListMemTable() throws RocksDBException {
Options options = null;
try {
options = new Options();
try(final Options options = new Options()) {
SkipListMemTableConfig skipMemTableConfig =
new SkipListMemTableConfig();
assertThat(skipMemTableConfig.lookahead()).
@ -60,19 +52,12 @@ public class MemTableTest {
assertThat(skipMemTableConfig.lookahead()).
isEqualTo(20);
options.setMemTableConfig(skipMemTableConfig);
options.dispose();
} finally {
if (options != null) {
options.dispose();
}
}
}
@Test
public void hashLinkedListMemTable() throws RocksDBException {
Options options = null;
try {
options = new Options();
try(final Options options = new Options()) {
HashLinkedListMemTableConfig hashLinkedListMemTableConfig =
new HashLinkedListMemTableConfig();
assertThat(hashLinkedListMemTableConfig.bucketCount()).
@ -107,18 +92,12 @@ public class MemTableTest {
thresholdUseSkiplist()).
isEqualTo(29);
options.setMemTableConfig(hashLinkedListMemTableConfig);
} finally {
if (options != null) {
options.dispose();
}
}
}
@Test
public void vectorMemTable() throws RocksDBException {
Options options = null;
try {
options = new Options();
try(final Options options = new Options()) {
VectorMemTableConfig vectorMemTableConfig =
new VectorMemTableConfig();
assertThat(vectorMemTableConfig.reservedSize()).
@ -127,11 +106,6 @@ public class MemTableTest {
assertThat(vectorMemTableConfig.reservedSize()).
isEqualTo(123);
options.setMemTableConfig(vectorMemTableConfig);
options.dispose();
} finally {
if (options != null) {
options.dispose();
}
}
}
}

@ -5,6 +5,7 @@
package org.rocksdb;
import java.util.Arrays;
import java.util.List;
import java.util.ArrayList;
@ -27,78 +28,60 @@ public class MergeTest {
@Test
public void stringOption()
throws InterruptedException, RocksDBException {
RocksDB db = null;
Options opt = null;
try {
String db_path_string =
dbFolder.getRoot().getAbsolutePath();
opt = new Options();
opt.setCreateIfMissing(true);
opt.setMergeOperatorName("stringappend");
db = RocksDB.open(opt, db_path_string);
try (final Options opt = new Options()
.setCreateIfMissing(true)
.setMergeOperatorName("stringappend");
final RocksDB db = RocksDB.open(opt,
dbFolder.getRoot().getAbsolutePath())) {
// writing aa under key
db.put("key".getBytes(), "aa".getBytes());
// merge bb under key
db.merge("key".getBytes(), "bb".getBytes());
byte[] value = db.get("key".getBytes());
String strValue = new String(value);
final byte[] value = db.get("key".getBytes());
final String strValue = new String(value);
assertThat(strValue).isEqualTo("aa,bb");
} finally {
if (db != null) {
db.close();
}
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void cFStringOption()
throws InterruptedException, RocksDBException {
RocksDB db = null;
DBOptions opt = null;
List<ColumnFamilyHandle> columnFamilyHandleList =
new ArrayList<>();
try {
String db_path_string =
dbFolder.getRoot().getAbsolutePath();
opt = new DBOptions();
opt.setCreateIfMissing(true);
opt.setCreateMissingColumnFamilies(true);
List<ColumnFamilyDescriptor> cfDescriptors =
new ArrayList<>();
cfDescriptors.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY,
new ColumnFamilyOptions().setMergeOperatorName(
"stringappend")));
cfDescriptors.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY,
new ColumnFamilyOptions().setMergeOperatorName(
"stringappend")));
db = RocksDB.open(opt, db_path_string,
cfDescriptors, columnFamilyHandleList);
// writing aa under key
db.put(columnFamilyHandleList.get(1),
"cfkey".getBytes(), "aa".getBytes());
// merge bb under key
db.merge(columnFamilyHandleList.get(1),
"cfkey".getBytes(), "bb".getBytes());
byte[] value = db.get(columnFamilyHandleList.get(1), "cfkey".getBytes());
String strValue = new String(value);
assertThat(strValue).isEqualTo("aa,bb");
} finally {
for (ColumnFamilyHandle handle : columnFamilyHandleList) {
handle.dispose();
}
if (db != null) {
db.close();
}
if (opt != null) {
opt.dispose();
try (final ColumnFamilyOptions cfOpt1 = new ColumnFamilyOptions()
.setMergeOperatorName("stringappend");
final ColumnFamilyOptions cfOpt2 = new ColumnFamilyOptions()
.setMergeOperatorName("stringappend")
) {
final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpt1),
new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpt2)
);
final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
try (final DBOptions opt = new DBOptions()
.setCreateIfMissing(true)
.setCreateMissingColumnFamilies(true);
final RocksDB db = RocksDB.open(opt,
dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
columnFamilyHandleList)) {
try {
// writing aa under key
db.put(columnFamilyHandleList.get(1),
"cfkey".getBytes(), "aa".getBytes());
// merge bb under key
db.merge(columnFamilyHandleList.get(1),
"cfkey".getBytes(), "bb".getBytes());
byte[] value = db.get(columnFamilyHandleList.get(1),
"cfkey".getBytes());
String strValue = new String(value);
assertThat(strValue).isEqualTo("aa,bb");
} finally {
for (final ColumnFamilyHandle handle : columnFamilyHandleList) {
handle.close();
}
}
}
}
}
@ -106,99 +89,85 @@ public class MergeTest {
@Test
public void operatorOption()
throws InterruptedException, RocksDBException {
RocksDB db = null;
Options opt = null;
try {
String db_path_string =
dbFolder.getRoot().getAbsolutePath();
opt = new Options();
opt.setCreateIfMissing(true);
StringAppendOperator stringAppendOperator = new StringAppendOperator();
opt.setMergeOperator(stringAppendOperator);
db = RocksDB.open(opt, db_path_string);
final StringAppendOperator stringAppendOperator =
new StringAppendOperator();
try (final Options opt = new Options()
.setCreateIfMissing(true)
.setMergeOperator(stringAppendOperator);
final RocksDB db = RocksDB.open(opt,
dbFolder.getRoot().getAbsolutePath())) {
// Writing aa under key
db.put("key".getBytes(), "aa".getBytes());
// Writing bb under key
db.merge("key".getBytes(), "bb".getBytes());
byte[] value = db.get("key".getBytes());
String strValue = new String(value);
final byte[] value = db.get("key".getBytes());
final String strValue = new String(value);
assertThat(strValue).isEqualTo("aa,bb");
} finally {
if (db != null) {
db.close();
}
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void cFOperatorOption()
throws InterruptedException, RocksDBException {
RocksDB db = null;
DBOptions opt = null;
ColumnFamilyHandle cfHandle = null;
List<ColumnFamilyDescriptor> cfDescriptors =
new ArrayList<>();
List<ColumnFamilyHandle> columnFamilyHandleList =
new ArrayList<>();
try {
String db_path_string =
dbFolder.getRoot().getAbsolutePath();
opt = new DBOptions();
opt.setCreateIfMissing(true);
opt.setCreateMissingColumnFamilies(true);
StringAppendOperator stringAppendOperator = new StringAppendOperator();
cfDescriptors.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY,
new ColumnFamilyOptions().setMergeOperator(
stringAppendOperator)));
cfDescriptors.add(new ColumnFamilyDescriptor("new_cf".getBytes(),
new ColumnFamilyOptions().setMergeOperator(
stringAppendOperator)));
db = RocksDB.open(opt, db_path_string,
cfDescriptors, columnFamilyHandleList);
// writing aa under key
db.put(columnFamilyHandleList.get(1),
"cfkey".getBytes(), "aa".getBytes());
// merge bb under key
db.merge(columnFamilyHandleList.get(1),
"cfkey".getBytes(), "bb".getBytes());
byte[] value = db.get(columnFamilyHandleList.get(1), "cfkey".getBytes());
String strValue = new String(value);
// Test also with createColumnFamily
cfHandle = db.createColumnFamily(
new ColumnFamilyDescriptor("new_cf2".getBytes(),
new ColumnFamilyOptions().setMergeOperator(stringAppendOperator)));
// writing xx under cfkey2
db.put(cfHandle, "cfkey2".getBytes(), "xx".getBytes());
// merge yy under cfkey2
db.merge(cfHandle, new WriteOptions(), "cfkey2".getBytes(), "yy".getBytes());
value = db.get(cfHandle, "cfkey2".getBytes());
String strValueTmpCf = new String(value);
assertThat(strValue).isEqualTo("aa,bb");
assertThat(strValueTmpCf).isEqualTo("xx,yy");
} finally {
for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandleList) {
columnFamilyHandle.dispose();
}
if (cfHandle != null) {
cfHandle.dispose();
}
if (db != null) {
db.close();
}
if (opt != null) {
opt.dispose();
final StringAppendOperator stringAppendOperator =
new StringAppendOperator();
try (final ColumnFamilyOptions cfOpt1 = new ColumnFamilyOptions()
.setMergeOperator(stringAppendOperator);
final ColumnFamilyOptions cfOpt2 = new ColumnFamilyOptions()
.setMergeOperator(stringAppendOperator)
) {
final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpt1),
new ColumnFamilyDescriptor("new_cf".getBytes(), cfOpt2)
);
final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
try (final DBOptions opt = new DBOptions()
.setCreateIfMissing(true)
.setCreateMissingColumnFamilies(true);
final RocksDB db = RocksDB.open(opt,
dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
columnFamilyHandleList)
) {
try {
// writing aa under key
db.put(columnFamilyHandleList.get(1),
"cfkey".getBytes(), "aa".getBytes());
// merge bb under key
db.merge(columnFamilyHandleList.get(1),
"cfkey".getBytes(), "bb".getBytes());
byte[] value = db.get(columnFamilyHandleList.get(1),
"cfkey".getBytes());
String strValue = new String(value);
// Test also with createColumnFamily
try (final ColumnFamilyOptions cfHandleOpts =
new ColumnFamilyOptions()
.setMergeOperator(stringAppendOperator);
final ColumnFamilyHandle cfHandle =
db.createColumnFamily(
new ColumnFamilyDescriptor("new_cf2".getBytes(),
cfHandleOpts))
) {
// writing xx under cfkey2
db.put(cfHandle, "cfkey2".getBytes(), "xx".getBytes());
// merge yy under cfkey2
db.merge(cfHandle, new WriteOptions(), "cfkey2".getBytes(),
"yy".getBytes());
value = db.get(cfHandle, "cfkey2".getBytes());
String strValueTmpCf = new String(value);
assertThat(strValue).isEqualTo("aa,bb");
assertThat(strValueTmpCf).isEqualTo("xx,yy");
}
} finally {
for (final ColumnFamilyHandle columnFamilyHandle :
columnFamilyHandleList) {
columnFamilyHandle.close();
}
}
}
}
}
@ -206,97 +175,67 @@ public class MergeTest {
@Test
public void operatorGcBehaviour()
throws RocksDBException {
Options opt = null;
RocksDB db = null;
try {
String db_path_string =
dbFolder.getRoot().getAbsolutePath();
opt = new Options();
opt.setCreateIfMissing(true);
StringAppendOperator stringAppendOperator = new StringAppendOperator();
opt.setMergeOperator(stringAppendOperator);
db = RocksDB.open(opt, db_path_string);
db.close();
opt.dispose();
System.gc();
System.runFinalization();
// test reuse
opt = new Options();
opt.setMergeOperator(stringAppendOperator);
db = RocksDB.open(opt, db_path_string);
db.close();
opt.dispose();
System.gc();
System.runFinalization();
// test param init
opt = new Options();
opt.setMergeOperator(new StringAppendOperator());
db = RocksDB.open(opt, db_path_string);
db.close();
opt.dispose();
System.gc();
System.runFinalization();
// test replace one with another merge operator instance
opt = new Options();
opt.setMergeOperator(stringAppendOperator);
StringAppendOperator newStringAppendOperator = new StringAppendOperator();
final StringAppendOperator stringAppendOperator
= new StringAppendOperator();
try (final Options opt = new Options()
.setCreateIfMissing(true)
.setMergeOperator(stringAppendOperator);
final RocksDB db = RocksDB.open(opt,
dbFolder.getRoot().getAbsolutePath())) {
//no-op
}
// test reuse
try (final Options opt = new Options()
.setMergeOperator(stringAppendOperator);
final RocksDB db = RocksDB.open(opt,
dbFolder.getRoot().getAbsolutePath())) {
//no-op
}
// test param init
try (final Options opt = new Options()
.setMergeOperator(new StringAppendOperator());
final RocksDB db = RocksDB.open(opt,
dbFolder.getRoot().getAbsolutePath())) {
//no-op
}
// test replace one with another merge operator instance
try (final Options opt = new Options()
.setMergeOperator(stringAppendOperator)) {
final StringAppendOperator newStringAppendOperator
= new StringAppendOperator();
opt.setMergeOperator(newStringAppendOperator);
db = RocksDB.open(opt, db_path_string);
db.close();
opt.dispose();
} finally {
if (db != null) {
db.close();
}
if (opt != null) {
opt.dispose();
try (final RocksDB db = RocksDB.open(opt,
dbFolder.getRoot().getAbsolutePath())) {
//no-op
}
}
}
@Test
public void emptyStringInSetMergeOperatorByName() {
Options opt = null;
ColumnFamilyOptions cOpt = null;
try {
opt = new Options();
cOpt = new ColumnFamilyOptions();
opt.setMergeOperatorName("");
cOpt.setMergeOperatorName("");
} finally {
if (opt != null) {
opt.dispose();
}
if (cOpt != null) {
cOpt.dispose();
}
try (final Options opt = new Options()
.setMergeOperatorName("");
final ColumnFamilyOptions cOpt = new ColumnFamilyOptions()
.setMergeOperatorName("")) {
//no-op
}
}
@Test(expected = IllegalArgumentException.class)
public void nullStringInSetMergeOperatorByNameOptions() {
Options opt = null;
try {
opt = new Options();
try (final Options opt = new Options()) {
opt.setMergeOperatorName(null);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test(expected = IllegalArgumentException.class)
public void
nullStringInSetMergeOperatorByNameColumnFamilyOptions() {
ColumnFamilyOptions opt = null;
try {
opt = new ColumnFamilyOptions();
nullStringInSetMergeOperatorByNameColumnFamilyOptions() {
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
opt.setMergeOperatorName(null);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
}

@ -19,38 +19,37 @@ public class MixedOptionsTest {
@Test
public void mixedOptionsTest(){
// Set a table factory and check the names
ColumnFamilyOptions cfOptions = new ColumnFamilyOptions();
cfOptions.setTableFormatConfig(new BlockBasedTableConfig().
setFilter(new BloomFilter()));
assertThat(cfOptions.tableFactoryName()).isEqualTo(
"BlockBasedTable");
cfOptions.setTableFormatConfig(new PlainTableConfig());
assertThat(cfOptions.tableFactoryName()).isEqualTo("PlainTable");
// Initialize a dbOptions object from cf options and
// db options
DBOptions dbOptions = new DBOptions();
Options options = new Options(dbOptions, cfOptions);
assertThat(options.tableFactoryName()).isEqualTo("PlainTable");
// Free instances
options.dispose();
options = null;
cfOptions.dispose();
cfOptions = null;
dbOptions.dispose();
dbOptions = null;
System.gc();
System.runFinalization();
try(final Filter bloomFilter = new BloomFilter();
final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions()
.setTableFormatConfig(
new BlockBasedTableConfig().setFilter(bloomFilter))
) {
assertThat(cfOptions.tableFactoryName()).isEqualTo(
"BlockBasedTable");
cfOptions.setTableFormatConfig(new PlainTableConfig());
assertThat(cfOptions.tableFactoryName()).isEqualTo("PlainTable");
// Initialize a dbOptions object from cf options and
// db options
try (final DBOptions dbOptions = new DBOptions();
final Options options = new Options(dbOptions, cfOptions)) {
assertThat(options.tableFactoryName()).isEqualTo("PlainTable");
// Free instances
}
}
// Test Optimize for statements
cfOptions = new ColumnFamilyOptions();
try(final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions()) {
cfOptions.optimizeUniversalStyleCompaction();
cfOptions.optimizeLevelStyleCompaction();
cfOptions.optimizeForPointLookup(1024);
options = new Options();
options.optimizeLevelStyleCompaction();
options.optimizeLevelStyleCompaction(400);
options.optimizeUniversalStyleCompaction();
options.optimizeUniversalStyleCompaction(400);
options.optimizeForPointLookup(1024);
options.prepareForBulkLoad();
try(final Options options = new Options()) {
options.optimizeLevelStyleCompaction();
options.optimizeLevelStyleCompaction(400);
options.optimizeUniversalStyleCompaction();
options.optimizeUniversalStyleCompaction(400);
options.optimizeForPointLookup(1024);
options.prepareForBulkLoad();
}
}
}
}

@ -23,7 +23,7 @@ public class NativeLibraryLoaderTest {
public void tempFolder() throws IOException {
NativeLibraryLoader.getInstance().loadLibraryFromJarToTemp(
temporaryFolder.getRoot().getAbsolutePath());
Path path = Paths.get(temporaryFolder.getRoot().getAbsolutePath(),
final Path path = Paths.get(temporaryFolder.getRoot().getAbsolutePath(),
Environment.getJniLibraryFileName("rocksdb"));
assertThat(Files.exists(path)).isTrue();
assertThat(Files.isReadable(path)).isTrue();

File diff suppressed because it is too large Load Diff

@ -80,16 +80,10 @@ public class PlainTableConfigTest {
@Test
public void plainTableConfig() {
Options opt = null;
try {
opt = new Options();
PlainTableConfig plainTableConfig = new PlainTableConfig();
try(final Options opt = new Options()) {
final PlainTableConfig plainTableConfig = new PlainTableConfig();
opt.setTableFormatConfig(plainTableConfig);
assertThat(opt.tableFactoryName()).isEqualTo("PlainTable");
} finally {
if (opt != null) {
opt.dispose();
}
}
}
}

@ -18,7 +18,7 @@ public class PlatformRandomHelper {
* @return boolean value indicating if operating system is 64 Bit.
*/
public static boolean isOs64Bit(){
boolean is64Bit;
final boolean is64Bit;
if (System.getProperty("os.name").contains("Windows")) {
is64Bit = (System.getenv("ProgramFiles(x86)") != null);
} else {

@ -10,6 +10,7 @@ import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import static org.assertj.core.api.Assertions.assertThat;
@ -25,340 +26,279 @@ public class ReadOnlyTest {
@Test
public void readOnlyOpen() throws RocksDBException {
RocksDB db = null;
RocksDB db2 = null;
RocksDB db3 = null;
Options options = null;
List<ColumnFamilyHandle> columnFamilyHandleList =
new ArrayList<>();
List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList =
new ArrayList<>();
List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList2 =
new ArrayList<>();
try {
options = new Options();
options.setCreateIfMissing(true);
db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath());
try (final Options options = new Options()
.setCreateIfMissing(true);
final RocksDB db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath())) {
db.put("key".getBytes(), "value".getBytes());
db2 = RocksDB.openReadOnly(
dbFolder.getRoot().getAbsolutePath());
assertThat("value").
isEqualTo(new String(db2.get("key".getBytes())));
db.close();
db2.close();
List<ColumnFamilyDescriptor> cfDescriptors = new ArrayList<>();
cfDescriptors.add(
new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY,
new ColumnFamilyOptions()));
db = RocksDB.open(
dbFolder.getRoot().getAbsolutePath(), cfDescriptors, columnFamilyHandleList);
columnFamilyHandleList.add(db.createColumnFamily(
new ColumnFamilyDescriptor("new_cf".getBytes(), new ColumnFamilyOptions())));
columnFamilyHandleList.add(db.createColumnFamily(
new ColumnFamilyDescriptor("new_cf2".getBytes(), new ColumnFamilyOptions())));
db.put(columnFamilyHandleList.get(2), "key2".getBytes(),
"value2".getBytes());
db2 = RocksDB.openReadOnly(
dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
readOnlyColumnFamilyHandleList);
assertThat(db2.get("key2".getBytes())).isNull();
assertThat(db2.get(readOnlyColumnFamilyHandleList.get(0), "key2".getBytes())).
isNull();
cfDescriptors.clear();
cfDescriptors.add(
new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY,
new ColumnFamilyOptions()));
cfDescriptors.add(
new ColumnFamilyDescriptor("new_cf2".getBytes(), new ColumnFamilyOptions()));
db3 = RocksDB.openReadOnly(
dbFolder.getRoot().getAbsolutePath(), cfDescriptors, readOnlyColumnFamilyHandleList2);
assertThat(new String(db3.get(readOnlyColumnFamilyHandleList2.get(1),
"key2".getBytes()))).isEqualTo("value2");
} finally {
for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandleList) {
columnFamilyHandle.dispose();
}
if (db != null) {
db.close();
}
for (ColumnFamilyHandle columnFamilyHandle : readOnlyColumnFamilyHandleList) {
columnFamilyHandle.dispose();
try (final RocksDB db2 = RocksDB.openReadOnly(
dbFolder.getRoot().getAbsolutePath())) {
assertThat("value").
isEqualTo(new String(db2.get("key".getBytes())));
}
if (db2 != null) {
db2.close();
}
for (ColumnFamilyHandle columnFamilyHandle : readOnlyColumnFamilyHandleList2) {
columnFamilyHandle.dispose();
}
if (db3 != null) {
db3.close();
}
if (options != null) {
options.dispose();
}
try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
final List<ColumnFamilyDescriptor> cfDescriptors = new ArrayList<>();
cfDescriptors.add(new ColumnFamilyDescriptor(
RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts));
final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath(),
cfDescriptors, columnFamilyHandleList)) {
try (final ColumnFamilyOptions newCfOpts = new ColumnFamilyOptions();
final ColumnFamilyOptions newCf2Opts = new ColumnFamilyOptions()
) {
columnFamilyHandleList.add(db.createColumnFamily(
new ColumnFamilyDescriptor("new_cf".getBytes(), newCfOpts)));
columnFamilyHandleList.add(db.createColumnFamily(
new ColumnFamilyDescriptor("new_cf2".getBytes(), newCf2Opts)));
db.put(columnFamilyHandleList.get(2), "key2".getBytes(),
"value2".getBytes());
final List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList =
new ArrayList<>();
try (final RocksDB db2 = RocksDB.openReadOnly(
dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
readOnlyColumnFamilyHandleList)) {
try (final ColumnFamilyOptions newCfOpts2 =
new ColumnFamilyOptions();
final ColumnFamilyOptions newCf2Opts2 =
new ColumnFamilyOptions()
) {
assertThat(db2.get("key2".getBytes())).isNull();
assertThat(db2.get(readOnlyColumnFamilyHandleList.get(0),
"key2".getBytes())).
isNull();
cfDescriptors.clear();
cfDescriptors.add(
new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY,
newCfOpts2));
cfDescriptors.add(new ColumnFamilyDescriptor("new_cf2".getBytes(),
newCf2Opts2));
final List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList2
= new ArrayList<>();
try (final RocksDB db3 = RocksDB.openReadOnly(
dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
readOnlyColumnFamilyHandleList2)) {
try {
assertThat(new String(db3.get(
readOnlyColumnFamilyHandleList2.get(1),
"key2".getBytes()))).isEqualTo("value2");
} finally {
for (final ColumnFamilyHandle columnFamilyHandle :
readOnlyColumnFamilyHandleList2) {
columnFamilyHandle.close();
}
}
}
} finally {
for (final ColumnFamilyHandle columnFamilyHandle :
readOnlyColumnFamilyHandleList) {
columnFamilyHandle.close();
}
}
}
} finally {
for (final ColumnFamilyHandle columnFamilyHandle :
columnFamilyHandleList) {
columnFamilyHandle.close();
}
}
}
}
}
@Test(expected = RocksDBException.class)
public void failToWriteInReadOnly() throws RocksDBException {
RocksDB db = null;
RocksDB rDb = null;
Options options = null;
List<ColumnFamilyDescriptor> cfDescriptors = new ArrayList<>();
List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList =
new ArrayList<>();
try {
try (final Options options = new Options()
.setCreateIfMissing(true)) {
cfDescriptors.add(
new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY,
new ColumnFamilyOptions()));
try (final RocksDB db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath())) {
//no-op
}
}
options = new Options();
options.setCreateIfMissing(true);
try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts)
);
db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath());
db.close();
rDb = RocksDB.openReadOnly(
final List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList =
new ArrayList<>();
try (final RocksDB rDb = RocksDB.openReadOnly(
dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
readOnlyColumnFamilyHandleList);
// test that put fails in readonly mode
rDb.put("key".getBytes(), "value".getBytes());
} finally {
for (ColumnFamilyHandle columnFamilyHandle : readOnlyColumnFamilyHandleList) {
columnFamilyHandle.dispose();
}
if (db != null) {
db.close();
}
if (rDb != null) {
rDb.close();
}
if (options != null) {
options.dispose();
readOnlyColumnFamilyHandleList)) {
try {
// test that put fails in readonly mode
rDb.put("key".getBytes(), "value".getBytes());
} finally {
for (final ColumnFamilyHandle columnFamilyHandle :
readOnlyColumnFamilyHandleList) {
columnFamilyHandle.close();
}
}
}
}
}
@Test(expected = RocksDBException.class)
public void failToCFWriteInReadOnly() throws RocksDBException {
RocksDB db = null;
RocksDB rDb = null;
Options options = null;
List<ColumnFamilyDescriptor> cfDescriptors = new ArrayList<>();
List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList =
new ArrayList<>();
try {
cfDescriptors.add(
new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY,
new ColumnFamilyOptions()));
options = new Options();
options.setCreateIfMissing(true);
try (final Options options = new Options().setCreateIfMissing(true);
final RocksDB db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath())) {
//no-op
}
db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath());
db.close();
rDb = RocksDB.openReadOnly(
try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts)
);
final List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList =
new ArrayList<>();
try (final RocksDB rDb = RocksDB.openReadOnly(
dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
readOnlyColumnFamilyHandleList);
rDb.put(readOnlyColumnFamilyHandleList.get(0),
"key".getBytes(), "value".getBytes());
} finally {
for (ColumnFamilyHandle columnFamilyHandle : readOnlyColumnFamilyHandleList) {
columnFamilyHandle.dispose();
}
if (db != null) {
db.close();
}
if (rDb != null) {
rDb.close();
}
if (options != null) {
options.dispose();
readOnlyColumnFamilyHandleList)) {
try {
rDb.put(readOnlyColumnFamilyHandleList.get(0),
"key".getBytes(), "value".getBytes());
} finally {
for (final ColumnFamilyHandle columnFamilyHandle :
readOnlyColumnFamilyHandleList) {
columnFamilyHandle.close();
}
}
}
}
}
@Test(expected = RocksDBException.class)
public void failToRemoveInReadOnly() throws RocksDBException {
RocksDB db = null;
RocksDB rDb = null;
Options options = null;
List<ColumnFamilyDescriptor> cfDescriptors = new ArrayList<>();
List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList =
new ArrayList<>();
try {
cfDescriptors.add(
new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY,
new ColumnFamilyOptions()));
try (final Options options = new Options().setCreateIfMissing(true);
final RocksDB db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath())) {
//no-op
}
options = new Options();
options.setCreateIfMissing(true);
try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts)
);
db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath());
db.close();
rDb = RocksDB.openReadOnly(
dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
readOnlyColumnFamilyHandleList);
final List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList =
new ArrayList<>();
rDb.remove("key".getBytes());
} finally {
for (ColumnFamilyHandle columnFamilyHandle : readOnlyColumnFamilyHandleList) {
columnFamilyHandle.dispose();
}
if (db != null) {
db.close();
}
if (rDb != null) {
rDb.close();
}
if (options != null) {
options.dispose();
try (final RocksDB rDb = RocksDB.openReadOnly(
dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
readOnlyColumnFamilyHandleList)) {
try {
rDb.remove("key".getBytes());
} finally {
for (final ColumnFamilyHandle columnFamilyHandle :
readOnlyColumnFamilyHandleList) {
columnFamilyHandle.close();
}
}
}
}
}
@Test(expected = RocksDBException.class)
public void failToCFRemoveInReadOnly() throws RocksDBException {
RocksDB db = null;
RocksDB rDb = null;
Options options = null;
List<ColumnFamilyDescriptor> cfDescriptors = new ArrayList<>();
List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList =
new ArrayList<>();
try {
cfDescriptors.add(
new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY,
new ColumnFamilyOptions()));
options = new Options();
options.setCreateIfMissing(true);
try (final Options options = new Options().setCreateIfMissing(true);
final RocksDB db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath())) {
//no-op
}
db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath());
db.close();
try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts)
);
rDb = RocksDB.openReadOnly(
final List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList =
new ArrayList<>();
try (final RocksDB rDb = RocksDB.openReadOnly(
dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
readOnlyColumnFamilyHandleList);
rDb.remove(readOnlyColumnFamilyHandleList.get(0),
"key".getBytes());
} finally {
for (ColumnFamilyHandle columnFamilyHandle : readOnlyColumnFamilyHandleList) {
columnFamilyHandle.dispose();
}
if (db != null) {
db.close();
}
if (rDb != null) {
rDb.close();
}
if (options != null) {
options.dispose();
readOnlyColumnFamilyHandleList)) {
try {
rDb.remove(readOnlyColumnFamilyHandleList.get(0),
"key".getBytes());
} finally {
for (final ColumnFamilyHandle columnFamilyHandle :
readOnlyColumnFamilyHandleList) {
columnFamilyHandle.close();
}
}
}
}
}
@Test(expected = RocksDBException.class)
public void failToWriteBatchReadOnly() throws RocksDBException {
RocksDB db = null;
RocksDB rDb = null;
Options options = null;
List<ColumnFamilyDescriptor> cfDescriptors = new ArrayList<>();
List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList =
new ArrayList<>();
try {
cfDescriptors.add(
new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY,
new ColumnFamilyOptions()));
options = new Options();
options.setCreateIfMissing(true);
try (final Options options = new Options().setCreateIfMissing(true);
final RocksDB db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath())) {
//no-op
}
db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath());
db.close();
try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts)
);
rDb = RocksDB.openReadOnly(
final List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList =
new ArrayList<>();
try (final RocksDB rDb = RocksDB.openReadOnly(
dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
readOnlyColumnFamilyHandleList);
WriteBatch wb = new WriteBatch();
wb.put("key".getBytes(), "value".getBytes());
rDb.write(new WriteOptions(), wb);
} finally {
for (ColumnFamilyHandle columnFamilyHandle : readOnlyColumnFamilyHandleList) {
columnFamilyHandle.dispose();
}
if (db != null) {
db.close();
}
if (rDb != null) {
rDb.close();
}
if (options != null) {
options.dispose();
final WriteBatch wb = new WriteBatch();
final WriteOptions wOpts = new WriteOptions()) {
try {
wb.put("key".getBytes(), "value".getBytes());
rDb.write(wOpts, wb);
} finally {
for (final ColumnFamilyHandle columnFamilyHandle :
readOnlyColumnFamilyHandleList) {
columnFamilyHandle.close();
}
}
}
}
}
@Test(expected = RocksDBException.class)
public void failToCFWriteBatchReadOnly() throws RocksDBException {
RocksDB db = null;
RocksDB rDb = null;
Options options = null;
WriteBatch wb = null;
List<ColumnFamilyDescriptor> cfDescriptors = new ArrayList<>();
List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList =
new ArrayList<>();
try {
cfDescriptors.add(
new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY,
new ColumnFamilyOptions()));
options = new Options();
options.setCreateIfMissing(true);
try (final Options options = new Options().setCreateIfMissing(true);
final RocksDB db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath())) {
//no-op
}
db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath());
db.close();
try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts)
);
rDb = RocksDB.openReadOnly(
final List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList =
new ArrayList<>();
try (final RocksDB rDb = RocksDB.openReadOnly(
dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
readOnlyColumnFamilyHandleList);
wb = new WriteBatch();
wb.put(readOnlyColumnFamilyHandleList.get(0),
"key".getBytes(), "value".getBytes());
rDb.write(new WriteOptions(), wb);
} finally {
for (ColumnFamilyHandle columnFamilyHandle : readOnlyColumnFamilyHandleList) {
columnFamilyHandle.dispose();
}
if (db != null) {
db.close();
}
if (rDb != null) {
rDb.close();
}
if (options != null) {
options.dispose();
}
if (wb != null) {
wb.dispose();
final WriteBatch wb = new WriteBatch();
final WriteOptions wOpts = new WriteOptions()) {
try {
wb.put(readOnlyColumnFamilyHandleList.get(0), "key".getBytes(),
"value".getBytes());
rDb.write(wOpts, wb);
} finally {
for (final ColumnFamilyHandle columnFamilyHandle :
readOnlyColumnFamilyHandleList) {
columnFamilyHandle.close();
}
}
}
}
}

@ -24,127 +24,111 @@ public class ReadOptionsTest {
public ExpectedException exception = ExpectedException.none();
@Test
public void verifyChecksum(){
ReadOptions opt = null;
try {
opt = new ReadOptions();
Random rand = new Random();
boolean boolValue = rand.nextBoolean();
public void verifyChecksum() {
try (final ReadOptions opt = new ReadOptions()) {
final Random rand = new Random();
final boolean boolValue = rand.nextBoolean();
opt.setVerifyChecksums(boolValue);
assertThat(opt.verifyChecksums()).isEqualTo(boolValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void fillCache(){
ReadOptions opt = null;
try {
opt = new ReadOptions();
Random rand = new Random();
boolean boolValue = rand.nextBoolean();
public void fillCache() {
try (final ReadOptions opt = new ReadOptions()) {
final Random rand = new Random();
final boolean boolValue = rand.nextBoolean();
opt.setFillCache(boolValue);
assertThat(opt.fillCache()).isEqualTo(boolValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void tailing(){
ReadOptions opt = null;
try {
opt = new ReadOptions();
Random rand = new Random();
boolean boolValue = rand.nextBoolean();
public void tailing() {
try (final ReadOptions opt = new ReadOptions()) {
final Random rand = new Random();
final boolean boolValue = rand.nextBoolean();
opt.setTailing(boolValue);
assertThat(opt.tailing()).isEqualTo(boolValue);
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void snapshot(){
ReadOptions opt = null;
try {
opt = new ReadOptions();
public void snapshot() {
try (final ReadOptions opt = new ReadOptions()) {
opt.setSnapshot(null);
assertThat(opt.snapshot()).isNull();
} finally {
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void failSetVerifyChecksumUninitialized(){
ReadOptions readOptions = setupUninitializedReadOptions(
exception);
readOptions.setVerifyChecksums(true);
public void failSetVerifyChecksumUninitialized() {
try (final ReadOptions readOptions =
setupUninitializedReadOptions(exception)) {
readOptions.setVerifyChecksums(true);
}
}
@Test
public void failVerifyChecksumUninitialized(){
ReadOptions readOptions = setupUninitializedReadOptions(
exception);
readOptions.verifyChecksums();
public void failVerifyChecksumUninitialized() {
try (final ReadOptions readOptions =
setupUninitializedReadOptions(exception)) {
readOptions.verifyChecksums();
}
}
@Test
public void failSetFillCacheUninitialized(){
ReadOptions readOptions = setupUninitializedReadOptions(
exception);
readOptions.setFillCache(true);
public void failSetFillCacheUninitialized() {
try (final ReadOptions readOptions =
setupUninitializedReadOptions(exception)) {
readOptions.setFillCache(true);
}
}
@Test
public void failFillCacheUninitialized(){
ReadOptions readOptions = setupUninitializedReadOptions(
exception);
readOptions.fillCache();
public void failFillCacheUninitialized() {
try (final ReadOptions readOptions =
setupUninitializedReadOptions(exception)) {
readOptions.fillCache();
}
}
@Test
public void failSetTailingUninitialized(){
ReadOptions readOptions = setupUninitializedReadOptions(
exception);
readOptions.setTailing(true);
public void failSetTailingUninitialized() {
try (final ReadOptions readOptions =
setupUninitializedReadOptions(exception)) {
readOptions.setTailing(true);
}
}
@Test
public void failTailingUninitialized(){
ReadOptions readOptions = setupUninitializedReadOptions(
exception);
readOptions.tailing();
public void failTailingUninitialized() {
try (final ReadOptions readOptions =
setupUninitializedReadOptions(exception)) {
readOptions.tailing();
}
}
@Test
public void failSetSnapshotUninitialized(){
ReadOptions readOptions = setupUninitializedReadOptions(
exception);
readOptions.setSnapshot(null);
public void failSetSnapshotUninitialized() {
try (final ReadOptions readOptions =
setupUninitializedReadOptions(exception)) {
readOptions.setSnapshot(null);
}
}
@Test
public void failSnapshotUninitialized(){
ReadOptions readOptions = setupUninitializedReadOptions(
exception);
readOptions.snapshot();
public void failSnapshotUninitialized() {
try (final ReadOptions readOptions =
setupUninitializedReadOptions(exception)) {
readOptions.snapshot();
}
}
private ReadOptions setupUninitializedReadOptions(
ExpectedException exception) {
ReadOptions readOptions = new ReadOptions();
readOptions.dispose();
final ReadOptions readOptions = new ReadOptions();
readOptions.close();
exception.expect(AssertionError.class);
return readOptions;
}

File diff suppressed because it is too large Load Diff

@ -17,22 +17,23 @@ public class RocksEnvTest {
new RocksMemoryResource();
@Test
public void rocksEnv(){
Env rocksEnv = RocksEnv.getDefault();
rocksEnv.setBackgroundThreads(5);
// default rocksenv will always return zero for flush pool
// no matter what was set via setBackgroundThreads
assertThat(rocksEnv.getThreadPoolQueueLen(RocksEnv.FLUSH_POOL)).
isEqualTo(0);
rocksEnv.setBackgroundThreads(5, RocksEnv.FLUSH_POOL);
// default rocksenv will always return zero for flush pool
// no matter what was set via setBackgroundThreads
assertThat(rocksEnv.getThreadPoolQueueLen(RocksEnv.FLUSH_POOL)).
isEqualTo(0);
rocksEnv.setBackgroundThreads(5, RocksEnv.COMPACTION_POOL);
// default rocksenv will always return zero for compaction pool
// no matter what was set via setBackgroundThreads
assertThat(rocksEnv.getThreadPoolQueueLen(RocksEnv.COMPACTION_POOL)).
isEqualTo(0);
public void rocksEnv() {
try (final Env rocksEnv = RocksEnv.getDefault()) {
rocksEnv.setBackgroundThreads(5);
// default rocksenv will always return zero for flush pool
// no matter what was set via setBackgroundThreads
assertThat(rocksEnv.getThreadPoolQueueLen(RocksEnv.FLUSH_POOL)).
isEqualTo(0);
rocksEnv.setBackgroundThreads(5, RocksEnv.FLUSH_POOL);
// default rocksenv will always return zero for flush pool
// no matter what was set via setBackgroundThreads
assertThat(rocksEnv.getThreadPoolQueueLen(RocksEnv.FLUSH_POOL)).
isEqualTo(0);
rocksEnv.setBackgroundThreads(5, RocksEnv.COMPACTION_POOL);
// default rocksenv will always return zero for compaction pool
// no matter what was set via setBackgroundThreads
assertThat(rocksEnv.getThreadPoolQueueLen(RocksEnv.COMPACTION_POOL)).
isEqualTo(0);
}
}
}

@ -22,50 +22,36 @@ public class RocksIteratorTest {
@Test
public void rocksIterator() throws RocksDBException {
RocksDB db = null;
Options options = null;
RocksIterator iterator = null;
try {
options = new Options();
options.setCreateIfMissing(true)
.setCreateMissingColumnFamilies(true);
db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath());
try (final Options options = new Options()
.setCreateIfMissing(true)
.setCreateMissingColumnFamilies(true);
final RocksDB db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath())) {
db.put("key1".getBytes(), "value1".getBytes());
db.put("key2".getBytes(), "value2".getBytes());
iterator = db.newIterator();
iterator.seekToFirst();
assertThat(iterator.isValid()).isTrue();
assertThat(iterator.key()).isEqualTo("key1".getBytes());
assertThat(iterator.value()).isEqualTo("value1".getBytes());
iterator.next();
assertThat(iterator.isValid()).isTrue();
assertThat(iterator.key()).isEqualTo("key2".getBytes());
assertThat(iterator.value()).isEqualTo("value2".getBytes());
iterator.next();
assertThat(iterator.isValid()).isFalse();
iterator.seekToLast();
iterator.prev();
assertThat(iterator.isValid()).isTrue();
assertThat(iterator.key()).isEqualTo("key1".getBytes());
assertThat(iterator.value()).isEqualTo("value1".getBytes());
iterator.seekToFirst();
iterator.seekToLast();
assertThat(iterator.isValid()).isTrue();
assertThat(iterator.key()).isEqualTo("key2".getBytes());
assertThat(iterator.value()).isEqualTo("value2".getBytes());
iterator.status();
} finally {
if (iterator != null) {
iterator.dispose();
}
if (db != null) {
db.close();
}
if (options != null) {
options.dispose();
try (final RocksIterator iterator = db.newIterator()) {
iterator.seekToFirst();
assertThat(iterator.isValid()).isTrue();
assertThat(iterator.key()).isEqualTo("key1".getBytes());
assertThat(iterator.value()).isEqualTo("value1".getBytes());
iterator.next();
assertThat(iterator.isValid()).isTrue();
assertThat(iterator.key()).isEqualTo("key2".getBytes());
assertThat(iterator.value()).isEqualTo("value2".getBytes());
iterator.next();
assertThat(iterator.isValid()).isFalse();
iterator.seekToLast();
iterator.prev();
assertThat(iterator.isValid()).isTrue();
assertThat(iterator.key()).isEqualTo("key1".getBytes());
assertThat(iterator.value()).isEqualTo("value1".getBytes());
iterator.seekToFirst();
iterator.seekToLast();
assertThat(iterator.isValid()).isTrue();
assertThat(iterator.key()).isEqualTo("key2".getBytes());
assertThat(iterator.value()).isEqualTo("value2".getBytes());
iterator.status();
}
}
}

@ -33,73 +33,55 @@ public class RocksMemEnvTest {
"baz".getBytes()
};
Env env = null;
Options options = null;
RocksDB db = null;
FlushOptions flushOptions = null;
try {
env = new RocksMemEnv();
options = new Options().
setCreateIfMissing(true).
setEnv(env);
flushOptions = new FlushOptions().
setWaitForFlush(true);
db = RocksDB.open(options, "dir/db");
// write key/value pairs using MemEnv
for (int i=0; i < keys.length; i++) {
db.put(keys[i], values[i]);
try (final Env env = new RocksMemEnv();
final Options options = new Options()
.setCreateIfMissing(true)
.setEnv(env);
final FlushOptions flushOptions = new FlushOptions()
.setWaitForFlush(true);
) {
try (final RocksDB db = RocksDB.open(options, "dir/db")) {
// write key/value pairs using MemEnv
for (int i = 0; i < keys.length; i++) {
db.put(keys[i], values[i]);
}
// read key/value pairs using MemEnv
for (int i = 0; i < keys.length; i++) {
assertThat(db.get(keys[i])).isEqualTo(values[i]);
}
// Check iterator access
try (final RocksIterator iterator = db.newIterator()) {
iterator.seekToFirst();
for (int i = 0; i < keys.length; i++) {
assertThat(iterator.isValid()).isTrue();
assertThat(iterator.key()).isEqualTo(keys[i]);
assertThat(iterator.value()).isEqualTo(values[i]);
iterator.next();
}
// reached end of database
assertThat(iterator.isValid()).isFalse();
}
// flush
db.flush(flushOptions);
// read key/value pairs after flush using MemEnv
for (int i = 0; i < keys.length; i++) {
assertThat(db.get(keys[i])).isEqualTo(values[i]);
}
}
// read key/value pairs using MemEnv
for (int i=0; i < keys.length; i++) {
assertThat(db.get(keys[i])).isEqualTo(values[i]);
}
// Check iterator access
RocksIterator iterator = db.newIterator();
iterator.seekToFirst();
for (int i=0; i < keys.length; i++) {
assertThat(iterator.isValid()).isTrue();
assertThat(iterator.key()).isEqualTo(keys[i]);
assertThat(iterator.value()).isEqualTo(values[i]);
iterator.next();
}
// reached end of database
assertThat(iterator.isValid()).isFalse();
iterator.dispose();
// flush
db.flush(flushOptions);
// read key/value pairs after flush using MemEnv
for (int i=0; i < keys.length; i++) {
assertThat(db.get(keys[i])).isEqualTo(values[i]);
}
db.close();
options.setCreateIfMissing(false);
// After reopen the values shall still be in the mem env.
// as long as the env is not freed.
db = RocksDB.open(options, "dir/db");
// read key/value pairs using MemEnv
for (int i=0; i < keys.length; i++) {
assertThat(db.get(keys[i])).isEqualTo(values[i]);
}
} finally {
if (db != null) {
db.close();
}
if (options != null) {
options.dispose();
}
if (flushOptions != null) {
flushOptions.dispose();
}
if (env != null) {
env.dispose();
try (final RocksDB db = RocksDB.open(options, "dir/db")) {
// read key/value pairs using MemEnv
for (int i = 0; i < keys.length; i++) {
assertThat(db.get(keys[i])).isEqualTo(values[i]);
}
}
}
}
@ -125,27 +107,22 @@ public class RocksMemEnvTest {
"baz".getBytes()
};
Env env = null;
Options options = null;
RocksDB db = null, otherDb = null;
try {
env = new RocksMemEnv();
options = new Options().
setCreateIfMissing(true).
setEnv(env);
db = RocksDB.open(options, "dir/db");
otherDb = RocksDB.open(options, "dir/otherDb");
try (final Env env = new RocksMemEnv();
final Options options = new Options()
.setCreateIfMissing(true)
.setEnv(env);
final RocksDB db = RocksDB.open(options, "dir/db");
final RocksDB otherDb = RocksDB.open(options, "dir/otherDb")
) {
// write key/value pairs using MemEnv
// to db and to otherDb.
for (int i=0; i < keys.length; i++) {
for (int i = 0; i < keys.length; i++) {
db.put(keys[i], values[i]);
otherDb.put(otherKeys[i], values[i]);
}
// verify key/value pairs after flush using MemEnv
for (int i=0; i < keys.length; i++) {
for (int i = 0; i < keys.length; i++) {
// verify db
assertThat(db.get(otherKeys[i])).isNull();
assertThat(db.get(keys[i])).isEqualTo(values[i]);
@ -154,43 +131,18 @@ public class RocksMemEnvTest {
assertThat(otherDb.get(keys[i])).isNull();
assertThat(otherDb.get(otherKeys[i])).isEqualTo(values[i]);
}
} finally {
if (db != null) {
db.close();
}
if (otherDb != null) {
otherDb.close();
}
if (options != null) {
options.dispose();
}
if (env != null) {
env.dispose();
}
}
}
@Test(expected = RocksDBException.class)
public void createIfMissingFalse() throws RocksDBException {
Env env = null;
Options options = null;
RocksDB db = null;
try {
env = new RocksMemEnv();
options = new Options().
setCreateIfMissing(false).
setEnv(env);
try (final Env env = new RocksMemEnv();
final Options options = new Options()
.setCreateIfMissing(false)
.setEnv(env);
final RocksDB db = RocksDB.open(options, "db/dir")) {
// shall throw an exception because db dir does not
// exist.
db = RocksDB.open(options, "db/dir");
} finally {
if (options != null) {
options.dispose();
}
if (env != null) {
env.dispose();
}
}
}
}

@ -5,7 +5,11 @@ import org.junit.rules.ExternalResource;
/**
* Resource to trigger garbage collection after each test
* run.
*
* @deprecated Will be removed with the implementation of
* {@link RocksObject#finalize()}
*/
@Deprecated
public class RocksMemoryResource extends ExternalResource {
static {

@ -17,89 +17,45 @@ public class SliceTest {
@Test
public void slice() {
Slice slice = null;
Slice otherSlice = null;
Slice thirdSlice = null;
try {
slice = new Slice("testSlice");
try (final Slice slice = new Slice("testSlice")) {
assertThat(slice.empty()).isFalse();
assertThat(slice.size()).isEqualTo(9);
assertThat(slice.data()).isEqualTo("testSlice".getBytes());
}
otherSlice = new Slice("otherSlice".getBytes());
try (final Slice otherSlice = new Slice("otherSlice".getBytes())) {
assertThat(otherSlice.data()).isEqualTo("otherSlice".getBytes());
}
thirdSlice = new Slice("otherSlice".getBytes(), 5);
try (final Slice thirdSlice = new Slice("otherSlice".getBytes(), 5)) {
assertThat(thirdSlice.data()).isEqualTo("Slice".getBytes());
} finally {
if (slice != null) {
slice.dispose();
}
if (otherSlice != null) {
otherSlice.dispose();
}
if (thirdSlice != null) {
thirdSlice.dispose();
}
}
}
@Test
public void sliceEquals() {
Slice slice = null;
Slice slice2 = null;
try {
slice = new Slice("abc");
slice2 = new Slice("abc");
try (final Slice slice = new Slice("abc");
final Slice slice2 = new Slice("abc")) {
assertThat(slice.equals(slice2)).isTrue();
assertThat(slice.hashCode() == slice2.hashCode()).isTrue();
} finally {
if (slice != null) {
slice.dispose();
}
if (slice2 != null) {
slice2.dispose();
}
}
}
@Test
public void sliceStartWith() {
Slice slice = null;
Slice match = null;
Slice noMatch = null;
try {
slice = new Slice("matchpoint");
match = new Slice("mat");
noMatch = new Slice("nomatch");
//assertThat(slice.startsWith(match)).isTrue();
try (final Slice slice = new Slice("matchpoint");
final Slice match = new Slice("mat");
final Slice noMatch = new Slice("nomatch")) {
assertThat(slice.startsWith(match)).isTrue();
assertThat(slice.startsWith(noMatch)).isFalse();
} finally {
if (slice != null) {
slice.dispose();
}
if (match != null) {
match.dispose();
}
if (noMatch != null) {
noMatch.dispose();
}
}
}
@Test
public void sliceToString() {
Slice slice = null;
try {
slice = new Slice("stringTest");
try (final Slice slice = new Slice("stringTest")) {
assertThat(slice.toString()).isEqualTo("stringTest");
assertThat(slice.toString(true)).isNotEqualTo("");
} finally {
if (slice != null) {
slice.dispose();
}
}
}
}

@ -22,195 +22,147 @@ public class SnapshotTest {
@Test
public void snapshots() throws RocksDBException {
RocksDB db = null;
Options options = null;
ReadOptions readOptions = null;
try {
options = new Options();
options.setCreateIfMissing(true);
db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath());
try (final Options options = new Options().setCreateIfMissing(true);
final RocksDB db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath())) {
db.put("key".getBytes(), "value".getBytes());
// Get new Snapshot of database
Snapshot snapshot = db.getSnapshot();
assertThat(snapshot.getSequenceNumber()).isGreaterThan(0);
assertThat(snapshot.getSequenceNumber()).isEqualTo(1);
readOptions = new ReadOptions();
// set snapshot in ReadOptions
readOptions.setSnapshot(snapshot);
// retrieve key value pair
assertThat(new String(db.get("key".getBytes()))).
isEqualTo("value");
// retrieve key value pair created before
// the snapshot was made
assertThat(new String(db.get(readOptions,
"key".getBytes()))).isEqualTo("value");
// add new key/value pair
db.put("newkey".getBytes(), "newvalue".getBytes());
// using no snapshot the latest db entries
// will be taken into account
assertThat(new String(db.get("newkey".getBytes()))).
isEqualTo("newvalue");
// snapshot was created before newkey
assertThat(db.get(readOptions, "newkey".getBytes())).
isNull();
// Retrieve snapshot from read options
Snapshot sameSnapshot = readOptions.snapshot();
readOptions.setSnapshot(sameSnapshot);
// results must be the same with new Snapshot
// instance using the same native pointer
assertThat(new String(db.get(readOptions,
"key".getBytes()))).isEqualTo("value");
// update key value pair to newvalue
db.put("key".getBytes(), "newvalue".getBytes());
// read with previously created snapshot will
// read previous version of key value pair
assertThat(new String(db.get(readOptions,
"key".getBytes()))).isEqualTo("value");
// read for newkey using the snapshot must be
// null
assertThat(db.get(readOptions, "newkey".getBytes())).
isNull();
// setting null to snapshot in ReadOptions leads
// to no Snapshot being used.
readOptions.setSnapshot(null);
assertThat(new String(db.get(readOptions,
"newkey".getBytes()))).isEqualTo("newvalue");
// release Snapshot
db.releaseSnapshot(snapshot);
} finally {
if (db != null) {
db.close();
}
if (options != null) {
options.dispose();
}
if (readOptions != null) {
readOptions.dispose();
try (final Snapshot snapshot = db.getSnapshot()) {
assertThat(snapshot.getSequenceNumber()).isGreaterThan(0);
assertThat(snapshot.getSequenceNumber()).isEqualTo(1);
try (final ReadOptions readOptions = new ReadOptions()) {
// set snapshot in ReadOptions
readOptions.setSnapshot(snapshot);
// retrieve key value pair
assertThat(new String(db.get("key".getBytes()))).
isEqualTo("value");
// retrieve key value pair created before
// the snapshot was made
assertThat(new String(db.get(readOptions,
"key".getBytes()))).isEqualTo("value");
// add new key/value pair
db.put("newkey".getBytes(), "newvalue".getBytes());
// using no snapshot the latest db entries
// will be taken into account
assertThat(new String(db.get("newkey".getBytes()))).
isEqualTo("newvalue");
// snapshot was created before newkey
assertThat(db.get(readOptions, "newkey".getBytes())).
isNull();
// Retrieve snapshot from read options
try (final Snapshot sameSnapshot = readOptions.snapshot()) {
readOptions.setSnapshot(sameSnapshot);
// results must be the same with new Snapshot
// instance using the same native pointer
assertThat(new String(db.get(readOptions,
"key".getBytes()))).isEqualTo("value");
// update key value pair to newvalue
db.put("key".getBytes(), "newvalue".getBytes());
// read with previously created snapshot will
// read previous version of key value pair
assertThat(new String(db.get(readOptions,
"key".getBytes()))).isEqualTo("value");
// read for newkey using the snapshot must be
// null
assertThat(db.get(readOptions, "newkey".getBytes())).
isNull();
// setting null to snapshot in ReadOptions leads
// to no Snapshot being used.
readOptions.setSnapshot(null);
assertThat(new String(db.get(readOptions,
"newkey".getBytes()))).isEqualTo("newvalue");
// release Snapshot
db.releaseSnapshot(snapshot);
}
}
}
}
}
@Test
public void iteratorWithSnapshot() throws RocksDBException {
RocksDB db = null;
Options options = null;
ReadOptions readOptions = null;
RocksIterator iterator = null;
RocksIterator snapshotIterator = null;
try {
options = new Options();
options.setCreateIfMissing(true);
db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath());
try (final Options options = new Options().setCreateIfMissing(true);
final RocksDB db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath())) {
db.put("key".getBytes(), "value".getBytes());
// Get new Snapshot of database
Snapshot snapshot = db.getSnapshot();
readOptions = new ReadOptions();
// set snapshot in ReadOptions
readOptions.setSnapshot(snapshot);
db.put("key2".getBytes(), "value2".getBytes());
// iterate over current state of db
iterator = db.newIterator();
iterator.seekToFirst();
assertThat(iterator.isValid()).isTrue();
assertThat(iterator.key()).isEqualTo("key".getBytes());
iterator.next();
assertThat(iterator.isValid()).isTrue();
assertThat(iterator.key()).isEqualTo("key2".getBytes());
iterator.next();
assertThat(iterator.isValid()).isFalse();
// iterate using a snapshot
snapshotIterator = db.newIterator(readOptions);
snapshotIterator.seekToFirst();
assertThat(snapshotIterator.isValid()).isTrue();
assertThat(snapshotIterator.key()).isEqualTo("key".getBytes());
snapshotIterator.next();
assertThat(snapshotIterator.isValid()).isFalse();
// release Snapshot
db.releaseSnapshot(snapshot);
} finally {
if (iterator != null) {
iterator.dispose();
}
if (snapshotIterator != null) {
snapshotIterator.dispose();
}
if (db != null) {
db.close();
}
if (options != null) {
options.dispose();
}
if (readOptions != null) {
readOptions.dispose();
try (final Snapshot snapshot = db.getSnapshot();
final ReadOptions readOptions =
new ReadOptions().setSnapshot(snapshot)) {
db.put("key2".getBytes(), "value2".getBytes());
// iterate over current state of db
try (final RocksIterator iterator = db.newIterator()) {
iterator.seekToFirst();
assertThat(iterator.isValid()).isTrue();
assertThat(iterator.key()).isEqualTo("key".getBytes());
iterator.next();
assertThat(iterator.isValid()).isTrue();
assertThat(iterator.key()).isEqualTo("key2".getBytes());
iterator.next();
assertThat(iterator.isValid()).isFalse();
}
// iterate using a snapshot
try (final RocksIterator snapshotIterator =
db.newIterator(readOptions)) {
snapshotIterator.seekToFirst();
assertThat(snapshotIterator.isValid()).isTrue();
assertThat(snapshotIterator.key()).isEqualTo("key".getBytes());
snapshotIterator.next();
assertThat(snapshotIterator.isValid()).isFalse();
}
// release Snapshot
db.releaseSnapshot(snapshot);
}
}
}
@Test
public void iteratorWithSnapshotOnColumnFamily() throws RocksDBException {
RocksDB db = null;
Options options = null;
ReadOptions readOptions = null;
RocksIterator iterator = null;
RocksIterator snapshotIterator = null;
try {
try (final Options options = new Options()
.setCreateIfMissing(true);
final RocksDB db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath())) {
options = new Options();
options.setCreateIfMissing(true);
db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath());
db.put("key".getBytes(), "value".getBytes());
// Get new Snapshot of database
Snapshot snapshot = db.getSnapshot();
readOptions = new ReadOptions();
// set snapshot in ReadOptions
readOptions.setSnapshot(snapshot);
db.put("key2".getBytes(), "value2".getBytes());
// iterate over current state of column family
iterator = db.newIterator(db.getDefaultColumnFamily());
iterator.seekToFirst();
assertThat(iterator.isValid()).isTrue();
assertThat(iterator.key()).isEqualTo("key".getBytes());
iterator.next();
assertThat(iterator.isValid()).isTrue();
assertThat(iterator.key()).isEqualTo("key2".getBytes());
iterator.next();
assertThat(iterator.isValid()).isFalse();
// iterate using a snapshot on default column family
snapshotIterator = db.newIterator(db.getDefaultColumnFamily(),
readOptions);
snapshotIterator.seekToFirst();
assertThat(snapshotIterator.isValid()).isTrue();
assertThat(snapshotIterator.key()).isEqualTo("key".getBytes());
snapshotIterator.next();
assertThat(snapshotIterator.isValid()).isFalse();
// release Snapshot
db.releaseSnapshot(snapshot);
} finally {
if (iterator != null) {
iterator.dispose();
}
if (snapshotIterator != null) {
snapshotIterator.dispose();
}
if (db != null) {
db.close();
}
if (options != null) {
options.dispose();
}
if (readOptions != null) {
readOptions.dispose();
try (final Snapshot snapshot = db.getSnapshot();
final ReadOptions readOptions = new ReadOptions()
.setSnapshot(snapshot)) {
db.put("key2".getBytes(), "value2".getBytes());
// iterate over current state of column family
try (final RocksIterator iterator = db.newIterator(
db.getDefaultColumnFamily())) {
iterator.seekToFirst();
assertThat(iterator.isValid()).isTrue();
assertThat(iterator.key()).isEqualTo("key".getBytes());
iterator.next();
assertThat(iterator.isValid()).isTrue();
assertThat(iterator.key()).isEqualTo("key2".getBytes());
iterator.next();
assertThat(iterator.isValid()).isFalse();
}
// iterate using a snapshot on default column family
try (final RocksIterator snapshotIterator = db.newIterator(
db.getDefaultColumnFamily(), readOptions)) {
snapshotIterator.seekToFirst();
assertThat(snapshotIterator.isValid()).isTrue();
assertThat(snapshotIterator.key()).isEqualTo("key".getBytes());
snapshotIterator.next();
assertThat(snapshotIterator.isValid()).isFalse();
// release Snapshot
db.releaseSnapshot(snapshot);
}
}
}
}

@ -26,19 +26,18 @@ public class StatisticsCollectorTest {
@Test
public void statisticsCollector()
throws InterruptedException, RocksDBException {
Options opt = null;
RocksDB db = null;
try {
opt = new Options().createStatistics().setCreateIfMissing(true);
Statistics stats = opt.statisticsPtr();
db = RocksDB.open(opt,
dbFolder.getRoot().getAbsolutePath());
StatsCallbackMock callback = new StatsCallbackMock();
StatsCollectorInput statsInput = new StatsCollectorInput(stats, callback);
StatisticsCollector statsCollector = new StatisticsCollector(
try (final Options opt = new Options()
.createStatistics()
.setCreateIfMissing(true);
final RocksDB db = RocksDB.open(opt,
dbFolder.getRoot().getAbsolutePath())) {
final Statistics stats = opt.statisticsPtr();
final StatsCallbackMock callback = new StatsCallbackMock();
final StatsCollectorInput statsInput =
new StatsCollectorInput(stats, callback);
final StatisticsCollector statsCollector = new StatisticsCollector(
Collections.singletonList(statsInput), 100);
statsCollector.start();
@ -48,13 +47,6 @@ public class StatisticsCollectorTest {
assertThat(callback.histCallbackCount).isGreaterThan(0);
statsCollector.shutDown(1000);
} finally {
if (db != null) {
db.close();
}
if (opt != null) {
opt.dispose();
}
}
}
}

@ -17,43 +17,27 @@ public class TransactionLogIteratorTest {
@Test
public void transactionLogIterator() throws RocksDBException {
RocksDB db = null;
Options options = null;
TransactionLogIterator transactionLogIterator = null;
try {
options = new Options().
setCreateIfMissing(true);
db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath());
transactionLogIterator = db.getUpdatesSince(0);
} finally {
if (transactionLogIterator != null) {
transactionLogIterator.dispose();
}
if (db != null) {
db.close();
}
if (options != null) {
options.dispose();
}
try (final Options options = new Options()
.setCreateIfMissing(true);
final RocksDB db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath());
final TransactionLogIterator transactionLogIterator =
db.getUpdatesSince(0)) {
//no-op
}
}
@Test
public void getBatch() throws RocksDBException {
final int numberOfPuts = 5;
RocksDB db = null;
Options options = null;
ColumnFamilyHandle cfHandle = null;
TransactionLogIterator transactionLogIterator = null;
try {
options = new Options().
setCreateIfMissing(true).
setWalTtlSeconds(1000).
setWalSizeLimitMB(10);
db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath());
for (int i = 0; i < numberOfPuts; i++){
try (final Options options = new Options()
.setCreateIfMissing(true)
.setWalTtlSeconds(1000)
.setWalSizeLimitMB(10);
final RocksDB db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath())) {
for (int i = 0; i < numberOfPuts; i++) {
db.put(String.valueOf(i).getBytes(),
String.valueOf(i).getBytes());
}
@ -65,117 +49,89 @@ public class TransactionLogIteratorTest {
isEqualTo(numberOfPuts);
// insert 5 writes into a cf
cfHandle = db.createColumnFamily(
new ColumnFamilyDescriptor("new_cf".getBytes()));
for (int i = 0; i < numberOfPuts; i++){
db.put(cfHandle, String.valueOf(i).getBytes(),
String.valueOf(i).getBytes());
}
// the latest sequence number is 10 because
// (5 + 5) puts were written beforehand
assertThat(db.getLatestSequenceNumber()).
isEqualTo(numberOfPuts + numberOfPuts);
// Get updates since the beginning
transactionLogIterator = db.getUpdatesSince(0);
assertThat(transactionLogIterator.isValid()).isTrue();
transactionLogIterator.status();
// The first sequence number is 1
TransactionLogIterator.BatchResult batchResult =
transactionLogIterator.getBatch();
assertThat(batchResult.sequenceNumber()).isEqualTo(1);
} finally {
if (transactionLogIterator != null) {
transactionLogIterator.dispose();
}
if (cfHandle != null) {
cfHandle.dispose();
}
if (db != null) {
db.close();
}
if (options != null) {
options.dispose();
try (final ColumnFamilyHandle cfHandle = db.createColumnFamily(
new ColumnFamilyDescriptor("new_cf".getBytes()))) {
for (int i = 0; i < numberOfPuts; i++) {
db.put(cfHandle, String.valueOf(i).getBytes(),
String.valueOf(i).getBytes());
}
// the latest sequence number is 10 because
// (5 + 5) puts were written beforehand
assertThat(db.getLatestSequenceNumber()).
isEqualTo(numberOfPuts + numberOfPuts);
// Get updates since the beginning
try (final TransactionLogIterator transactionLogIterator =
db.getUpdatesSince(0)) {
assertThat(transactionLogIterator.isValid()).isTrue();
transactionLogIterator.status();
// The first sequence number is 1
final TransactionLogIterator.BatchResult batchResult =
transactionLogIterator.getBatch();
assertThat(batchResult.sequenceNumber()).isEqualTo(1);
}
}
}
}
@Test
public void transactionLogIteratorStallAtLastRecord() throws RocksDBException {
RocksDB db = null;
Options options = null;
TransactionLogIterator transactionLogIterator = null;
try {
options = new Options().
setCreateIfMissing(true).
setWalTtlSeconds(1000).
setWalSizeLimitMB(10);
db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath());
public void transactionLogIteratorStallAtLastRecord()
throws RocksDBException {
try (final Options options = new Options()
.setCreateIfMissing(true)
.setWalTtlSeconds(1000)
.setWalSizeLimitMB(10);
final RocksDB db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath())) {
db.put("key1".getBytes(), "value1".getBytes());
// Get updates since the beginning
transactionLogIterator = db.getUpdatesSince(0);
transactionLogIterator.status();
assertThat(transactionLogIterator.isValid()).isTrue();
transactionLogIterator.next();
assertThat(transactionLogIterator.isValid()).isFalse();
transactionLogIterator.status();
db.put("key2".getBytes(), "value2".getBytes());
transactionLogIterator.next();
transactionLogIterator.status();
assertThat(transactionLogIterator.isValid()).isTrue();
} finally {
if (transactionLogIterator != null) {
transactionLogIterator.dispose();
}
if (db != null) {
db.close();
}
if (options != null) {
options.dispose();
try (final TransactionLogIterator transactionLogIterator =
db.getUpdatesSince(0)) {
transactionLogIterator.status();
assertThat(transactionLogIterator.isValid()).isTrue();
transactionLogIterator.next();
assertThat(transactionLogIterator.isValid()).isFalse();
transactionLogIterator.status();
db.put("key2".getBytes(), "value2".getBytes());
transactionLogIterator.next();
transactionLogIterator.status();
assertThat(transactionLogIterator.isValid()).isTrue();
}
}
}
@Test
public void transactionLogIteratorCheckAfterRestart() throws RocksDBException {
public void transactionLogIteratorCheckAfterRestart()
throws RocksDBException {
final int numberOfKeys = 2;
RocksDB db = null;
Options options = null;
TransactionLogIterator transactionLogIterator = null;
try {
options = new Options().
setCreateIfMissing(true).
setWalTtlSeconds(1000).
setWalSizeLimitMB(10);
db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath());
db.put("key1".getBytes(), "value1".getBytes());
db.put("key2".getBytes(), "value2".getBytes());
db.flush(new FlushOptions().setWaitForFlush(true));
// reopen
db.close();
db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath());
assertThat(db.getLatestSequenceNumber()).isEqualTo(numberOfKeys);
try (final Options options = new Options()
.setCreateIfMissing(true)
.setWalTtlSeconds(1000)
.setWalSizeLimitMB(10)) {
try (final RocksDB db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath())) {
db.put("key1".getBytes(), "value1".getBytes());
db.put("key2".getBytes(), "value2".getBytes());
db.flush(new FlushOptions().setWaitForFlush(true));
transactionLogIterator = db.getUpdatesSince(0);
for (int i = 0; i < numberOfKeys; i++) {
transactionLogIterator.status();
assertThat(transactionLogIterator.isValid()).isTrue();
transactionLogIterator.next();
}
} finally {
if (transactionLogIterator != null) {
transactionLogIterator.dispose();
}
if (db != null) {
db.close();
}
if (options != null) {
options.dispose();
// reopen
try (final RocksDB db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath())) {
assertThat(db.getLatestSequenceNumber()).isEqualTo(numberOfKeys);
try (final TransactionLogIterator transactionLogIterator =
db.getUpdatesSince(0)) {
for (int i = 0; i < numberOfKeys; i++) {
transactionLogIterator.status();
assertThat(transactionLogIterator.isValid()).isTrue();
transactionLogIterator.next();
}
}
}
}
}

@ -11,6 +11,7 @@ import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.TimeUnit;
@ -26,108 +27,74 @@ public class TtlDBTest {
public TemporaryFolder dbFolder = new TemporaryFolder();
@Test
public void ttlDBOpen() throws RocksDBException,
InterruptedException {
Options options = null;
TtlDB ttlDB = null;
try {
options = new Options().
setCreateIfMissing(true).
setMaxGrandparentOverlapFactor(0);
ttlDB = TtlDB.open(options,
dbFolder.getRoot().getAbsolutePath());
public void ttlDBOpen() throws RocksDBException, InterruptedException {
try (final Options options = new Options()
.setCreateIfMissing(true)
.setMaxGrandparentOverlapFactor(0);
final TtlDB ttlDB = TtlDB.open(options,
dbFolder.getRoot().getAbsolutePath())
) {
ttlDB.put("key".getBytes(), "value".getBytes());
assertThat(ttlDB.get("key".getBytes())).
isEqualTo("value".getBytes());
assertThat(ttlDB.get("key".getBytes())).isNotNull();
} finally {
if (ttlDB != null) {
ttlDB.close();
}
if (options != null) {
options.dispose();
}
}
}
@Test
public void ttlDBOpenWithTtl() throws RocksDBException,
InterruptedException {
Options options = null;
TtlDB ttlDB = null;
try {
options = new Options().
setCreateIfMissing(true).
setMaxGrandparentOverlapFactor(0);
ttlDB = TtlDB.open(options, dbFolder.getRoot().getAbsolutePath(),
1, false);
public void ttlDBOpenWithTtl() throws RocksDBException, InterruptedException {
try (final Options options = new Options()
.setCreateIfMissing(true)
.setMaxGrandparentOverlapFactor(0);
final TtlDB ttlDB = TtlDB.open(options,
dbFolder.getRoot().getAbsolutePath(), 1, false);
) {
ttlDB.put("key".getBytes(), "value".getBytes());
assertThat(ttlDB.get("key".getBytes())).
isEqualTo("value".getBytes());
TimeUnit.SECONDS.sleep(2);
ttlDB.compactRange();
assertThat(ttlDB.get("key".getBytes())).isNull();
} finally {
if (ttlDB != null) {
ttlDB.close();
}
if (options != null) {
options.dispose();
}
}
}
@Test
public void ttlDbOpenWithColumnFamilies() throws RocksDBException, InterruptedException {
DBOptions dbOptions = null;
TtlDB ttlDB = null;
List<ColumnFamilyDescriptor> cfNames =
new ArrayList<>();
List<ColumnFamilyHandle> columnFamilyHandleList =
new ArrayList<>();
cfNames.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY));
cfNames.add(new ColumnFamilyDescriptor("new_cf".getBytes()));
List<Integer> ttlValues = new ArrayList<>();
// Default column family with infinite lifetime
ttlValues.add(0);
// new column family with 1 second ttl
ttlValues.add(1);
try {
dbOptions = new DBOptions().
setCreateMissingColumnFamilies(true).
setCreateIfMissing(true);
ttlDB = TtlDB.open(dbOptions, dbFolder.getRoot().getAbsolutePath(),
cfNames, columnFamilyHandleList, ttlValues, false);
ttlDB.put("key".getBytes(), "value".getBytes());
assertThat(ttlDB.get("key".getBytes())).
isEqualTo("value".getBytes());
ttlDB.put(columnFamilyHandleList.get(1), "key".getBytes(),
"value".getBytes());
assertThat(ttlDB.get(columnFamilyHandleList.get(1),
"key".getBytes())).isEqualTo("value".getBytes());
TimeUnit.SECONDS.sleep(2);
ttlDB.compactRange();
ttlDB.compactRange(columnFamilyHandleList.get(1));
assertThat(ttlDB.get("key".getBytes())).isNotNull();
assertThat(ttlDB.get(columnFamilyHandleList.get(1),
"key".getBytes())).isNull();
} finally {
for (ColumnFamilyHandle columnFamilyHandle :
columnFamilyHandleList) {
columnFamilyHandle.dispose();
}
if (ttlDB != null) {
ttlDB.close();
}
if (dbOptions != null) {
dbOptions.dispose();
public void ttlDbOpenWithColumnFamilies() throws RocksDBException,
InterruptedException {
final List<ColumnFamilyDescriptor> cfNames = Arrays.asList(
new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
new ColumnFamilyDescriptor("new_cf".getBytes())
);
final List<Integer> ttlValues = Arrays.asList(0, 1);
final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
try (final DBOptions dbOptions = new DBOptions()
.setCreateMissingColumnFamilies(true)
.setCreateIfMissing(true);
final TtlDB ttlDB = TtlDB.open(dbOptions,
dbFolder.getRoot().getAbsolutePath(), cfNames,
columnFamilyHandleList, ttlValues, false)) {
try {
ttlDB.put("key".getBytes(), "value".getBytes());
assertThat(ttlDB.get("key".getBytes())).
isEqualTo("value".getBytes());
ttlDB.put(columnFamilyHandleList.get(1), "key".getBytes(),
"value".getBytes());
assertThat(ttlDB.get(columnFamilyHandleList.get(1),
"key".getBytes())).isEqualTo("value".getBytes());
TimeUnit.SECONDS.sleep(2);
ttlDB.compactRange();
ttlDB.compactRange(columnFamilyHandleList.get(1));
assertThat(ttlDB.get("key".getBytes())).isNotNull();
assertThat(ttlDB.get(columnFamilyHandleList.get(1),
"key".getBytes())).isNull();
} finally {
for (final ColumnFamilyHandle columnFamilyHandle :
columnFamilyHandleList) {
columnFamilyHandle.close();
}
}
}
}
@ -135,15 +102,12 @@ public class TtlDBTest {
@Test
public void createTtlColumnFamily() throws RocksDBException,
InterruptedException {
Options options = null;
TtlDB ttlDB = null;
ColumnFamilyHandle columnFamilyHandle = null;
try {
options = new Options().setCreateIfMissing(true);
ttlDB = TtlDB.open(options,
dbFolder.getRoot().getAbsolutePath());
columnFamilyHandle = ttlDB.createColumnFamilyWithTtl(
new ColumnFamilyDescriptor("new_cf".getBytes()), 1);
try (final Options options = new Options().setCreateIfMissing(true);
final TtlDB ttlDB = TtlDB.open(options,
dbFolder.getRoot().getAbsolutePath());
final ColumnFamilyHandle columnFamilyHandle =
ttlDB.createColumnFamilyWithTtl(
new ColumnFamilyDescriptor("new_cf".getBytes()), 1)) {
ttlDB.put(columnFamilyHandle, "key".getBytes(),
"value".getBytes());
assertThat(ttlDB.get(columnFamilyHandle, "key".getBytes())).
@ -151,16 +115,6 @@ public class TtlDBTest {
TimeUnit.SECONDS.sleep(2);
ttlDB.compactRange(columnFamilyHandle);
assertThat(ttlDB.get(columnFamilyHandle, "key".getBytes())).isNull();
} finally {
if (columnFamilyHandle != null) {
columnFamilyHandle.dispose();
}
if (ttlDB != null) {
ttlDB.close();
}
if (options != null) {
options.dispose();
}
}
}
}

@ -23,28 +23,26 @@ public class WriteBatchHandlerTest {
@Test
public void writeBatchHandler() throws IOException, RocksDBException {
WriteBatch batch = null;
CapturingWriteBatchHandler handler = null;
try {
// setup test data
final List<Tuple<Action, Tuple<byte[], byte[]>>> testEvents = new ArrayList<>();
testEvents.add(new Tuple<>(Action.DELETE,
new Tuple<byte[], byte[]>("k0".getBytes(), null)));
testEvents.add(new Tuple<>(Action.PUT,
new Tuple<>("k1".getBytes(), "v1".getBytes())));
testEvents.add(new Tuple<>(Action.PUT,
new Tuple<>("k2".getBytes(), "v2".getBytes())));
testEvents.add(new Tuple<>(Action.PUT,
new Tuple<>("k3".getBytes(), "v3".getBytes())));
testEvents.add(new Tuple<>(Action.LOG,
new Tuple<byte[], byte[]>(null, "log1".getBytes())));
testEvents.add(new Tuple<>(Action.MERGE,
new Tuple<>("k2".getBytes(), "v22".getBytes())));
testEvents.add(new Tuple<>(Action.DELETE,
new Tuple<byte[], byte[]>("k3".getBytes(), null)));
// load test data to the write batch
batch = new WriteBatch();
// setup test data
final List<Tuple<Action, Tuple<byte[], byte[]>>> testEvents = Arrays.asList(
new Tuple<>(Action.DELETE,
new Tuple<byte[], byte[]>("k0".getBytes(), null)),
new Tuple<>(Action.PUT,
new Tuple<>("k1".getBytes(), "v1".getBytes())),
new Tuple<>(Action.PUT,
new Tuple<>("k2".getBytes(), "v2".getBytes())),
new Tuple<>(Action.PUT,
new Tuple<>("k3".getBytes(), "v3".getBytes())),
new Tuple<>(Action.LOG,
new Tuple<byte[], byte[]>(null, "log1".getBytes())),
new Tuple<>(Action.MERGE,
new Tuple<>("k2".getBytes(), "v22".getBytes())),
new Tuple<>(Action.DELETE,
new Tuple<byte[], byte[]>("k3".getBytes(), null))
);
// load test data to the write batch
try (final WriteBatch batch = new WriteBatch()) {
for (final Tuple<Action, Tuple<byte[], byte[]>> testEvent : testEvents) {
final Tuple<byte[], byte[]> data = testEvent.value;
switch (testEvent.key) {
@ -67,29 +65,27 @@ public class WriteBatchHandlerTest {
}
}
// attempt to read test data back from the WriteBatch by iterating with a handler
handler = new CapturingWriteBatchHandler();
batch.iterate(handler);
// attempt to read test data back from the WriteBatch by iterating
// with a handler
try (final CapturingWriteBatchHandler handler =
new CapturingWriteBatchHandler()) {
batch.iterate(handler);
// compare the results to the test data
final List<Tuple<Action, Tuple<byte[], byte[]>>> actualEvents = handler.getEvents();
assertThat(testEvents.size()).isSameAs(actualEvents.size());
// compare the results to the test data
final List<Tuple<Action, Tuple<byte[], byte[]>>> actualEvents =
handler.getEvents();
assertThat(testEvents.size()).isSameAs(actualEvents.size());
for (int i = 0; i < testEvents.size(); i++) {
assertThat(equals(testEvents.get(i), actualEvents.get(i))).isTrue();
}
} finally {
if (handler != null) {
handler.dispose();
}
if (batch != null) {
batch.dispose();
for (int i = 0; i < testEvents.size(); i++) {
assertThat(equals(testEvents.get(i), actualEvents.get(i))).isTrue();
}
}
}
}
private static boolean equals(final Tuple<Action, Tuple<byte[], byte[]>> expected,
final Tuple<Action, Tuple<byte[], byte[]>> actual) {
private static boolean equals(
final Tuple<Action, Tuple<byte[], byte[]>> expected,
final Tuple<Action, Tuple<byte[], byte[]>> actual) {
if (!expected.key.equals(actual.key)) {
return false;
}
@ -136,7 +132,8 @@ public class WriteBatchHandlerTest {
*/
private static class CapturingWriteBatchHandler extends WriteBatch.Handler {
private final List<Tuple<Action, Tuple<byte[], byte[]>>> events = new ArrayList<>();
private final List<Tuple<Action, Tuple<byte[], byte[]>>> events
= new ArrayList<>();
/**
* Returns a copy of the current events list
@ -159,12 +156,14 @@ public class WriteBatchHandlerTest {
@Override
public void delete(final byte[] key) {
events.add(new Tuple<>(Action.DELETE, new Tuple<byte[], byte[]>(key, null)));
events.add(new Tuple<>(Action.DELETE,
new Tuple<byte[], byte[]>(key, null)));
}
@Override
public void logData(final byte[] blob) {
events.add(new Tuple<>(Action.LOG, new Tuple<byte[], byte[]>(null, blob)));
events.add(new Tuple<>(Action.LOG,
new Tuple<byte[], byte[]>(null, blob)));
}
}
}

@ -20,9 +20,9 @@ import static org.assertj.core.api.Assertions.assertThat;
/**
* This class mimics the db/write_batch_test.cc
* in the c++ rocksdb library.
*
* <p/>
* Not ported yet:
*
* <p/>
* Continue();
* PutGatherSlices();
*/
@ -36,80 +36,90 @@ public class WriteBatchTest {
@Test
public void emptyWriteBatch() {
WriteBatch batch = new WriteBatch();
assertThat(batch.count()).isEqualTo(0);
try (final WriteBatch batch = new WriteBatch()) {
assertThat(batch.count()).isEqualTo(0);
}
}
@Test
public void multipleBatchOperations()
throws UnsupportedEncodingException {
WriteBatch batch = new WriteBatch();
batch.put("foo".getBytes("US-ASCII"), "bar".getBytes("US-ASCII"));
batch.remove("box".getBytes("US-ASCII"));
batch.put("baz".getBytes("US-ASCII"), "boo".getBytes("US-ASCII"));
WriteBatchTestInternalHelper.setSequence(batch, 100);
assertThat(WriteBatchTestInternalHelper.sequence(batch)).
isNotNull().
isEqualTo(100);
assertThat(batch.count()).isEqualTo(3);
assertThat(new String(getContents(batch), "US-ASCII")).
isEqualTo("Put(baz, boo)@102" +
"Delete(box)@101" +
"Put(foo, bar)@100");
try (WriteBatch batch = new WriteBatch()) {
batch.put("foo".getBytes("US-ASCII"), "bar".getBytes("US-ASCII"));
batch.remove("box".getBytes("US-ASCII"));
batch.put("baz".getBytes("US-ASCII"), "boo".getBytes("US-ASCII"));
WriteBatchTestInternalHelper.setSequence(batch, 100);
assertThat(WriteBatchTestInternalHelper.sequence(batch)).
isNotNull().
isEqualTo(100);
assertThat(batch.count()).isEqualTo(3);
assertThat(new String(getContents(batch), "US-ASCII")).
isEqualTo("Put(baz, boo)@102" +
"Delete(box)@101" +
"Put(foo, bar)@100");
}
}
@Test
public void testAppendOperation()
throws UnsupportedEncodingException {
WriteBatch b1 = new WriteBatch();
WriteBatch b2 = new WriteBatch();
WriteBatchTestInternalHelper.setSequence(b1, 200);
WriteBatchTestInternalHelper.setSequence(b2, 300);
WriteBatchTestInternalHelper.append(b1, b2);
assertThat(getContents(b1).length).isEqualTo(0);
assertThat(b1.count()).isEqualTo(0);
b2.put("a".getBytes("US-ASCII"), "va".getBytes("US-ASCII"));
WriteBatchTestInternalHelper.append(b1, b2);
assertThat("Put(a, va)@200".equals(new String(getContents(b1), "US-ASCII")));
assertThat(b1.count()).isEqualTo(1);
b2.clear();
b2.put("b".getBytes("US-ASCII"), "vb".getBytes("US-ASCII"));
WriteBatchTestInternalHelper.append(b1, b2);
assertThat(("Put(a, va)@200" +
"Put(b, vb)@201")
.equals(new String(getContents(b1), "US-ASCII")));
assertThat(b1.count()).isEqualTo(2);
b2.remove("foo".getBytes("US-ASCII"));
WriteBatchTestInternalHelper.append(b1, b2);
assertThat(("Put(a, va)@200" +
"Put(b, vb)@202" +
"Put(b, vb)@201" +
"Delete(foo)@203")
.equals(new String(getContents(b1), "US-ASCII")));
assertThat(b1.count()).isEqualTo(4);
try (final WriteBatch b1 = new WriteBatch();
final WriteBatch b2 = new WriteBatch()) {
WriteBatchTestInternalHelper.setSequence(b1, 200);
WriteBatchTestInternalHelper.setSequence(b2, 300);
WriteBatchTestInternalHelper.append(b1, b2);
assertThat(getContents(b1).length).isEqualTo(0);
assertThat(b1.count()).isEqualTo(0);
b2.put("a".getBytes("US-ASCII"), "va".getBytes("US-ASCII"));
WriteBatchTestInternalHelper.append(b1, b2);
assertThat("Put(a, va)@200".equals(new String(getContents(b1),
"US-ASCII")));
assertThat(b1.count()).isEqualTo(1);
b2.clear();
b2.put("b".getBytes("US-ASCII"), "vb".getBytes("US-ASCII"));
WriteBatchTestInternalHelper.append(b1, b2);
assertThat(("Put(a, va)@200" +
"Put(b, vb)@201")
.equals(new String(getContents(b1), "US-ASCII")));
assertThat(b1.count()).isEqualTo(2);
b2.remove("foo".getBytes("US-ASCII"));
WriteBatchTestInternalHelper.append(b1, b2);
assertThat(("Put(a, va)@200" +
"Put(b, vb)@202" +
"Put(b, vb)@201" +
"Delete(foo)@203")
.equals(new String(getContents(b1), "US-ASCII")));
assertThat(b1.count()).isEqualTo(4);
}
}
@Test
public void blobOperation()
throws UnsupportedEncodingException {
WriteBatch batch = new WriteBatch();
batch.put("k1".getBytes("US-ASCII"), "v1".getBytes("US-ASCII"));
batch.put("k2".getBytes("US-ASCII"), "v2".getBytes("US-ASCII"));
batch.put("k3".getBytes("US-ASCII"), "v3".getBytes("US-ASCII"));
batch.putLogData("blob1".getBytes("US-ASCII"));
batch.remove("k2".getBytes("US-ASCII"));
batch.putLogData("blob2".getBytes("US-ASCII"));
batch.merge("foo".getBytes("US-ASCII"), "bar".getBytes("US-ASCII"));
assertThat(batch.count()).isEqualTo(5);
assertThat(("Merge(foo, bar)@4" +
"Put(k1, v1)@0" +
"Delete(k2)@3" +
"Put(k2, v2)@1" +
"Put(k3, v3)@2")
.equals(new String(getContents(batch), "US-ASCII")));
try (final WriteBatch batch = new WriteBatch()) {
batch.put("k1".getBytes("US-ASCII"), "v1".getBytes("US-ASCII"));
batch.put("k2".getBytes("US-ASCII"), "v2".getBytes("US-ASCII"));
batch.put("k3".getBytes("US-ASCII"), "v3".getBytes("US-ASCII"));
batch.putLogData("blob1".getBytes("US-ASCII"));
batch.remove("k2".getBytes("US-ASCII"));
batch.putLogData("blob2".getBytes("US-ASCII"));
batch.merge("foo".getBytes("US-ASCII"), "bar".getBytes("US-ASCII"));
assertThat(batch.count()).isEqualTo(5);
assertThat(("Merge(foo, bar)@4" +
"Put(k1, v1)@0" +
"Delete(k2)@3" +
"Put(k2, v2)@1" +
"Put(k3, v3)@2")
.equals(new String(getContents(batch), "US-ASCII")));
}
}
static byte[] getContents(final WriteBatch wb) {
return getContents(wb.nativeHandle_);
}
static native byte[] getContents(WriteBatch batch);
private static native byte[] getContents(final long writeBatchHandle);
}
/**
@ -117,7 +127,23 @@ public class WriteBatchTest {
* c++ WriteBatchInternal.
*/
class WriteBatchTestInternalHelper {
static native void setSequence(WriteBatch batch, long sn);
static native long sequence(WriteBatch batch);
static native void append(WriteBatch b1, WriteBatch b2);
static void setSequence(final WriteBatch wb, final long sn) {
setSequence(wb.nativeHandle_, sn);
}
static long sequence(final WriteBatch wb) {
return sequence(wb.nativeHandle_);
}
static void append(final WriteBatch wb1, final WriteBatch wb2) {
append(wb1.nativeHandle_, wb2.nativeHandle_);
}
private static native void setSequence(final long writeBatchHandle,
final long sn);
private static native long sequence(final long writeBatchHandle);
private static native void append(final long writeBatchHandle1,
final long writeBatchHandle2);
}

@ -32,13 +32,9 @@ public class WriteBatchWithIndexTest {
@Test
public void readYourOwnWrites() throws RocksDBException {
RocksDB db = null;
Options options = null;
try {
options = new Options();
// Setup options
options.setCreateIfMissing(true);
db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath());
try (final Options options = new Options().setCreateIfMissing(true);
final RocksDB db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath())) {
final byte[] k1 = "key1".getBytes();
final byte[] v1 = "value1".getBytes();
@ -48,13 +44,9 @@ public class WriteBatchWithIndexTest {
db.put(k1, v1);
db.put(k2, v2);
final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true);
RocksIterator base = null;
RocksIterator it = null;
try {
base = db.newIterator();
it = wbwi.newIteratorWithBase(base);
try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true);
final RocksIterator base = db.newIterator();
final RocksIterator it = wbwi.newIteratorWithBase(base)) {
it.seek(k1);
assertThat(it.isValid()).isTrue();
@ -95,167 +87,121 @@ public class WriteBatchWithIndexTest {
assertThat(it.isValid()).isTrue();
assertThat(it.key()).isEqualTo(k1);
assertThat(it.value()).isEqualTo(v1Other);
} finally {
if (it != null) {
it.dispose();
}
if (base != null) {
base.dispose();
}
}
} finally {
if (db != null) {
db.close();
}
if (options != null) {
options.dispose();
}
}
}
@Test
public void write_writeBatchWithIndex() throws RocksDBException {
RocksDB db = null;
Options options = null;
try {
options = new Options();
// Setup options
options.setCreateIfMissing(true);
db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath());
try (final Options options = new Options().setCreateIfMissing(true);
final RocksDB db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath())) {
final byte[] k1 = "key1".getBytes();
final byte[] v1 = "value1".getBytes();
final byte[] k2 = "key2".getBytes();
final byte[] v2 = "value2".getBytes();
WriteBatchWithIndex wbwi = null;
try {
wbwi = new WriteBatchWithIndex();
try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex()) {
wbwi.put(k1, v1);
wbwi.put(k2, v2);
db.write(new WriteOptions(), wbwi);
} finally {
if(wbwi != null) {
wbwi.dispose();
}
}
assertThat(db.get(k1)).isEqualTo(v1);
assertThat(db.get(k2)).isEqualTo(v2);
} finally {
if (db != null) {
db.close();
}
if (options != null) {
options.dispose();
}
}
}
@Test
public void iterator() throws RocksDBException {
final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true);
final String k1 = "key1";
final String v1 = "value1";
final String k2 = "key2";
final String v2 = "value2";
final String k3 = "key3";
final String v3 = "value3";
final byte[] k1b = k1.getBytes();
final byte[] v1b = v1.getBytes();
final byte[] k2b = k2.getBytes();
final byte[] v2b = v2.getBytes();
final byte[] k3b = k3.getBytes();
final byte[] v3b = v3.getBytes();
//add put records
wbwi.put(k1b, v1b);
wbwi.put(k2b, v2b);
wbwi.put(k3b, v3b);
//add a deletion record
final String k4 = "key4";
final byte[] k4b = k4.getBytes();
wbwi.remove(k4b);
WBWIRocksIterator.WriteEntry[] expected = {
new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.PUT,
new DirectSlice(k1), new DirectSlice(v1)),
new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.PUT,
new DirectSlice(k2), new DirectSlice(v2)),
new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.PUT,
new DirectSlice(k3), new DirectSlice(v3)),
new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.DELETE,
new DirectSlice(k4), DirectSlice.NONE)
};
WBWIRocksIterator it = null;
try {
it = wbwi.newIterator();
//direct access - seek to key offsets
final int[] testOffsets = {2, 0, 1, 3};
for(int i = 0; i < testOffsets.length; i++) {
final int testOffset = testOffsets[i];
final byte[] key = toArray(expected[testOffset].getKey().data());
it.seek(key);
assertThat(it.isValid()).isTrue();
assertThat(it.entry().equals(expected[testOffset])).isTrue();
}
//forward iterative access
int i = 0;
for(it.seekToFirst(); it.isValid(); it.next()) {
assertThat(it.entry().equals(expected[i++])).isTrue();
}
try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true)) {
final String k1 = "key1";
final String v1 = "value1";
final String k2 = "key2";
final String v2 = "value2";
final String k3 = "key3";
final String v3 = "value3";
final byte[] k1b = k1.getBytes();
final byte[] v1b = v1.getBytes();
final byte[] k2b = k2.getBytes();
final byte[] v2b = v2.getBytes();
final byte[] k3b = k3.getBytes();
final byte[] v3b = v3.getBytes();
//add put records
wbwi.put(k1b, v1b);
wbwi.put(k2b, v2b);
wbwi.put(k3b, v3b);
//add a deletion record
final String k4 = "key4";
final byte[] k4b = k4.getBytes();
wbwi.remove(k4b);
final WBWIRocksIterator.WriteEntry[] expected = {
new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.PUT,
new DirectSlice(k1), new DirectSlice(v1)),
new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.PUT,
new DirectSlice(k2), new DirectSlice(v2)),
new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.PUT,
new DirectSlice(k3), new DirectSlice(v3)),
new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.DELETE,
new DirectSlice(k4), DirectSlice.NONE)
};
try (final WBWIRocksIterator it = wbwi.newIterator()) {
//direct access - seek to key offsets
final int[] testOffsets = {2, 0, 1, 3};
for (int i = 0; i < testOffsets.length; i++) {
final int testOffset = testOffsets[i];
final byte[] key = toArray(expected[testOffset].getKey().data());
it.seek(key);
assertThat(it.isValid()).isTrue();
final WBWIRocksIterator.WriteEntry entry = it.entry();
assertThat(entry.equals(expected[testOffset])).isTrue();
}
//reverse iterative access
i = expected.length - 1;
for(it.seekToLast(); it.isValid(); it.prev()) {
assertThat(it.entry().equals(expected[i--])).isTrue();
}
//forward iterative access
int i = 0;
for (it.seekToFirst(); it.isValid(); it.next()) {
assertThat(it.entry().equals(expected[i++])).isTrue();
}
} finally {
if(it != null) {
it.dispose();
//reverse iterative access
i = expected.length - 1;
for (it.seekToLast(); it.isValid(); it.prev()) {
assertThat(it.entry().equals(expected[i--])).isTrue();
}
}
}
}
@Test
public void zeroByteTests() {
final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true);
byte[] zeroByteValue = new byte[] { 0, 0 };
//add zero byte value
wbwi.put(zeroByteValue, zeroByteValue);
ByteBuffer buffer = ByteBuffer.allocateDirect(zeroByteValue.length);
buffer.put(zeroByteValue);
WBWIRocksIterator.WriteEntry[] expected = {
new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.PUT,
new DirectSlice(buffer, zeroByteValue.length),
new DirectSlice(buffer, zeroByteValue.length))
};
WBWIRocksIterator it = null;
try {
it = wbwi.newIterator();
it.seekToFirst();
assertThat(it.entry().equals(expected[0])).isTrue();
assertThat(it.entry().hashCode() == expected[0].hashCode()).isTrue();
} finally {
if(it != null) {
it.dispose();
try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true)) {
final byte[] zeroByteValue = new byte[]{0, 0};
//add zero byte value
wbwi.put(zeroByteValue, zeroByteValue);
final ByteBuffer buffer = ByteBuffer.allocateDirect(zeroByteValue.length);
buffer.put(zeroByteValue);
WBWIRocksIterator.WriteEntry[] expected = {
new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.PUT,
new DirectSlice(buffer, zeroByteValue.length),
new DirectSlice(buffer, zeroByteValue.length))
};
try (final WBWIRocksIterator it = wbwi.newIterator()) {
it.seekToFirst();
assertThat(it.entry().equals(expected[0])).isTrue();
assertThat(it.entry().hashCode() == expected[0].hashCode()).isTrue();
}
}
}

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save