diff --git a/java/Makefile b/java/Makefile index 11c6c807e..d7c837ebb 100644 --- a/java/Makefile +++ b/java/Makefile @@ -1,6 +1,7 @@ NATIVE_JAVA_CLASSES = org.rocksdb.AbstractCompactionFilter\ org.rocksdb.AbstractCompactionFilterFactory\ org.rocksdb.AbstractSlice\ + org.rocksdb.AbstractTransactionNotifier\ org.rocksdb.BackupEngine\ org.rocksdb.BackupableDBOptions\ org.rocksdb.BlockBasedTableConfig\ @@ -29,6 +30,8 @@ NATIVE_JAVA_CLASSES = org.rocksdb.AbstractCompactionFilter\ org.rocksdb.Logger\ org.rocksdb.LRUCache\ org.rocksdb.MergeOperator\ + org.rocksdb.OptimisticTransactionDB\ + org.rocksdb.OptimisticTransactionOptions\ org.rocksdb.Options\ org.rocksdb.OptionsUtil\ org.rocksdb.PlainTableConfig\ @@ -45,6 +48,10 @@ NATIVE_JAVA_CLASSES = org.rocksdb.AbstractCompactionFilter\ org.rocksdb.Slice\ org.rocksdb.SstFileWriter\ org.rocksdb.Statistics\ + org.rocksdb.Transaction\ + org.rocksdb.TransactionDB\ + org.rocksdb.TransactionDBOptions\ + org.rocksdb.TransactionOptions\ org.rocksdb.TransactionLogIterator\ org.rocksdb.TtlDB\ org.rocksdb.VectorMemTableConfig\ @@ -105,6 +112,9 @@ JAVA_TESTS = org.rocksdb.BackupableDBOptionsTest\ org.rocksdb.MixedOptionsTest\ org.rocksdb.MutableColumnFamilyOptionsTest\ org.rocksdb.NativeLibraryLoaderTest\ + org.rocksdb.OptimisticTransactionTest\ + org.rocksdb.OptimisticTransactionDBTest\ + org.rocksdb.OptimisticTransactionOptionsTest\ org.rocksdb.OptionsUtilTest\ org.rocksdb.OptionsTest\ org.rocksdb.PlainTableConfigTest\ @@ -120,6 +130,10 @@ JAVA_TESTS = org.rocksdb.BackupableDBOptionsTest\ org.rocksdb.SliceTest\ org.rocksdb.SnapshotTest\ org.rocksdb.SstFileWriterTest\ + org.rocksdb.TransactionTest\ + org.rocksdb.TransactionDBTest\ + org.rocksdb.TransactionOptionsTest\ + org.rocksdb.TransactionDBOptionsTest\ org.rocksdb.TransactionLogIteratorTest\ org.rocksdb.TtlDBTest\ org.rocksdb.StatisticsTest\ @@ -209,6 +223,20 @@ column_family_sample: java java $(JAVA_ARGS) -Djava.library.path=target -cp $(MAIN_CLASSES):$(SAMPLES_MAIN_CLASSES) RocksDBColumnFamilySample /tmp/rocksdbjni $(AM_V_at)@rm -rf /tmp/rocksdbjni +transaction_sample: java + $(AM_V_GEN)mkdir -p $(SAMPLES_MAIN_CLASSES) + $(AM_V_at)javac -cp $(MAIN_CLASSES) -d $(SAMPLES_MAIN_CLASSES) $(SAMPLES_MAIN_SRC)/TransactionSample.java + $(AM_V_at)@rm -rf /tmp/rocksdbjni + java -ea -Xcheck:jni -Djava.library.path=target -cp $(MAIN_CLASSES):$(SAMPLES_MAIN_CLASSES) TransactionSample /tmp/rocksdbjni + $(AM_V_at)@rm -rf /tmp/rocksdbjni + +optimistic_transaction_sample: java + $(AM_V_GEN)mkdir -p $(SAMPLES_MAIN_CLASSES) + $(AM_V_at)javac -cp $(MAIN_CLASSES) -d $(SAMPLES_MAIN_CLASSES) $(SAMPLES_MAIN_SRC)/OptimisticTransactionSample.java + $(AM_V_at)@rm -rf /tmp/rocksdbjni + java -ea -Xcheck:jni -Djava.library.path=target -cp $(MAIN_CLASSES):$(SAMPLES_MAIN_CLASSES) OptimisticTransactionSample /tmp/rocksdbjni + $(AM_V_at)@rm -rf /tmp/rocksdbjni + resolve_test_deps: test -d "$(JAVA_TEST_LIBDIR)" || mkdir -p "$(JAVA_TEST_LIBDIR)" test -s "$(JAVA_JUNIT_JAR)" || cp $(MVN_LOCAL)/junit/junit/4.12/junit-4.12.jar $(JAVA_TEST_LIBDIR) || curl -k -L -o $(JAVA_JUNIT_JAR) $(SEARCH_REPO_URL)junit/junit/4.12/junit-4.12.jar diff --git a/java/rocksjni/columnfamilyhandle.cc b/java/rocksjni/columnfamilyhandle.cc index 6e40a7e01..c3274ed98 100644 --- a/java/rocksjni/columnfamilyhandle.cc +++ b/java/rocksjni/columnfamilyhandle.cc @@ -3,8 +3,8 @@ // COPYING file in the root directory) and Apache 2.0 License // (found in the LICENSE.Apache file in the root directory). 
// -// This file implements the "bridge" between Java and C++ and enables -// calling c++ rocksdb::Iterator methods from Java side. +// This file implements the "bridge" between Java and C++ for +// rocksdb::ColumnFamilyHandle. #include #include @@ -13,14 +13,56 @@ #include "include/org_rocksdb_ColumnFamilyHandle.h" #include "rocksjni/portal.h" +/* + * Class: org_rocksdb_ColumnFamilyHandle + * Method: getName + * Signature: (J)[B + */ +jbyteArray Java_org_rocksdb_ColumnFamilyHandle_getName( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto* cfh = reinterpret_cast(jhandle); + std::string cf_name = cfh->GetName(); + return rocksdb::JniUtil::copyBytes(env, cf_name); +} + +/* +* Class: org_rocksdb_ColumnFamilyHandle +* Method: getID +* Signature: (J)I +*/ +jint Java_org_rocksdb_ColumnFamilyHandle_getID( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto* cfh = reinterpret_cast(jhandle); + const int32_t id = cfh->GetID(); + return static_cast(id); +} + +/* + * Class: org_rocksdb_ColumnFamilyHandle + * Method: getDescriptor + * Signature: (J)Lorg/rocksdb/ColumnFamilyDescriptor; + */ +jobject Java_org_rocksdb_ColumnFamilyHandle_getDescriptor( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto* cfh = reinterpret_cast(jhandle); + rocksdb::ColumnFamilyDescriptor desc; + rocksdb::Status s = cfh->GetDescriptor(&desc); + if (s.ok()) { + return rocksdb::ColumnFamilyDescriptorJni::construct(env, &desc); + } else { + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + return nullptr; + } +} + /* * Class: org_rocksdb_ColumnFamilyHandle * Method: disposeInternal * Signature: (J)V */ void Java_org_rocksdb_ColumnFamilyHandle_disposeInternal( - JNIEnv* env, jobject jobj, jlong handle) { - auto* cfh = reinterpret_cast(handle); + JNIEnv* env, jobject jobj, jlong jhandle) { + auto* cfh = reinterpret_cast(jhandle); assert(cfh != nullptr); delete cfh; } diff --git a/java/rocksjni/optimistic_transaction_db.cc b/java/rocksjni/optimistic_transaction_db.cc new file mode 100644 index 000000000..3381c78f2 --- /dev/null +++ b/java/rocksjni/optimistic_transaction_db.cc @@ -0,0 +1,267 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ +// for rocksdb::TransactionDB. 
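For reference, the JNI functions below (and the new transaction_sample / optimistic_transaction_sample Makefile targets) ultimately drive the C++ optimistic-transaction API. The following is a minimal illustrative sketch of that underlying API, not part of this patch; the database path is an arbitrary example and error handling is reduced to asserts.

// Sketch only: the C++ API exposed to Java by the bridge functions below.
#include <cassert>

#include "rocksdb/options.h"
#include "rocksdb/utilities/optimistic_transaction_db.h"
#include "rocksdb/utilities/transaction.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;

  rocksdb::OptimisticTransactionDB* otdb = nullptr;
  rocksdb::Status s =
      rocksdb::OptimisticTransactionDB::Open(options, "/tmp/otdb_sketch", &otdb);
  assert(s.ok());

  // Mirrors Java_org_rocksdb_OptimisticTransactionDB_beginTransaction__JJ.
  rocksdb::WriteOptions write_options;
  rocksdb::Transaction* txn = otdb->BeginTransaction(write_options);

  s = txn->Put("key1", "value1");
  assert(s.ok());

  // Commit performs the optimistic conflict check; on conflict it returns a
  // busy/aborted status rather than succeeding.
  s = txn->Commit();
  assert(s.ok());

  delete txn;
  delete otdb;
  return 0;
}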
+ +#include + +#include "include/org_rocksdb_OptimisticTransactionDB.h" + +#include "rocksdb/options.h" +#include "rocksdb/utilities/optimistic_transaction_db.h" +#include "rocksdb/utilities/transaction.h" + +#include "rocksjni/portal.h" + +/* + * Class: org_rocksdb_OptimisticTransactionDB + * Method: open + * Signature: (JLjava/lang/String;)J + */ +jlong Java_org_rocksdb_OptimisticTransactionDB_open__JLjava_lang_String_2( + JNIEnv* env, jclass jcls, jlong joptions_handle, jstring jdb_path) { + const char* db_path = env->GetStringUTFChars(jdb_path, nullptr); + if (db_path == nullptr) { + // exception thrown: OutOfMemoryError + return 0; + } + + auto* options = reinterpret_cast(joptions_handle); + rocksdb::OptimisticTransactionDB* otdb = nullptr; + rocksdb::Status s = + rocksdb::OptimisticTransactionDB::Open(*options, db_path, &otdb); + env->ReleaseStringUTFChars(jdb_path, db_path); + + if (s.ok()) { + return reinterpret_cast(otdb); + } else { + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + return 0; + } +} + +/* + * Class: org_rocksdb_OptimisticTransactionDB + * Method: open + * Signature: (JLjava/lang/String;[[B[J)[J + */ +jlongArray Java_org_rocksdb_OptimisticTransactionDB_open__JLjava_lang_String_2_3_3B_3J( + JNIEnv* env, jclass jcls, jlong jdb_options_handle, jstring jdb_path, + jobjectArray jcolumn_names, jlongArray jcolumn_options_handles) { + const char* db_path = env->GetStringUTFChars(jdb_path, nullptr); + if (db_path == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + std::vector column_families; + const jsize len_cols = env->GetArrayLength(jcolumn_names); + if (len_cols > 0) { + if (env->EnsureLocalCapacity(len_cols) != 0) { + // out of memory + env->ReleaseStringUTFChars(jdb_path, db_path); + return nullptr; + } + + jlong* jco = + env->GetLongArrayElements(jcolumn_options_handles, nullptr); + if(jco == nullptr) { + // exception thrown: OutOfMemoryError + env->ReleaseStringUTFChars(jdb_path, db_path); + return nullptr; + } + + for (int i = 0; i < len_cols; i++) { + const jobject jcn = env->GetObjectArrayElement(jcolumn_names, i); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + env->ReleaseLongArrayElements(jcolumn_options_handles, jco, JNI_ABORT); + env->ReleaseStringUTFChars(jdb_path, db_path); + return nullptr; + } + + const jbyteArray jcn_ba = reinterpret_cast(jcn); + const jsize jcf_name_len = env->GetArrayLength(jcn_ba); + if (env->EnsureLocalCapacity(jcf_name_len) != 0) { + // out of memory + env->DeleteLocalRef(jcn); + env->ReleaseLongArrayElements(jcolumn_options_handles, jco, JNI_ABORT); + env->ReleaseStringUTFChars(jdb_path, db_path); + return nullptr; + } + + jbyte* jcf_name = env->GetByteArrayElements(jcn_ba, nullptr); + if (jcf_name == nullptr) { + // exception thrown: OutOfMemoryError + env->DeleteLocalRef(jcn); + env->ReleaseLongArrayElements(jcolumn_options_handles, jco, JNI_ABORT); + env->ReleaseStringUTFChars(jdb_path, db_path); + return nullptr; + } + + const std::string cf_name(reinterpret_cast(jcf_name), jcf_name_len); + const rocksdb::ColumnFamilyOptions* cf_options = + reinterpret_cast(jco[i]); + column_families.push_back( + rocksdb::ColumnFamilyDescriptor(cf_name, *cf_options)); + + env->ReleaseByteArrayElements(jcn_ba, jcf_name, JNI_ABORT); + env->DeleteLocalRef(jcn); + } + env->ReleaseLongArrayElements(jcolumn_options_handles, jco, JNI_ABORT); + } + + auto* db_options = reinterpret_cast(jdb_options_handle); + std::vector handles; + rocksdb::OptimisticTransactionDB* otdb = nullptr; + 
const rocksdb::Status s = rocksdb::OptimisticTransactionDB::Open(*db_options, + db_path, column_families, &handles, &otdb); + + env->ReleaseStringUTFChars(jdb_path, db_path); + + // check if open operation was successful + if (s.ok()) { + const jsize resultsLen = 1 + len_cols; // db handle + column family handles + std::unique_ptr results = + std::unique_ptr(new jlong[resultsLen]); + results[0] = reinterpret_cast(otdb); + for (int i = 1; i <= len_cols; i++) { + results[i] = reinterpret_cast(handles[i - 1]); + } + + jlongArray jresults = env->NewLongArray(resultsLen); + if (jresults == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + env->SetLongArrayRegion(jresults, 0, resultsLen, results.get()); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + return nullptr; + } + return jresults; + } + + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + return nullptr; +} + +/* + * Class: org_rocksdb_OptimisticTransactionDB + * Method: beginTransaction + * Signature: (JJ)J + */ +jlong Java_org_rocksdb_OptimisticTransactionDB_beginTransaction__JJ( + JNIEnv* env, jobject jobj, jlong jhandle, jlong jwrite_options_handle) { + auto* optimistic_txn_db = + reinterpret_cast(jhandle); + auto* write_options = + reinterpret_cast(jwrite_options_handle); + rocksdb::Transaction* txn = + optimistic_txn_db->BeginTransaction(*write_options); + return reinterpret_cast(txn); +} + +/* + * Class: org_rocksdb_OptimisticTransactionDB + * Method: beginTransaction + * Signature: (JJJ)J + */ +jlong Java_org_rocksdb_OptimisticTransactionDB_beginTransaction__JJJ( + JNIEnv* env, jobject jobj, jlong jhandle, jlong jwrite_options_handle, + jlong joptimistic_txn_options_handle) { + auto* optimistic_txn_db = + reinterpret_cast(jhandle); + auto* write_options = + reinterpret_cast(jwrite_options_handle); + auto* optimistic_txn_options = + reinterpret_cast( + joptimistic_txn_options_handle); + rocksdb::Transaction* txn = + optimistic_txn_db->BeginTransaction(*write_options, + *optimistic_txn_options); + return reinterpret_cast(txn); +} + +/* + * Class: org_rocksdb_OptimisticTransactionDB + * Method: beginTransaction_withOld + * Signature: (JJJ)J + */ +jlong Java_org_rocksdb_OptimisticTransactionDB_beginTransaction_1withOld__JJJ( + JNIEnv* env, jobject jobj, jlong jhandle, jlong jwrite_options_handle, + jlong jold_txn_handle) { + auto* optimistic_txn_db = + reinterpret_cast(jhandle); + auto* write_options = + reinterpret_cast(jwrite_options_handle); + auto* old_txn = + reinterpret_cast( + jold_txn_handle); + rocksdb::OptimisticTransactionOptions optimistic_txn_options; + rocksdb::Transaction* txn = + optimistic_txn_db->BeginTransaction(*write_options, + optimistic_txn_options, old_txn); + + // RocksJava relies on the assumption that + // we do not allocate a new Transaction object + // when providing an old_optimistic_txn + assert(txn == old_txn); + + return reinterpret_cast(txn); +} + +/* + * Class: org_rocksdb_OptimisticTransactionDB + * Method: beginTransaction_withOld + * Signature: (JJJJ)J + */ +jlong Java_org_rocksdb_OptimisticTransactionDB_beginTransaction_1withOld__JJJJ( + JNIEnv* env, jobject jobj, jlong jhandle, jlong jwrite_options_handle, + jlong joptimistic_txn_options_handle, jlong jold_txn_handle) { + auto* optimistic_txn_db = + reinterpret_cast(jhandle); + auto* write_options = + reinterpret_cast(jwrite_options_handle); + auto* optimistic_txn_options = + reinterpret_cast( + joptimistic_txn_options_handle); + auto* old_txn = + reinterpret_cast( + 
jold_txn_handle); + rocksdb::Transaction* txn = + optimistic_txn_db->BeginTransaction(*write_options, + *optimistic_txn_options, old_txn); + + // RocksJava relies on the assumption that + // we do not allocate a new Transaction object + // when providing an old_optimisic_txn + assert(txn == old_txn); + + return reinterpret_cast(txn); +} + +/* + * Class: org_rocksdb_OptimisticTransactionDB + * Method: getBaseDB + * Signature: (J)J + */ +jlong Java_org_rocksdb_OptimisticTransactionDB_getBaseDB( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto* optimistic_txn_db = + reinterpret_cast(jhandle); + return reinterpret_cast(optimistic_txn_db->GetBaseDB()); +} + +/* + * Class: org_rocksdb_OptimisticTransactionDB + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_OptimisticTransactionDB_disposeInternal(JNIEnv* env, + jobject jobj, jlong jhandle) { + delete reinterpret_cast(jhandle); +} diff --git a/java/rocksjni/optimistic_transaction_options.cc b/java/rocksjni/optimistic_transaction_options.cc new file mode 100644 index 000000000..4c666e6ac --- /dev/null +++ b/java/rocksjni/optimistic_transaction_options.cc @@ -0,0 +1,72 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ +// for rocksdb::OptimisticTransactionOptions. + +#include + +#include "include/org_rocksdb_OptimisticTransactionOptions.h" + +#include "rocksdb/comparator.h" +#include "rocksdb/utilities/optimistic_transaction_db.h" + +/* + * Class: org_rocksdb_OptimisticTransactionOptions + * Method: newOptimisticTransactionOptions + * Signature: ()J + */ +jlong Java_org_rocksdb_OptimisticTransactionOptions_newOptimisticTransactionOptions( + JNIEnv* env, jclass jcls) { + rocksdb::OptimisticTransactionOptions* opts = + new rocksdb::OptimisticTransactionOptions(); + return reinterpret_cast(opts); +} + +/* + * Class: org_rocksdb_OptimisticTransactionOptions + * Method: isSetSnapshot + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_OptimisticTransactionOptions_isSetSnapshot( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto* opts = + reinterpret_cast(jhandle); + return opts->set_snapshot; +} + +/* + * Class: org_rocksdb_OptimisticTransactionOptions + * Method: setSetSnapshot + * Signature: (JZ)V + */ +void Java_org_rocksdb_OptimisticTransactionOptions_setSetSnapshot(JNIEnv* env, + jobject jobj, jlong jhandle, jboolean jset_snapshot) { + auto* opts = + reinterpret_cast(jhandle); + opts->set_snapshot = jset_snapshot; +} + +/* + * Class: org_rocksdb_OptimisticTransactionOptions + * Method: setComparator + * Signature: (JJ)V + */ +void Java_org_rocksdb_OptimisticTransactionOptions_setComparator( + JNIEnv* env, jobject jobj, jlong jhandle, jlong jcomparator_handle) { + auto* opts = + reinterpret_cast(jhandle); + opts->cmp = reinterpret_cast(jcomparator_handle); +} + +/* + * Class: org_rocksdb_OptimisticTransactionOptions + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_OptimisticTransactionOptions_disposeInternal(JNIEnv* env, + jobject jobj, jlong jhandle) { + delete reinterpret_cast(jhandle); +} diff --git a/java/rocksjni/portal.h b/java/rocksjni/portal.h index 522c374ac..2de445f49 100644 --- a/java/rocksjni/portal.h +++ b/java/rocksjni/portal.h @@ -14,9 +14,11 @@ #include #include #include +#include #include #include 
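The beginTransaction_withOld bridges above assert that RocksDB reuses the caller-supplied Transaction rather than allocating a new one. The following short C++ sketch shows that reuse, together with the set_snapshot flag wrapped by optimistic_transaction_options.cc; it assumes an already-open OptimisticTransactionDB* and is illustrative only, not part of this patch.

// Sketch only: old_txn reuse and OptimisticTransactionOptions::set_snapshot.
#include <cassert>

#include "rocksdb/utilities/optimistic_transaction_db.h"
#include "rocksdb/utilities/transaction.h"

void reuse_transaction(rocksdb::OptimisticTransactionDB* otdb) {
  rocksdb::WriteOptions write_options;
  rocksdb::OptimisticTransactionOptions txn_options;
  txn_options.set_snapshot = true;  // take a snapshot when the transaction starts

  rocksdb::Transaction* txn = otdb->BeginTransaction(write_options, txn_options);
  assert(txn->Put("k", "v").ok());
  assert(txn->Commit().ok());

  // Passing the finished transaction back in re-initialises it in place;
  // RocksJava depends on getting the same pointer back.
  rocksdb::Transaction* txn2 =
      otdb->BeginTransaction(write_options, txn_options, /*old_txn=*/txn);
  assert(txn2 == txn);

  assert(txn2->Put("k2", "v2").ok());
  assert(txn2->Commit().ok());
  delete txn2;
}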
#include +#include #include #include "rocksdb/db.h" @@ -24,10 +26,12 @@ #include "rocksdb/rate_limiter.h" #include "rocksdb/status.h" #include "rocksdb/utilities/backupable_db.h" +#include "rocksdb/utilities/transaction_db.h" #include "rocksdb/utilities/write_batch_with_index.h" #include "rocksjni/compaction_filter_factory_jnicallback.h" #include "rocksjni/comparatorjnicallback.h" #include "rocksjni/loggerjnicallback.h" +#include "rocksjni/transaction_notifier_jnicallback.h" #include "rocksjni/writebatchhandlerjnicallback.h" // Remove macro on windows @@ -1087,6 +1091,31 @@ class AbstractCompactionFilterFactoryJni : public RocksDBNativeClass< } }; +// The portal class for org.rocksdb.AbstractTransactionNotifier +class AbstractTransactionNotifierJni : public RocksDBNativeClass< + const rocksdb::TransactionNotifierJniCallback*, + AbstractTransactionNotifierJni> { + public: + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, + "org/rocksdb/AbstractTransactionNotifier"); + } + + // Get the java method `snapshotCreated` + // of org.rocksdb.AbstractTransactionNotifier. + static jmethodID getSnapshotCreatedMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if(jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jclazz, "snapshotCreated", "(J)V"); + assert(mid != nullptr); + return mid; + } +}; + // The portal class for org.rocksdb.AbstractComparator class AbstractComparatorJni : public RocksDBNativeClass< const rocksdb::BaseComparatorJniCallback*, @@ -2974,6 +3003,334 @@ class RateLimiterModeJni { } }; +// The portal class for org.rocksdb.Transaction +class TransactionJni : public JavaClass { + public: + /** + * Get the Java Class org.rocksdb.Transaction + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, + "org/rocksdb/Transaction"); + } + + /** + * Create a new Java org.rocksdb.Transaction.WaitingTransactions object + * + * @param env A pointer to the Java environment + * @param jtransaction A Java org.rocksdb.Transaction object + * @param column_family_id The id of the column family + * @param key The key + * @param transaction_ids The transaction ids + * + * @return A reference to a Java + * org.rocksdb.Transaction.WaitingTransactions object, + * or nullptr if an an exception occurs + */ + static jobject newWaitingTransactions(JNIEnv* env, jobject jtransaction, + const uint32_t column_family_id, const std::string &key, + const std::vector &transaction_ids) { + jclass jclazz = getJClass(env); + if(jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + jmethodID mid = env->GetMethodID( + jclazz, "newWaitingTransactions", "(JLjava/lang/String;[J)Lorg/rocksdb/Transaction$WaitingTransactions;"); + if(mid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + return nullptr; + } + + jstring jkey = env->NewStringUTF(key.c_str()); + if(jkey == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + const size_t len = transaction_ids.size(); + jlongArray jtransaction_ids = env->NewLongArray(static_cast(len)); + if(jtransaction_ids == nullptr) { + // exception thrown: OutOfMemoryError + env->DeleteLocalRef(jkey); + return nullptr; + } + + 
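      // Note (explanatory, not part of this patch): the transaction ids are
      // written into the Java long[] by pinning/copying the element buffer
      // below and then releasing it with mode 0, which commits the contents
      // back to the Java array; JNI_ABORT, used elsewhere for read-only
      // access, would discard the changes.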
jlong *body = env->GetLongArrayElements(jtransaction_ids, nullptr); + if(body == nullptr) { + // exception thrown: OutOfMemoryError + env->DeleteLocalRef(jkey); + env->DeleteLocalRef(jtransaction_ids); + return nullptr; + } + for(size_t i = 0; i < len; ++i) { + body[i] = static_cast(transaction_ids[i]); + } + env->ReleaseLongArrayElements(jtransaction_ids, body, 0); + + jobject jwaiting_transactions = env->CallObjectMethod(jtransaction, + mid, static_cast(column_family_id), jkey, jtransaction_ids); + if(env->ExceptionCheck()) { + // exception thrown: InstantiationException or OutOfMemoryError + env->DeleteLocalRef(jkey); + env->DeleteLocalRef(jtransaction_ids); + return nullptr; + } + + return jwaiting_transactions; + } +}; + +// The portal class for org.rocksdb.TransactionDB +class TransactionDBJni : public JavaClass { + public: + /** + * Get the Java Class org.rocksdb.TransactionDB + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, + "org/rocksdb/TransactionDB"); + } + + /** + * Create a new Java org.rocksdb.TransactionDB.DeadlockInfo object + * + * @param env A pointer to the Java environment + * @param jtransaction A Java org.rocksdb.Transaction object + * @param column_family_id The id of the column family + * @param key The key + * @param transaction_ids The transaction ids + * + * @return A reference to a Java + * org.rocksdb.Transaction.WaitingTransactions object, + * or nullptr if an an exception occurs + */ + static jobject newDeadlockInfo(JNIEnv* env, jobject jtransaction_db, + const rocksdb::TransactionID transaction_id, + const uint32_t column_family_id, const std::string &waiting_key, + const bool exclusive) { + jclass jclazz = getJClass(env); + if(jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + jmethodID mid = env->GetMethodID( + jclazz, "newDeadlockInfo", "(JJLjava/lang/String;Z)Lorg/rocksdb/TransactionDB$DeadlockInfo;"); + if(mid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + return nullptr; + } + + jstring jwaiting_key = env->NewStringUTF(waiting_key.c_str()); + if(jwaiting_key == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + // resolve the column family id to a ColumnFamilyHandle + jobject jdeadlock_info = env->CallObjectMethod(jtransaction_db, + mid, transaction_id, static_cast(column_family_id), + jwaiting_key, exclusive); + if(env->ExceptionCheck()) { + // exception thrown: InstantiationException or OutOfMemoryError + env->DeleteLocalRef(jwaiting_key); + return nullptr; + } + + return jdeadlock_info; + } +}; + +// The portal class for org.rocksdb.TxnDBWritePolicy +class TxnDBWritePolicyJni { + public: + // Returns the equivalent org.rocksdb.TxnDBWritePolicy for the provided + // C++ rocksdb::TxnDBWritePolicy enum + static jbyte toJavaTxnDBWritePolicy( + const rocksdb::TxnDBWritePolicy& txndb_write_policy) { + switch(txndb_write_policy) { + case rocksdb::TxnDBWritePolicy::WRITE_COMMITTED: + return 0x0; + case rocksdb::TxnDBWritePolicy::WRITE_PREPARED: + return 0x1; + case rocksdb::TxnDBWritePolicy::WRITE_UNPREPARED: + return 0x2; + default: + return 0x7F; // undefined + } + } + + // Returns the equivalent C++ rocksdb::TxnDBWritePolicy enum for the + // provided Java 
org.rocksdb.TxnDBWritePolicy + static rocksdb::TxnDBWritePolicy toCppTxnDBWritePolicy( + jbyte jtxndb_write_policy) { + switch(jtxndb_write_policy) { + case 0x0: + return rocksdb::TxnDBWritePolicy::WRITE_COMMITTED; + case 0x1: + return rocksdb::TxnDBWritePolicy::WRITE_PREPARED; + case 0x2: + return rocksdb::TxnDBWritePolicy::WRITE_UNPREPARED; + default: + // undefined/default + return rocksdb::TxnDBWritePolicy::WRITE_COMMITTED; + } + } +}; + +// The portal class for org.rocksdb.TransactionDB.KeyLockInfo +class KeyLockInfoJni : public JavaClass { + public: + /** + * Get the Java Class org.rocksdb.TransactionDB.KeyLockInfo + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, + "org/rocksdb/TransactionDB$KeyLockInfo"); + } + + /** + * Create a new Java org.rocksdb.TransactionDB.KeyLockInfo object + * with the same properties as the provided C++ rocksdb::KeyLockInfo object + * + * @param env A pointer to the Java environment + * @param key_lock_info The rocksdb::KeyLockInfo object + * + * @return A reference to a Java + * org.rocksdb.TransactionDB.KeyLockInfo object, + * or nullptr if an an exception occurs + */ + static jobject construct(JNIEnv* env, + const rocksdb::KeyLockInfo& key_lock_info) { + jclass jclazz = getJClass(env); + if(jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + jmethodID mid = env->GetMethodID( + jclazz, "", "(Ljava/lang/String;[JZ)V"); + if (mid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + return nullptr; + } + + jstring jkey = env->NewStringUTF(key_lock_info.key.c_str()); + if (jkey == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + const jsize jtransaction_ids_len = static_cast(key_lock_info.ids.size()); + jlongArray jtransactions_ids = env->NewLongArray(jtransaction_ids_len); + if (jtransactions_ids == nullptr) { + // exception thrown: OutOfMemoryError + env->DeleteLocalRef(jkey); + return nullptr; + } + + const jobject jkey_lock_info = env->NewObject(jclazz, mid, + jkey, jtransactions_ids, key_lock_info.exclusive); + if(jkey_lock_info == nullptr) { + // exception thrown: InstantiationException or OutOfMemoryError + env->DeleteLocalRef(jtransactions_ids); + env->DeleteLocalRef(jkey); + return nullptr; + } + + return jkey_lock_info; + } +}; + +// The portal class for org.rocksdb.TransactionDB.DeadlockInfo +class DeadlockInfoJni : public JavaClass { + public: + /** + * Get the Java Class org.rocksdb.TransactionDB.DeadlockInfo + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env,"org/rocksdb/TransactionDB$DeadlockInfo"); + } +}; + +// The portal class for org.rocksdb.TransactionDB.DeadlockPath +class DeadlockPathJni : public JavaClass { + public: + /** + * Get the Java Class org.rocksdb.TransactionDB.DeadlockPath + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or 
ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, + "org/rocksdb/TransactionDB$DeadlockPath"); + } + + /** + * Create a new Java org.rocksdb.TransactionDB.DeadlockPath object + * + * @param env A pointer to the Java environment + * + * @return A reference to a Java + * org.rocksdb.TransactionDB.DeadlockPath object, + * or nullptr if an an exception occurs + */ + static jobject construct(JNIEnv* env, + const jobjectArray jdeadlock_infos, const bool limit_exceeded) { + jclass jclazz = getJClass(env); + if(jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + jmethodID mid = env->GetMethodID( + jclazz, "", "([LDeadlockInfo;Z)V"); + if (mid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + return nullptr; + } + + const jobject jdeadlock_path = env->NewObject(jclazz, mid, + jdeadlock_infos, limit_exceeded); + if(jdeadlock_path == nullptr) { + // exception thrown: InstantiationException or OutOfMemoryError + return nullptr; + } + + return jdeadlock_path; + } +}; + // various utility functions for working with RocksDB and JNI class JniUtil { public: @@ -3497,7 +3854,7 @@ class ColumnFamilyDescriptorJni : public JavaClass { * nullptr if an an exception occurs */ static jobject construct(JNIEnv* env, ColumnFamilyDescriptor* cfd) { - jbyteArray cfname = JniUtil::copyBytes(env, cfd->name); + jbyteArray jcf_name = JniUtil::copyBytes(env, cfd->name); jobject cfopts = ColumnFamilyOptionsJni::construct(env, &(cfd->options)); jclass jclazz = getJClass(env); @@ -3510,11 +3867,13 @@ class ColumnFamilyDescriptorJni : public JavaClass { "([BLorg/rocksdb/ColumnFamilyOptions;)V"); if (mid == nullptr) { // exception thrown: NoSuchMethodException or OutOfMemoryError + env->DeleteLocalRef(jcf_name); return nullptr; } - jobject jcfd = env->NewObject(jclazz, mid, cfname, cfopts); + jobject jcfd = env->NewObject(jclazz, mid, jcf_name, cfopts); if (env->ExceptionCheck()) { + env->DeleteLocalRef(jcf_name); return nullptr; } @@ -3563,5 +3922,169 @@ class ColumnFamilyDescriptorJni : public JavaClass { } }; +class MapJni : public JavaClass { + public: + /** + * Get the Java Class java.util.Map + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getClass(JNIEnv* env) { + return JavaClass::getJClass(env, "java/util/Map"); + } + + /** + * Get the Java Method: Map#put + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retieved + */ + static jmethodID getMapPutMethodId(JNIEnv* env) { + jclass jlist_clazz = getClass(env); + if(jlist_clazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = + env->GetMethodID(jlist_clazz, "put", "(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;"); + assert(mid != nullptr); + return mid; + } +}; + +class HashMapJni : public JavaClass { + public: + /** + * Get the Java Class java.util.HashMap + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return 
JavaClass::getJClass(env, "java/util/HashMap"); + } + + /** + * Create a new Java java.util.HashMap object. + * + * @param env A pointer to the Java environment + * + * @return A reference to a Java java.util.HashMap object, or + * nullptr if an an exception occurs + */ + static jobject construct(JNIEnv* env, const uint32_t initial_capacity = 16) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + jmethodID mid = env->GetMethodID(jclazz, "", "(I)V"); + if (mid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + return nullptr; + } + + jobject jhash_map = env->NewObject(jclazz, mid, static_cast(initial_capacity)); + if (env->ExceptionCheck()) { + return nullptr; + } + + return jhash_map; + } + + /** + * A function which maps a std::pair to a std::pair + * + * @return Either a pointer to a std::pair, or nullptr + * if an error occurs during the mapping + */ + template + using FnMapKV = std::function> (const std::pair&)>; + + // template ::value_type, std::pair>::value, int32_t>::type = 0> + // static void putAll(JNIEnv* env, const jobject jhash_map, I iterator, const FnMapKV &fn_map_kv) { + /** + * Returns true if it succeeds, false if an error occurs + */ + template + static bool putAll(JNIEnv* env, const jobject jhash_map, iterator_type iterator, iterator_type end, const FnMapKV &fn_map_kv) { + const jmethodID jmid_put = rocksdb::MapJni::getMapPutMethodId(env); + if (jmid_put == nullptr) { + return false; + } + + for (auto it = iterator; it != end; ++it) { + const std::unique_ptr> result = fn_map_kv(*it); + if (result == nullptr) { + // an error occurred during fn_map_kv + return false; + } + env->CallObjectMethod(jhash_map, jmid_put, result->first, result->second); + if (env->ExceptionCheck()) { + // exception occurred + env->DeleteLocalRef(result->second); + env->DeleteLocalRef(result->first); + return false; + } + + // release local references + env->DeleteLocalRef(result->second); + env->DeleteLocalRef(result->first); + } + + return true; + } +}; + +class LongJni : public JavaClass { + public: + /** + * Get the Java Class java.lang.Long + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "java/lang/Long"); + } + + static jobject valueOf(JNIEnv* env, jlong jprimitive_long) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + jmethodID mid = + env->GetStaticMethodID(jclazz, "valueOf", "(J)Ljava/lang/Long;"); + if (mid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + return nullptr; + } + + const jobject jlong_obj = + env->CallStaticObjectMethod(jclazz, mid, jprimitive_long); + if (env->ExceptionCheck()) { + // exception occurred + return nullptr; + } + + return jlong_obj; + } +}; } // namespace rocksdb #endif // JAVA_ROCKSJNI_PORTAL_H_ diff --git a/java/rocksjni/transaction.cc b/java/rocksjni/transaction.cc new file mode 100644 index 000000000..b2f928fb9 --- /dev/null +++ b/java/rocksjni/transaction.cc @@ -0,0 +1,1535 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
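HashMapJni::putAll above is parameterised over the C++ key and value types. The following is a hedged sketch of building a java.util.HashMap from a std::map with it, under the assumption that FnMapKV<K, V> maps a const std::pair<K, V>& to a std::unique_ptr of a jobject/jobject pair and that putAll deletes the produced local references after inserting them; the helper name map_to_java_hash_map is illustrative only.

// Sketch only, assuming HashMapJni::FnMapKV<K, V> is
// std::function<std::unique_ptr<std::pair<jobject, jobject>>(const std::pair<K, V>&)>.
#include <jni.h>

#include <cstdint>
#include <map>
#include <memory>
#include <string>

#include "rocksjni/portal.h"

jobject map_to_java_hash_map(JNIEnv* env,
    const std::map<uint32_t, std::string>& m) {
  jobject jhash_map =
      rocksdb::HashMapJni::construct(env, static_cast<uint32_t>(m.size()));
  if (jhash_map == nullptr) {
    return nullptr;  // exception already pending
  }

  const rocksdb::HashMapJni::FnMapKV<const uint32_t, std::string> fn_map_kv =
      [env](const std::pair<const uint32_t, std::string>& kv) {
        // Box the key as java.lang.Long and copy the value as java.lang.String.
        jobject jkey = rocksdb::LongJni::valueOf(env, static_cast<jlong>(kv.first));
        if (jkey == nullptr) {
          return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
        }
        jstring jval_str = env->NewStringUTF(kv.second.c_str());
        if (jval_str == nullptr) {
          env->DeleteLocalRef(jkey);
          return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
        }
        return std::unique_ptr<std::pair<jobject, jobject>>(
            new std::pair<jobject, jobject>(jkey, jval_str));
      };

  if (!rocksdb::HashMapJni::putAll(env, jhash_map, m.begin(), m.end(),
                                   fn_map_kv)) {
    return nullptr;  // an error or pending exception occurred in putAll
  }
  return jhash_map;
}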
+// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ +// for rocksdb::Transaction. + +#include +#include + +#include "include/org_rocksdb_Transaction.h" + +#include "rocksdb/utilities/transaction.h" +#include "rocksjni/portal.h" + +using namespace std::placeholders; + +/* + * Class: org_rocksdb_Transaction + * Method: setSnapshot + * Signature: (J)V + */ +void Java_org_rocksdb_Transaction_setSnapshot(JNIEnv* env, jobject jobj, + jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + txn->SetSnapshot(); +} + +/* + * Class: org_rocksdb_Transaction + * Method: setSnapshotOnNextOperation + * Signature: (J)V + */ +void Java_org_rocksdb_Transaction_setSnapshotOnNextOperation__J(JNIEnv* env, + jobject jobj, jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + txn->SetSnapshotOnNextOperation(nullptr); +} + +/* + * Class: org_rocksdb_Transaction + * Method: setSnapshotOnNextOperation + * Signature: (JJ)V + */ +void Java_org_rocksdb_Transaction_setSnapshotOnNextOperation__JJ(JNIEnv* env, + jobject jobj, jlong jhandle, jlong jtxn_notifier_handle) { + auto* txn = reinterpret_cast(jhandle); + auto* txn_notifier = + reinterpret_cast*>( + jtxn_notifier_handle); + txn->SetSnapshotOnNextOperation(*txn_notifier); +} + +/* + * Class: org_rocksdb_Transaction + * Method: getSnapshot + * Signature: (J)J + */ +jlong Java_org_rocksdb_Transaction_getSnapshot(JNIEnv* env, jobject jobj, + jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + const rocksdb::Snapshot* snapshot = txn->GetSnapshot(); + return reinterpret_cast(snapshot); +} + +/* + * Class: org_rocksdb_Transaction + * Method: clearSnapshot + * Signature: (J)V + */ +void Java_org_rocksdb_Transaction_clearSnapshot(JNIEnv* env, jobject jobj, + jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + txn->ClearSnapshot(); +} + +/* + * Class: org_rocksdb_Transaction + * Method: prepare + * Signature: (J)V + */ +void Java_org_rocksdb_Transaction_prepare(JNIEnv* env, jobject jobj, + jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + rocksdb::Status s = txn->Prepare(); + if (!s.ok()) { + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_Transaction + * Method: commit + * Signature: (J)V + */ +void Java_org_rocksdb_Transaction_commit(JNIEnv* env, jobject jobj, + jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + rocksdb::Status s = txn->Commit(); + if (!s.ok()) { + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_Transaction + * Method: rollback + * Signature: (J)V + */ +void Java_org_rocksdb_Transaction_rollback(JNIEnv* env, jobject jobj, + jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + rocksdb::Status s = txn->Rollback(); + if (!s.ok()) { + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_Transaction + * Method: setSavePoint + * Signature: (J)V + */ +void Java_org_rocksdb_Transaction_setSavePoint(JNIEnv* env, jobject jobj, + jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + txn->SetSavePoint(); +} + +/* + * Class: org_rocksdb_Transaction + * Method: rollbackToSavePoint + * Signature: (J)V + */ +void Java_org_rocksdb_Transaction_rollbackToSavePoint(JNIEnv* env, jobject jobj, + jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + rocksdb::Status s = txn->RollbackToSavePoint(); + if (!s.ok()) { 
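    // Note (explanatory, not part of this patch): RocksDBExceptionJni::ThrowNew
    // converts the non-OK Status into a pending org.rocksdb.RocksDBException;
    // the native method then returns normally and the exception is raised once
    // control re-enters Java. prepare, commit and rollback above follow the
    // same pattern.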
+ rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + } +} + +typedef std::function FnGet; + +// TODO(AR) consider refactoring to share this between here and rocksjni.cc +jbyteArray txn_get_helper(JNIEnv* env, const FnGet &fn_get, + const jlong &jread_options_handle, const jbyteArray &jkey, + const jint &jkey_part_len) { + jbyte* key = env->GetByteArrayElements(jkey, nullptr); + if (key == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + rocksdb::Slice key_slice(reinterpret_cast(key), jkey_part_len); + + auto* read_options = + reinterpret_cast(jread_options_handle); + std::string value; + rocksdb::Status s = fn_get(*read_options, key_slice, &value); + + // trigger java unref on key. + // by passing JNI_ABORT, it will simply release the reference without + // copying the result back to the java byte array. + env->ReleaseByteArrayElements(jkey, key, JNI_ABORT); + + if (s.IsNotFound()) { + return nullptr; + } + + if (s.ok()) { + jbyteArray jret_value = + env->NewByteArray(static_cast(value.size())); + if (jret_value == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + env->SetByteArrayRegion(jret_value, 0, static_cast(value.size()), + reinterpret_cast(value.c_str())); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + return nullptr; + } + return jret_value; + } + + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + return nullptr; +} + +/* + * Class: org_rocksdb_Transaction + * Method: get + * Signature: (JJ[BIJ)[B + */ +jbyteArray Java_org_rocksdb_Transaction_get__JJ_3BIJ(JNIEnv* env, jobject jobj, + jlong jhandle, jlong jread_options_handle, jbyteArray jkey, jint jkey_part_len, + jlong jcolumn_family_handle) { + auto* txn = reinterpret_cast(jhandle); + auto* column_family_handle = + reinterpret_cast(jcolumn_family_handle); + FnGet fn_get = + std::bind( + &rocksdb::Transaction::Get, txn, _1, column_family_handle, _2, _3); + return txn_get_helper(env, fn_get, jread_options_handle, jkey, + jkey_part_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: get + * Signature: (JJ[BI)[B + */ +jbyteArray Java_org_rocksdb_Transaction_get__JJ_3BI(JNIEnv* env, jobject jobj, + jlong jhandle, jlong jread_options_handle, jbyteArray jkey, + jint jkey_part_len) { + auto* txn = reinterpret_cast(jhandle); + FnGet fn_get = + std::bind( + &rocksdb::Transaction::Get, txn, _1, _2, _3); + return txn_get_helper(env, fn_get, jread_options_handle, jkey, + jkey_part_len); +} + +// TODO(AR) consider refactoring to share this between here and rocksjni.cc +// used by txn_multi_get_helper below +std::vector txn_column_families_helper( + JNIEnv* env, jlongArray jcolumn_family_handles, bool* has_exception) { + std::vector cf_handles; + if (jcolumn_family_handles != nullptr) { + const jsize len_cols = env->GetArrayLength(jcolumn_family_handles); + if (len_cols > 0) { + if (env->EnsureLocalCapacity(len_cols) != 0) { + // out of memory + *has_exception = JNI_TRUE; + return std::vector(); + } + + jlong* jcfh = env->GetLongArrayElements(jcolumn_family_handles, nullptr); + if (jcfh == nullptr) { + // exception thrown: OutOfMemoryError + *has_exception = JNI_TRUE; + return std::vector(); + } + for (int i = 0; i < len_cols; i++) { + auto* cf_handle = + reinterpret_cast(jcfh[i]); + cf_handles.push_back(cf_handle); + } + env->ReleaseLongArrayElements(jcolumn_family_handles, jcfh, JNI_ABORT); + } + } + return cf_handles; +} + +typedef std::function ( + const rocksdb::ReadOptions&, + const std::vector&, + std::vector*)> FnMultiGet; + +void 
free_key_parts(JNIEnv* env, std::vector> key_parts_to_free) { + for (std::vector>::size_type i = 0; + i < key_parts_to_free.size(); i++) { + jobject jk; + jbyteArray jk_ba; + jbyte* jk_val; + std::tie(jk_ba, jk_val, jk) = key_parts_to_free[i]; + env->ReleaseByteArrayElements(jk_ba, jk_val, JNI_ABORT); + env->DeleteLocalRef(jk); + } +} + +// TODO(AR) consider refactoring to share this between here and rocksjni.cc +// cf multi get +jobjectArray txn_multi_get_helper(JNIEnv* env, const FnMultiGet &fn_multi_get, + const jlong &jread_options_handle, const jobjectArray &jkey_parts) { + const jsize len_key_parts = env->GetArrayLength(jkey_parts); + if (env->EnsureLocalCapacity(len_key_parts) != 0) { + // out of memory + return nullptr; + } + + std::vector key_parts; + std::vector> key_parts_to_free; + for (int i = 0; i < len_key_parts; i++) { + const jobject jk = env->GetObjectArrayElement(jkey_parts, i); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + free_key_parts(env, key_parts_to_free); + return nullptr; + } + jbyteArray jk_ba = reinterpret_cast(jk); + const jsize len_key = env->GetArrayLength(jk_ba); + if (env->EnsureLocalCapacity(len_key) != 0) { + // out of memory + env->DeleteLocalRef(jk); + free_key_parts(env, key_parts_to_free); + return nullptr; + } + jbyte* jk_val = env->GetByteArrayElements(jk_ba, nullptr); + if (jk_val == nullptr) { + // exception thrown: OutOfMemoryError + env->DeleteLocalRef(jk); + free_key_parts(env, key_parts_to_free); + return nullptr; + } + + rocksdb::Slice key_slice(reinterpret_cast(jk_val), len_key); + key_parts.push_back(key_slice); + + key_parts_to_free.push_back(std::make_tuple(jk_ba, jk_val, jk)); + } + + auto* read_options = + reinterpret_cast(jread_options_handle); + std::vector value_parts; + std::vector s = + fn_multi_get(*read_options, key_parts, &value_parts); + + // free up allocated byte arrays + free_key_parts(env, key_parts_to_free); + + // prepare the results + const jclass jcls_ba = env->FindClass("[B"); + jobjectArray jresults = + env->NewObjectArray(static_cast(s.size()), jcls_ba, nullptr); + if (jresults == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + // add to the jresults + for (std::vector::size_type i = 0; i != s.size(); i++) { + if (s[i].ok()) { + jbyteArray jentry_value = + env->NewByteArray(static_cast(value_parts[i].size())); + if (jentry_value == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + env->SetByteArrayRegion( + jentry_value, 0, static_cast(value_parts[i].size()), + reinterpret_cast(value_parts[i].c_str())); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + env->DeleteLocalRef(jentry_value); + return nullptr; + } + + env->SetObjectArrayElement(jresults, static_cast(i), jentry_value); + env->DeleteLocalRef(jentry_value); + } + } + + return jresults; +} + +/* + * Class: org_rocksdb_Transaction + * Method: multiGet + * Signature: (JJ[[B[J)[[B + */ +jobjectArray Java_org_rocksdb_Transaction_multiGet__JJ_3_3B_3J(JNIEnv* env, + jobject jobj, jlong jhandle, jlong jread_options_handle, + jobjectArray jkey_parts, jlongArray jcolumn_family_handles) { + bool has_exception = false; + const std::vector column_family_handles = + txn_column_families_helper(env, jcolumn_family_handles, &has_exception); + if (has_exception) { + // exception thrown: OutOfMemoryError + return nullptr; + } + auto* txn = reinterpret_cast(jhandle); + FnMultiGet fn_multi_get = + std::bind (rocksdb::Transaction::*) (const 
rocksdb::ReadOptions&, const std::vector&, const std::vector&, std::vector*)>( + &rocksdb::Transaction::MultiGet, txn, _1, column_family_handles, _2, + _3); + return txn_multi_get_helper(env, fn_multi_get, jread_options_handle, + jkey_parts); +} + +/* + * Class: org_rocksdb_Transaction + * Method: multiGet + * Signature: (JJ[[B)[[B + */ +jobjectArray Java_org_rocksdb_Transaction_multiGet__JJ_3_3B(JNIEnv* env, + jobject jobj, jlong jhandle, jlong jread_options_handle, + jobjectArray jkey_parts) { + auto* txn = reinterpret_cast(jhandle); + FnMultiGet fn_multi_get = + std::bind (rocksdb::Transaction::*) (const rocksdb::ReadOptions&, const std::vector&, std::vector*)>( + &rocksdb::Transaction::MultiGet, txn, _1, _2, _3); + return txn_multi_get_helper(env, fn_multi_get, jread_options_handle, + jkey_parts); +} + +/* + * Class: org_rocksdb_Transaction + * Method: getForUpdate + * Signature: (JJ[BIJZ)[B + */ +jbyteArray Java_org_rocksdb_Transaction_getForUpdate__JJ_3BIJZ(JNIEnv* env, + jobject jobj, jlong jhandle, jlong jread_options_handle, jbyteArray jkey, + jint jkey_part_len, jlong jcolumn_family_handle, jboolean jexclusive) { + auto* column_family_handle = + reinterpret_cast(jcolumn_family_handle); + auto* txn = reinterpret_cast(jhandle); + FnGet fn_get_for_update = + std::bind( + &rocksdb::Transaction::GetForUpdate, txn, _1, column_family_handle, + _2, _3, jexclusive); + return txn_get_helper(env, fn_get_for_update, jread_options_handle, jkey, + jkey_part_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: getForUpdate + * Signature: (JJ[BIZ)[B + */ +jbyteArray Java_org_rocksdb_Transaction_getForUpdate__JJ_3BIZ(JNIEnv* env, + jobject jobj, jlong jhandle, jlong jread_options_handle, jbyteArray jkey, + jint jkey_part_len, jboolean jexclusive) { + auto* txn = reinterpret_cast(jhandle); + FnGet fn_get_for_update = + std::bind( + &rocksdb::Transaction::GetForUpdate, txn, _1, _2, _3, jexclusive); + return txn_get_helper(env, fn_get_for_update, jread_options_handle, jkey, + jkey_part_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: multiGetForUpdate + * Signature: (JJ[[B[J)[[B + */ +jobjectArray Java_org_rocksdb_Transaction_multiGetForUpdate__JJ_3_3B_3J( + JNIEnv* env, jobject jobj, jlong jhandle, jlong jread_options_handle, + jobjectArray jkey_parts, jlongArray jcolumn_family_handles) { + bool has_exception = false; + const std::vector column_family_handles = + txn_column_families_helper(env, jcolumn_family_handles, &has_exception); + if (has_exception) { + // exception thrown: OutOfMemoryError + return nullptr; + } + auto* txn = reinterpret_cast(jhandle); + FnMultiGet fn_multi_get_for_update = + std::bind (rocksdb::Transaction::*) (const rocksdb::ReadOptions&, const std::vector&, const std::vector&, std::vector*)>( + &rocksdb::Transaction::MultiGetForUpdate, txn, _1, + column_family_handles, _2, _3); + return txn_multi_get_helper(env, fn_multi_get_for_update, + jread_options_handle, jkey_parts); +} + +/* + * Class: org_rocksdb_Transaction + * Method: multiGetForUpdate + * Signature: (JJ[[B)[[B + */ +jobjectArray Java_org_rocksdb_Transaction_multiGetForUpdate__JJ_3_3B( + JNIEnv* env, jobject jobj, jlong jhandle, jlong jread_options_handle, + jobjectArray jkey_parts) { + auto* txn = reinterpret_cast(jhandle); + FnMultiGet fn_multi_get_for_update = + std::bind (rocksdb::Transaction::*) (const rocksdb::ReadOptions&, const std::vector&, std::vector*)>( + &rocksdb::Transaction::MultiGetForUpdate, txn, _1, _2, _3); + return txn_multi_get_helper(env, fn_multi_get_for_update, + 
jread_options_handle, jkey_parts); +} + +/* + * Class: org_rocksdb_Transaction + * Method: getIterator + * Signature: (JJ)J + */ +jlong Java_org_rocksdb_Transaction_getIterator__JJ(JNIEnv* env, jobject jobj, + jlong jhandle, jlong jread_options_handle) { + auto* txn = reinterpret_cast(jhandle); + auto* read_options = + reinterpret_cast(jread_options_handle); + return reinterpret_cast( + txn->GetIterator(*read_options)); +} + +/* + * Class: org_rocksdb_Transaction + * Method: getIterator + * Signature: (JJJ)J + */ +jlong Java_org_rocksdb_Transaction_getIterator__JJJ(JNIEnv* env, jobject jobj, + jlong jhandle, jlong jread_options_handle, jlong jcolumn_family_handle) { + auto* txn = reinterpret_cast(jhandle); + auto* read_options = + reinterpret_cast(jread_options_handle); + auto* column_family_handle = + reinterpret_cast(jcolumn_family_handle); + return reinterpret_cast( + txn->GetIterator(*read_options, column_family_handle)); +} + +typedef std::function FnWriteKV; + +// TODO(AR) consider refactoring to share this between here and rocksjni.cc +void txn_write_kv_helper(JNIEnv* env, const FnWriteKV &fn_write_kv, + const jbyteArray &jkey, const jint &jkey_part_len, + const jbyteArray &jval, const jint &jval_len) { + jbyte* key = env->GetByteArrayElements(jkey, nullptr); + if (key == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + jbyte* value = env->GetByteArrayElements(jval, nullptr); + if (value == nullptr) { + // exception thrown: OutOfMemoryError + env->ReleaseByteArrayElements(jkey, key, JNI_ABORT); + return; + } + rocksdb::Slice key_slice(reinterpret_cast(key), jkey_part_len); + rocksdb::Slice value_slice(reinterpret_cast(value), jval_len); + + rocksdb::Status s = fn_write_kv(key_slice, value_slice); + + // trigger java unref on key. + // by passing JNI_ABORT, it will simply release the reference without + // copying the result back to the java byte array. 
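  // Note (explanatory, not part of this patch): releasing both buffers with
  // JNI_ABORT is safe because Transaction::Put/Merge copy the key and value
  // slices into the transaction's write batch before returning, so the Java
  // byte arrays are not referenced after fn_write_kv completes.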
+ env->ReleaseByteArrayElements(jval, value, JNI_ABORT); + env->ReleaseByteArrayElements(jkey, key, JNI_ABORT); + + if (s.ok()) { + return; + } + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); +} + +/* + * Class: org_rocksdb_Transaction + * Method: put + * Signature: (J[BI[BIJ)V + */ +void Java_org_rocksdb_Transaction_put__J_3BI_3BIJ(JNIEnv* env, jobject jobj, + jlong jhandle, jbyteArray jkey, jint jkey_part_len, jbyteArray jval, + jint jval_len, jlong jcolumn_family_handle) { + auto* txn = reinterpret_cast(jhandle); + auto* column_family_handle = + reinterpret_cast(jcolumn_family_handle); + FnWriteKV fn_put = + std::bind( + &rocksdb::Transaction::Put, txn, column_family_handle, _1, _2); + txn_write_kv_helper(env, fn_put, jkey, jkey_part_len, jval, jval_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: put + * Signature: (J[BI[BI)V + */ +void Java_org_rocksdb_Transaction_put__J_3BI_3BI(JNIEnv* env, jobject jobj, + jlong jhandle, jbyteArray jkey, jint jkey_part_len, jbyteArray jval, + jint jval_len) { + auto* txn = reinterpret_cast(jhandle); + FnWriteKV fn_put = + std::bind( + &rocksdb::Transaction::Put, txn, _1, _2); + txn_write_kv_helper(env, fn_put, jkey, jkey_part_len, jval, jval_len); +} + +typedef std::function FnWriteKVParts; + +void free_key_value_parts(JNIEnv* env, const int32_t len, + std::tuple jkey_parts_to_free[], + std::tuple jvalue_parts_to_free[]) { + for (int32_t i = len - 1; i >= 0; --i) { + jbyteArray jba_value_part; + jbyte* jvalue_part; + jobject jobj_value_part; + std::tie(jba_value_part, jvalue_part, jobj_value_part) = + jvalue_parts_to_free[i]; + env->ReleaseByteArrayElements(jba_value_part, jvalue_part, JNI_ABORT); + env->DeleteLocalRef(jobj_value_part); + + jbyteArray jba_key_part; + jbyte* jkey_part; + jobject jobj_key_part; + std::tie(jba_key_part, jkey_part, jobj_key_part) = + jkey_parts_to_free[i]; + env->ReleaseByteArrayElements(jba_key_part, jkey_part, JNI_ABORT); + env->DeleteLocalRef(jobj_key_part); + } +} + +// TODO(AR) consider refactoring to share this between here and rocksjni.cc +void txn_write_kv_parts_helper(JNIEnv* env, + const FnWriteKVParts &fn_write_kv_parts, const jobjectArray &jkey_parts, + const jint &jkey_parts_len, const jobjectArray &jvalue_parts, + const jint &jvalue_parts_len) { + assert(jkey_parts_len == jvalue_parts_len); + + rocksdb::Slice key_parts[jkey_parts_len]; + rocksdb::Slice value_parts[jvalue_parts_len]; + std::tuple jkey_parts_to_free[jkey_parts_len]; + std::tuple jvalue_parts_to_free[jvalue_parts_len]; + + // convert java key_parts/value_parts byte[][] to Slice(s) + for (jsize i = 0; i < jkey_parts_len; ++i) { + const jobject jobj_key_part = env->GetObjectArrayElement(jkey_parts, i); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + free_key_value_parts(env, jkey_parts_len, jkey_parts_to_free, + jvalue_parts_to_free); + return; + } + const jobject jobj_value_part = env->GetObjectArrayElement(jvalue_parts, i); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + env->DeleteLocalRef(jobj_key_part); + free_key_value_parts(env, jkey_parts_len, jkey_parts_to_free, + jvalue_parts_to_free); + return; + } + + const jbyteArray jba_key_part = reinterpret_cast(jobj_key_part); + const jsize jkey_part_len = env->GetArrayLength(jba_key_part); + if (env->EnsureLocalCapacity(jkey_part_len) != 0) { + // out of memory + env->DeleteLocalRef(jobj_value_part); + env->DeleteLocalRef(jobj_key_part); + free_key_value_parts(env, jkey_parts_len, jkey_parts_to_free, + 
jvalue_parts_to_free); + return; + } + jbyte* jkey_part = env->GetByteArrayElements(jba_key_part, nullptr); + if (jkey_part == nullptr) { + // exception thrown: OutOfMemoryError + env->DeleteLocalRef(jobj_value_part); + env->DeleteLocalRef(jobj_key_part); + free_key_value_parts(env, jkey_parts_len, jkey_parts_to_free, + jvalue_parts_to_free); + return; + } + + const jbyteArray jba_value_part = reinterpret_cast(jobj_value_part); + const jsize jvalue_part_len = env->GetArrayLength(jba_value_part); + if (env->EnsureLocalCapacity(jvalue_part_len) != 0) { + // out of memory + env->DeleteLocalRef(jobj_value_part); + env->DeleteLocalRef(jobj_key_part); + free_key_value_parts(env, jkey_parts_len, jkey_parts_to_free, + jvalue_parts_to_free); + return; + } + jbyte* jvalue_part = env->GetByteArrayElements(jba_value_part, nullptr); + if (jvalue_part == nullptr) { + // exception thrown: OutOfMemoryError + env->ReleaseByteArrayElements(jba_value_part, jvalue_part, JNI_ABORT); + env->DeleteLocalRef(jobj_value_part); + env->DeleteLocalRef(jobj_key_part); + free_key_value_parts(env, jkey_parts_len, jkey_parts_to_free, + jvalue_parts_to_free); + return; + } + + jkey_parts_to_free[i] = std::tuple( + jba_key_part, jkey_part, jobj_key_part); + jvalue_parts_to_free[i] = std::tuple( + jba_value_part, jvalue_part, jobj_value_part); + + key_parts[i] = + rocksdb::Slice(reinterpret_cast(jkey_part), jkey_part_len); + value_parts[i] = + rocksdb::Slice(reinterpret_cast(jvalue_part), jvalue_part_len); + } + + // call the write_multi function + rocksdb::Status s = fn_write_kv_parts( + rocksdb::SliceParts(key_parts, jkey_parts_len), + rocksdb::SliceParts(value_parts, jvalue_parts_len)); + + // cleanup temporary memory + free_key_value_parts(env, jkey_parts_len, jkey_parts_to_free, + jvalue_parts_to_free); + + // return + if (s.ok()) { + return; + } + + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); +} + +/* + * Class: org_rocksdb_Transaction + * Method: put + * Signature: (J[[BI[[BIJ)V + */ +void Java_org_rocksdb_Transaction_put__J_3_3BI_3_3BIJ(JNIEnv* env, + jobject jobj, jlong jhandle, jobjectArray jkey_parts, jint jkey_parts_len, + jobjectArray jvalue_parts, jint jvalue_parts_len, + jlong jcolumn_family_handle) { + auto* txn = reinterpret_cast(jhandle); + auto* column_family_handle = + reinterpret_cast(jcolumn_family_handle); + FnWriteKVParts fn_put_parts = + std::bind( + &rocksdb::Transaction::Put, txn, column_family_handle, _1, _2); + txn_write_kv_parts_helper(env, fn_put_parts, jkey_parts, jkey_parts_len, + jvalue_parts, jvalue_parts_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: put + * Signature: (J[[BI[[BI)V + */ +void Java_org_rocksdb_Transaction_put__J_3_3BI_3_3BI(JNIEnv* env, + jobject jobj, jlong jhandle, jobjectArray jkey_parts, jint jkey_parts_len, + jobjectArray jvalue_parts, jint jvalue_parts_len) { + auto* txn = reinterpret_cast(jhandle); + FnWriteKVParts fn_put_parts = + std::bind( + &rocksdb::Transaction::Put, txn, _1, _2); + txn_write_kv_parts_helper(env, fn_put_parts, jkey_parts, jkey_parts_len, + jvalue_parts, jvalue_parts_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: merge + * Signature: (J[BI[BIJ)V + */ +void Java_org_rocksdb_Transaction_merge__J_3BI_3BIJ(JNIEnv* env, jobject jobj, + jlong jhandle, jbyteArray jkey, jint jkey_part_len, jbyteArray jval, + jint jval_len, jlong jcolumn_family_handle) { + auto* txn = reinterpret_cast(jhandle); + auto* column_family_handle = + reinterpret_cast(jcolumn_family_handle); + FnWriteKV fn_merge = + std::bind( + 
&rocksdb::Transaction::Merge, txn, column_family_handle, _1, _2); + txn_write_kv_helper(env, fn_merge, jkey, jkey_part_len, jval, jval_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: merge + * Signature: (J[BI[BI)V + */ +void Java_org_rocksdb_Transaction_merge__J_3BI_3BI(JNIEnv* env, jobject jobj, + jlong jhandle, jbyteArray jkey, jint jkey_part_len, jbyteArray jval, + jint jval_len) { + auto* txn = reinterpret_cast(jhandle); + FnWriteKV fn_merge = + std::bind( + &rocksdb::Transaction::Merge, txn, _1, _2); + txn_write_kv_helper(env, fn_merge, jkey, jkey_part_len, jval, jval_len); +} + +typedef std::function FnWriteK; + +// TODO(AR) consider refactoring to share this between here and rocksjni.cc +void txn_write_k_helper(JNIEnv* env, const FnWriteK &fn_write_k, + const jbyteArray &jkey, const jint &jkey_part_len) { + jbyte* key = env->GetByteArrayElements(jkey, nullptr); + if (key == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + rocksdb::Slice key_slice(reinterpret_cast(key), jkey_part_len); + + rocksdb::Status s = fn_write_k(key_slice); + + // trigger java unref on key. + // by passing JNI_ABORT, it will simply release the reference without + // copying the result back to the java byte array. + env->ReleaseByteArrayElements(jkey, key, JNI_ABORT); + + if (s.ok()) { + return; + } + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); +} + +/* + * Class: org_rocksdb_Transaction + * Method: delete + * Signature: (J[BIJ)V + */ +void Java_org_rocksdb_Transaction_delete__J_3BIJ(JNIEnv* env, jobject jobj, + jlong jhandle, jbyteArray jkey, jint jkey_part_len, + jlong jcolumn_family_handle) { + auto* txn = reinterpret_cast(jhandle); + auto* column_family_handle = + reinterpret_cast(jcolumn_family_handle); + FnWriteK fn_delete = + std::bind( + &rocksdb::Transaction::Delete, txn, column_family_handle, _1); + txn_write_k_helper(env, fn_delete, jkey, jkey_part_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: delete + * Signature: (J[BI)V + */ +void Java_org_rocksdb_Transaction_delete__J_3BI(JNIEnv* env, jobject jobj, + jlong jhandle, jbyteArray jkey, jint jkey_part_len) { + auto* txn = reinterpret_cast(jhandle); + FnWriteK fn_delete = + std::bind( + &rocksdb::Transaction::Delete, txn, _1); + txn_write_k_helper(env, fn_delete, jkey, jkey_part_len); +} + +typedef std::function FnWriteKParts; + +void free_key_parts(JNIEnv* env, const int32_t len, + std::tuple jkey_parts_to_free[]) { + for (int32_t i = len - 1; i >= 0; --i) { + jbyteArray jba_key_part; + jbyte* jkey; + jobject jobj_key_part; + std::tie(jba_key_part, jkey, jobj_key_part) = jkey_parts_to_free[i]; + env->ReleaseByteArrayElements(jba_key_part, jkey, JNI_ABORT); + env->DeleteLocalRef(jobj_key_part); + } +} + +// TODO(AR) consider refactoring to share this between here and rocksjni.cc +void txn_write_k_parts_helper(JNIEnv* env, + const FnWriteKParts &fn_write_k_parts, const jobjectArray &jkey_parts, + const jint &jkey_parts_len) { + + rocksdb::Slice key_parts[jkey_parts_len]; + std::tuple jkey_parts_to_free[jkey_parts_len]; + + // convert java key_parts byte[][] to Slice(s) + for (jint i = 0; i < jkey_parts_len; ++i) { + const jobject jobj_key_part = env->GetObjectArrayElement(jkey_parts, i); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + free_key_parts(env, jkey_parts_len, jkey_parts_to_free); + return; + } + + const jbyteArray jba_key_part = reinterpret_cast(jobj_key_part); + const jsize jkey_part_len = env->GetArrayLength(jba_key_part); + if 
(env->EnsureLocalCapacity(jkey_part_len) != 0) { + // out of memory + env->DeleteLocalRef(jobj_key_part); + free_key_parts(env, jkey_parts_len, jkey_parts_to_free); + return; + } + jbyte* jkey_part = env->GetByteArrayElements(jba_key_part, nullptr); + if (jkey_part == nullptr) { + // exception thrown: OutOfMemoryError + env->DeleteLocalRef(jobj_key_part); + free_key_parts(env, jkey_parts_len, jkey_parts_to_free); + return; + } + + jkey_parts_to_free[i] = std::tuple( + jba_key_part, jkey_part, jobj_key_part); + + key_parts[i] = rocksdb::Slice(reinterpret_cast(jkey_part), jkey_part_len); + } + + // call the write_multi function + rocksdb::Status s = fn_write_k_parts( + rocksdb::SliceParts(key_parts, jkey_parts_len)); + + // cleanup temporary memory + free_key_parts(env, jkey_parts_len, jkey_parts_to_free); + + // return + if (s.ok()) { + return; + } + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); +} + +/* + * Class: org_rocksdb_Transaction + * Method: delete + * Signature: (J[[BIJ)V + */ +void Java_org_rocksdb_Transaction_delete__J_3_3BIJ(JNIEnv* env, jobject jobj, + jlong jhandle, jobjectArray jkey_parts, jint jkey_parts_len, + jlong jcolumn_family_handle) { + auto* txn = reinterpret_cast(jhandle); + auto* column_family_handle = + reinterpret_cast(jcolumn_family_handle); + FnWriteKParts fn_delete_parts = + std::bind( + &rocksdb::Transaction::Delete, txn, column_family_handle, _1); + txn_write_k_parts_helper(env, fn_delete_parts, jkey_parts, jkey_parts_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: delete + * Signature: (J[[BI)V + */ +void Java_org_rocksdb_Transaction_delete__J_3_3BI(JNIEnv* env, jobject jobj, + jlong jhandle, jobjectArray jkey_parts, jint jkey_parts_len) { + auto* txn = reinterpret_cast(jhandle); + FnWriteKParts fn_delete_parts = + std::bind( + &rocksdb::Transaction::Delete, txn, _1); + txn_write_k_parts_helper(env, fn_delete_parts, jkey_parts, jkey_parts_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: singleDelete + * Signature: (J[BIJ)V + */ +void Java_org_rocksdb_Transaction_singleDelete__J_3BIJ(JNIEnv* env, + jobject jobj, jlong jhandle, jbyteArray jkey, jint jkey_part_len, + jlong jcolumn_family_handle) { + auto* txn = reinterpret_cast(jhandle); + auto* column_family_handle = + reinterpret_cast(jcolumn_family_handle); + FnWriteK fn_single_delete = + std::bind( + &rocksdb::Transaction::SingleDelete, txn, column_family_handle, _1); + txn_write_k_helper(env, fn_single_delete, jkey, jkey_part_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: singleDelete + * Signature: (J[BI)V + */ +void Java_org_rocksdb_Transaction_singleDelete__J_3BI(JNIEnv* env, + jobject jobj, jlong jhandle, jbyteArray jkey, jint jkey_part_len) { + auto* txn = reinterpret_cast(jhandle); + FnWriteK fn_single_delete = + std::bind( + &rocksdb::Transaction::SingleDelete, txn, _1); + txn_write_k_helper(env, fn_single_delete, jkey, jkey_part_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: singleDelete + * Signature: (J[[BIJ)V + */ +void Java_org_rocksdb_Transaction_singleDelete__J_3_3BIJ(JNIEnv* env, + jobject jobj, jlong jhandle, jobjectArray jkey_parts, jint jkey_parts_len, + jlong jcolumn_family_handle) { + auto* txn = reinterpret_cast(jhandle); + auto* column_family_handle = + reinterpret_cast(jcolumn_family_handle); + FnWriteKParts fn_single_delete_parts = + std::bind( + &rocksdb::Transaction::SingleDelete, txn, column_family_handle, _1); + txn_write_k_parts_helper(env, fn_single_delete_parts, jkey_parts, + jkey_parts_len); +} + +/* + * Class: 
org_rocksdb_Transaction + * Method: singleDelete + * Signature: (J[[BI)V + */ +void Java_org_rocksdb_Transaction_singleDelete__J_3_3BI(JNIEnv* env, + jobject jobj, jlong jhandle, jobjectArray jkey_parts, jint jkey_parts_len) { + auto* txn = reinterpret_cast(jhandle); + FnWriteKParts fn_single_delete_parts = + std::bind( + &rocksdb::Transaction::SingleDelete, txn, _1); + txn_write_k_parts_helper(env, fn_single_delete_parts, jkey_parts, + jkey_parts_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: putUntracked + * Signature: (J[BI[BIJ)V + */ +void Java_org_rocksdb_Transaction_putUntracked__J_3BI_3BIJ(JNIEnv* env, + jobject jobj, jlong jhandle, jbyteArray jkey, jint jkey_part_len, + jbyteArray jval, jint jval_len, jlong jcolumn_family_handle) { + auto* txn = reinterpret_cast(jhandle); + auto* column_family_handle = + reinterpret_cast(jcolumn_family_handle); + FnWriteKV fn_put_untracked = + std::bind( + &rocksdb::Transaction::PutUntracked, txn, column_family_handle, _1, + _2); + txn_write_kv_helper(env, fn_put_untracked, jkey, jkey_part_len, jval, + jval_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: putUntracked + * Signature: (J[BI[BI)V + */ +void Java_org_rocksdb_Transaction_putUntracked__J_3BI_3BI(JNIEnv* env, + jobject jobj, jlong jhandle, jbyteArray jkey, jint jkey_part_len, + jbyteArray jval, jint jval_len) { + auto* txn = reinterpret_cast(jhandle); + FnWriteKV fn_put_untracked = + std::bind( + &rocksdb::Transaction::PutUntracked, txn, _1, _2); + txn_write_kv_helper(env, fn_put_untracked, jkey, jkey_part_len, jval, + jval_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: putUntracked + * Signature: (J[[BI[[BIJ)V + */ +void Java_org_rocksdb_Transaction_putUntracked__J_3_3BI_3_3BIJ(JNIEnv* env, + jobject jobj, jlong jhandle, jobjectArray jkey_parts, jint jkey_parts_len, + jobjectArray jvalue_parts, jint jvalue_parts_len, + jlong jcolumn_family_handle) { + auto* txn = reinterpret_cast(jhandle); + auto* column_family_handle = + reinterpret_cast(jcolumn_family_handle); + FnWriteKVParts fn_put_parts_untracked = + std::bind( + &rocksdb::Transaction::PutUntracked, txn, column_family_handle, _1, + _2); + txn_write_kv_parts_helper(env, fn_put_parts_untracked, jkey_parts, + jkey_parts_len, jvalue_parts, jvalue_parts_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: putUntracked + * Signature: (J[[BI[[BI)V + */ +void Java_org_rocksdb_Transaction_putUntracked__J_3_3BI_3_3BI(JNIEnv* env, + jobject jobj, jlong jhandle, jobjectArray jkey_parts, jint jkey_parts_len, + jobjectArray jvalue_parts, jint jvalue_parts_len) { + auto* txn = reinterpret_cast(jhandle); + FnWriteKVParts fn_put_parts_untracked = + std::bind( + &rocksdb::Transaction::PutUntracked, txn, _1, _2); + txn_write_kv_parts_helper(env, fn_put_parts_untracked, jkey_parts, + jkey_parts_len, jvalue_parts, jvalue_parts_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: mergeUntracked + * Signature: (J[BI[BIJ)V + */ +void Java_org_rocksdb_Transaction_mergeUntracked__J_3BI_3BIJ(JNIEnv* env, + jobject jobj, jlong jhandle, jbyteArray jkey, jint jkey_part_len, + jbyteArray jval, jint jval_len, jlong jcolumn_family_handle) { + auto* txn = reinterpret_cast(jhandle); + auto* column_family_handle = + reinterpret_cast(jcolumn_family_handle); + FnWriteKV fn_merge_untracked = + std::bind( + &rocksdb::Transaction::MergeUntracked, txn, column_family_handle, _1, + _2); + txn_write_kv_helper(env, fn_merge_untracked, jkey, jkey_part_len, jval, + jval_len); +} + +/* + * Class: org_rocksdb_Transaction 
+ * Method: mergeUntracked + * Signature: (J[BI[BI)V + */ +void Java_org_rocksdb_Transaction_mergeUntracked__J_3BI_3BI(JNIEnv* env, + jobject jobj, jlong jhandle, jbyteArray jkey, jint jkey_part_len, + jbyteArray jval, jint jval_len) { + auto* txn = reinterpret_cast(jhandle); + FnWriteKV fn_merge_untracked = + std::bind( + &rocksdb::Transaction::MergeUntracked, txn, _1, _2); + txn_write_kv_helper(env, fn_merge_untracked, jkey, jkey_part_len, jval, + jval_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: deleteUntracked + * Signature: (J[BIJ)V + */ +void Java_org_rocksdb_Transaction_deleteUntracked__J_3BIJ(JNIEnv* env, + jobject jobj, jlong jhandle, jbyteArray jkey, jint jkey_part_len, + jlong jcolumn_family_handle) { + auto* txn = reinterpret_cast(jhandle); + auto* column_family_handle = + reinterpret_cast(jcolumn_family_handle); + FnWriteK fn_delete_untracked = + std::bind( + &rocksdb::Transaction::DeleteUntracked, txn, column_family_handle, + _1); + txn_write_k_helper(env, fn_delete_untracked, jkey, jkey_part_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: deleteUntracked + * Signature: (J[BI)V + */ +void Java_org_rocksdb_Transaction_deleteUntracked__J_3BI(JNIEnv* env, + jobject jobj, jlong jhandle, jbyteArray jkey, jint jkey_part_len) { + auto* txn = reinterpret_cast(jhandle); + FnWriteK fn_delete_untracked = + std::bind( + &rocksdb::Transaction::DeleteUntracked, txn, _1); + txn_write_k_helper(env, fn_delete_untracked, jkey, jkey_part_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: deleteUntracked + * Signature: (J[[BIJ)V + */ +void Java_org_rocksdb_Transaction_deleteUntracked__J_3_3BIJ(JNIEnv* env, + jobject jobj, jlong jhandle, jobjectArray jkey_parts, jint jkey_parts_len, + jlong jcolumn_family_handle) { + auto* txn = reinterpret_cast(jhandle); + auto* column_family_handle = + reinterpret_cast(jcolumn_family_handle); + FnWriteKParts fn_delete_untracked_parts = + std::bind( + &rocksdb::Transaction::DeleteUntracked, txn, column_family_handle, + _1); + txn_write_k_parts_helper(env, fn_delete_untracked_parts, jkey_parts, + jkey_parts_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: deleteUntracked + * Signature: (J[[BI)V + */ +void Java_org_rocksdb_Transaction_deleteUntracked__J_3_3BI(JNIEnv* env, + jobject jobj, jlong jhandle, jobjectArray jkey_parts, + jint jkey_parts_len) { + auto* txn = reinterpret_cast(jhandle); + FnWriteKParts fn_delete_untracked_parts = + std::bind( + &rocksdb::Transaction::DeleteUntracked, txn, _1); + txn_write_k_parts_helper(env, fn_delete_untracked_parts, jkey_parts, + jkey_parts_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: putLogData + * Signature: (J[BI)V + */ +void Java_org_rocksdb_Transaction_putLogData(JNIEnv* env, + jobject jobj, jlong jhandle, jbyteArray jkey, jint jkey_part_len) { + auto* txn = reinterpret_cast(jhandle); + + jbyte* key = env->GetByteArrayElements(jkey, nullptr); + if (key == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + + rocksdb::Slice key_slice(reinterpret_cast(key), jkey_part_len); + txn->PutLogData(key_slice); + + // trigger java unref on key. + // by passing JNI_ABORT, it will simply release the reference without + // copying the result back to the java byte array. 
+ env->ReleaseByteArrayElements(jkey, key, JNI_ABORT); +} + +/* + * Class: org_rocksdb_Transaction + * Method: disableIndexing + * Signature: (J)V + */ +void Java_org_rocksdb_Transaction_disableIndexing(JNIEnv* env, jobject jobj, + jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + txn->DisableIndexing(); +} + +/* + * Class: org_rocksdb_Transaction + * Method: enableIndexing + * Signature: (J)V + */ +void Java_org_rocksdb_Transaction_enableIndexing(JNIEnv* env, jobject jobj, + jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + txn->EnableIndexing(); +} + +/* + * Class: org_rocksdb_Transaction + * Method: getNumKeys + * Signature: (J)J + */ +jlong Java_org_rocksdb_Transaction_getNumKeys(JNIEnv* env, jobject jobj, + jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + return txn->GetNumKeys(); +} + +/* + * Class: org_rocksdb_Transaction + * Method: getNumPuts + * Signature: (J)J + */ +jlong Java_org_rocksdb_Transaction_getNumPuts(JNIEnv* env, jobject jobj, + jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + return txn->GetNumPuts(); +} + +/* + * Class: org_rocksdb_Transaction + * Method: getNumDeletes + * Signature: (J)J + */ +jlong Java_org_rocksdb_Transaction_getNumDeletes(JNIEnv* env, jobject jobj, + jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + return txn->GetNumDeletes(); +} + +/* + * Class: org_rocksdb_Transaction + * Method: getNumMerges + * Signature: (J)J + */ +jlong Java_org_rocksdb_Transaction_getNumMerges(JNIEnv* env, jobject jobj, + jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + return txn->GetNumMerges(); +} + +/* + * Class: org_rocksdb_Transaction + * Method: getElapsedTime + * Signature: (J)J + */ +jlong Java_org_rocksdb_Transaction_getElapsedTime(JNIEnv* env, jobject jobj, + jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + return txn->GetElapsedTime(); +} + +/* + * Class: org_rocksdb_Transaction + * Method: getWriteBatch + * Signature: (J)J + */ +jlong Java_org_rocksdb_Transaction_getWriteBatch(JNIEnv* env, jobject jobj, + jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + return reinterpret_cast(txn->GetWriteBatch()); +} + +/* + * Class: org_rocksdb_Transaction + * Method: setLockTimeout + * Signature: (JJ)V + */ +void Java_org_rocksdb_Transaction_setLockTimeout(JNIEnv* env, jobject jobj, + jlong jhandle, jlong jlock_timeout) { + auto* txn = reinterpret_cast(jhandle); + txn->SetLockTimeout(jlock_timeout); +} + +/* + * Class: org_rocksdb_Transaction + * Method: getWriteOptions + * Signature: (J)J + */ +jlong Java_org_rocksdb_Transaction_getWriteOptions(JNIEnv* env, jobject jobj, + jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + return reinterpret_cast(txn->GetWriteOptions()); +} + +/* + * Class: org_rocksdb_Transaction + * Method: setWriteOptions + * Signature: (JJ)V + */ +void Java_org_rocksdb_Transaction_setWriteOptions(JNIEnv* env, jobject jobj, + jlong jhandle, jlong jwrite_options_handle) { + auto* txn = reinterpret_cast(jhandle); + auto* write_options = + reinterpret_cast(jwrite_options_handle); + txn->SetWriteOptions(*write_options); +} + +/* + * Class: org_rocksdb_Transaction + * Method: undo + * Signature: (J[BIJ)V + */ +void Java_org_rocksdb_Transaction_undoGetForUpdate__J_3BIJ(JNIEnv* env, + jobject jobj, jlong jhandle, jbyteArray jkey, jint jkey_part_len, + jlong jcolumn_family_handle) { + auto* txn = reinterpret_cast(jhandle); + auto* column_family_handle = + reinterpret_cast(jcolumn_family_handle); + jbyte* key = env->GetByteArrayElements(jkey, nullptr); + if (key == 
nullptr) { + // exception thrown: OutOfMemoryError + return; + } + + rocksdb::Slice key_slice(reinterpret_cast(key), jkey_part_len); + txn->UndoGetForUpdate(column_family_handle, key_slice); + + env->ReleaseByteArrayElements(jkey, key, JNI_ABORT); +} + +/* + * Class: org_rocksdb_Transaction + * Method: undoGetForUpdate + * Signature: (J[BI)V + */ +void Java_org_rocksdb_Transaction_undoGetForUpdate__J_3BI(JNIEnv* env, + jobject jobj, jlong jhandle, jbyteArray jkey, jint jkey_part_len) { + auto* txn = reinterpret_cast(jhandle); + jbyte* key = env->GetByteArrayElements(jkey, nullptr); + if (key == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + + rocksdb::Slice key_slice(reinterpret_cast(key), jkey_part_len); + txn->UndoGetForUpdate(key_slice); + + env->ReleaseByteArrayElements(jkey, key, JNI_ABORT); +} + +/* + * Class: org_rocksdb_Transaction + * Method: rebuildFromWriteBatch + * Signature: (JJ)V + */ +void Java_org_rocksdb_Transaction_rebuildFromWriteBatch(JNIEnv* env, + jobject jobj, jlong jhandle, jlong jwrite_batch_handle) { + auto* txn = reinterpret_cast(jhandle); + auto* write_batch = + reinterpret_cast(jwrite_batch_handle); + rocksdb::Status s = txn->RebuildFromWriteBatch(write_batch); + if (!s.ok()) { + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_Transaction + * Method: getCommitTimeWriteBatch + * Signature: (J)J + */ +jlong Java_org_rocksdb_Transaction_getCommitTimeWriteBatch(JNIEnv* env, + jobject jobj, jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + return reinterpret_cast(txn->GetCommitTimeWriteBatch()); +} + +/* + * Class: org_rocksdb_Transaction + * Method: setLogNumber + * Signature: (JJ)V + */ +void Java_org_rocksdb_Transaction_setLogNumber(JNIEnv* env, jobject jobj, + jlong jhandle, jlong jlog_number) { + auto* txn = reinterpret_cast(jhandle); + txn->SetLogNumber(jlog_number); +} + +/* + * Class: org_rocksdb_Transaction + * Method: getLogNumber + * Signature: (J)J + */ +jlong Java_org_rocksdb_Transaction_getLogNumber(JNIEnv* env, jobject jobj, + jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + return txn->GetLogNumber(); +} + +/* + * Class: org_rocksdb_Transaction + * Method: setName + * Signature: (JLjava/lang/String;)V + */ +void Java_org_rocksdb_Transaction_setName(JNIEnv* env, jobject jobj, + jlong jhandle, jstring jname) { + auto* txn = reinterpret_cast(jhandle); + const char* name = env->GetStringUTFChars(jname, nullptr); + if (name == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + + rocksdb::Status s = txn->SetName(name); + + env->ReleaseStringUTFChars(jname, name); + + if (!s.ok()) { + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_Transaction + * Method: getName + * Signature: (J)Ljava/lang/String; + */ +jstring Java_org_rocksdb_Transaction_getName(JNIEnv* env, jobject jobj, + jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + rocksdb::TransactionName name = txn->GetName(); + return env->NewStringUTF(name.data()); +} + +/* + * Class: org_rocksdb_Transaction + * Method: getID + * Signature: (J)J + */ +jlong Java_org_rocksdb_Transaction_getID(JNIEnv* env, jobject jobj, + jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + rocksdb::TransactionID id = txn->GetID(); + return static_cast(id); +} + +/* + * Class: org_rocksdb_Transaction + * Method: isDeadlockDetect + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Transaction_isDeadlockDetect(JNIEnv* env, + jobject jobj, jlong jhandle) { + auto* txn = 
reinterpret_cast(jhandle); + return static_cast(txn->IsDeadlockDetect()); +} + +/* + * Class: org_rocksdb_Transaction + * Method: getWaitingTxns + * Signature: (J)Lorg/rocksdb/Transaction/WaitingTransactions; + */ +jobject Java_org_rocksdb_Transaction_getWaitingTxns(JNIEnv* env, + jobject jtransaction_obj, jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + uint32_t column_family_id; + std::string key; + std::vector waiting_txns = + txn->GetWaitingTxns(&column_family_id, &key); + jobject jwaiting_txns = + rocksdb::TransactionJni::newWaitingTransactions( + env, jtransaction_obj, column_family_id, key, waiting_txns); + return jwaiting_txns; +} + +/* + * Class: org_rocksdb_Transaction + * Method: getState + * Signature: (J)B + */ +jbyte Java_org_rocksdb_Transaction_getState(JNIEnv* env, + jobject jobj, jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + rocksdb::Transaction::TransactionState txn_status = txn->GetState(); + switch (txn_status) { + case rocksdb::Transaction::TransactionState::STARTED: + return 0x0; + + case rocksdb::Transaction::TransactionState::AWAITING_PREPARE: + return 0x1; + + case rocksdb::Transaction::TransactionState::PREPARED: + return 0x2; + + case rocksdb::Transaction::TransactionState::AWAITING_COMMIT: + return 0x3; + + case rocksdb::Transaction::TransactionState::COMMITED: + return 0x4; + + case rocksdb::Transaction::TransactionState::AWAITING_ROLLBACK: + return 0x5; + + case rocksdb::Transaction::TransactionState::ROLLEDBACK: + return 0x6; + + case rocksdb::Transaction::TransactionState::LOCKS_STOLEN: + return 0x7; + } + + assert(false); + return 0xFF; +} + +/* + * Class: org_rocksdb_Transaction + * Method: getId + * Signature: (J)J + */ +jlong Java_org_rocksdb_Transaction_getId(JNIEnv* env, jobject jobj, + jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + uint64_t id = txn->GetId(); + return static_cast(id); +} + +/* + * Class: org_rocksdb_Transaction + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_Transaction_disposeInternal(JNIEnv* env, jobject jobj, + jlong jhandle) { + delete reinterpret_cast(jhandle); +} diff --git a/java/rocksjni/transaction_db.cc b/java/rocksjni/transaction_db.cc new file mode 100644 index 000000000..6bb802420 --- /dev/null +++ b/java/rocksjni/transaction_db.cc @@ -0,0 +1,431 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ +// for rocksdb::TransactionDB. 
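For orientation before the includes: this is roughly how the natives defined in this file are reached from the Java side. The calls mirror the TransactionSample added later in this patch; the database path and class name here are only illustrative.

import org.rocksdb.*;

import static java.nio.charset.StandardCharsets.UTF_8;

public class TransactionDBSketch {
  public static void main(final String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final Options options = new Options().setCreateIfMissing(true);
         final TransactionDBOptions txnDbOptions = new TransactionDBOptions();
         // TransactionDB.open(...) lands in the Java_org_rocksdb_TransactionDB_open natives below
         final TransactionDB txnDb =
             TransactionDB.open(options, txnDbOptions, "/tmp/rocksdb_txn_sketch");
         final WriteOptions writeOptions = new WriteOptions();
         // beginTransaction(...) maps onto the beginTransaction natives below
         final Transaction txn = txnDb.beginTransaction(writeOptions)) {
      txn.put("key".getBytes(UTF_8), "value".getBytes(UTF_8));
      txn.commit();
    }
  }
}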
+ +#include +#include +#include +#include + + +#include "include/org_rocksdb_TransactionDB.h" + +#include "rocksdb/options.h" +#include "rocksdb/utilities/transaction.h" +#include "rocksdb/utilities/transaction_db.h" + +#include "rocksjni/portal.h" + +/* + * Class: org_rocksdb_TransactionDB + * Method: open + * Signature: (JJLjava/lang/String;)J + */ +jlong Java_org_rocksdb_TransactionDB_open__JJLjava_lang_String_2(JNIEnv* env, + jclass jcls, jlong joptions_handle, jlong jtxn_db_options_handle, + jstring jdb_path) { + auto* options = reinterpret_cast(joptions_handle); + auto* txn_db_options = + reinterpret_cast(jtxn_db_options_handle); + rocksdb::TransactionDB* tdb = nullptr; + const char* db_path = env->GetStringUTFChars(jdb_path, nullptr); + if (db_path == nullptr) { + // exception thrown: OutOfMemoryError + return 0; + } + rocksdb::Status s = + rocksdb::TransactionDB::Open(*options, *txn_db_options, db_path, &tdb); + env->ReleaseStringUTFChars(jdb_path, db_path); + + if (s.ok()) { + return reinterpret_cast(tdb); + } else { + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + return 0; + } +} + +/* + * Class: org_rocksdb_TransactionDB + * Method: open + * Signature: (JJLjava/lang/String;[[B[J)[J + */ +jlongArray Java_org_rocksdb_TransactionDB_open__JJLjava_lang_String_2_3_3B_3J( + JNIEnv* env, jclass jcls, jlong jdb_options_handle, + jlong jtxn_db_options_handle, jstring jdb_path, + jobjectArray jcolumn_names, + jlongArray jcolumn_options_handles) { + const char* db_path = env->GetStringUTFChars(jdb_path, nullptr); + if (db_path == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + const jsize len_cols = env->GetArrayLength(jcolumn_names); + if (env->EnsureLocalCapacity(len_cols) != 0) { + // out of memory + env->ReleaseStringUTFChars(jdb_path, db_path); + return nullptr; + } + + jlong* jco = env->GetLongArrayElements(jcolumn_options_handles, nullptr); + if (jco == nullptr) { + // exception thrown: OutOfMemoryError + env->ReleaseStringUTFChars(jdb_path, db_path); + return nullptr; + } + std::vector column_families; + for (int i = 0; i < len_cols; i++) { + const jobject jcn = env->GetObjectArrayElement(jcolumn_names, i); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + env->ReleaseLongArrayElements(jcolumn_options_handles, jco, JNI_ABORT); + env->ReleaseStringUTFChars(jdb_path, db_path); + return nullptr; + } + const jbyteArray jcn_ba = reinterpret_cast(jcn); + jbyte* jcf_name = env->GetByteArrayElements(jcn_ba, nullptr); + if (jcf_name == nullptr) { + // exception thrown: OutOfMemoryError + env->DeleteLocalRef(jcn); + env->ReleaseLongArrayElements(jcolumn_options_handles, jco, JNI_ABORT); + env->ReleaseStringUTFChars(jdb_path, db_path); + return nullptr; + } + + const int jcf_name_len = env->GetArrayLength(jcn_ba); + if (env->EnsureLocalCapacity(jcf_name_len) != 0) { + // out of memory + env->ReleaseByteArrayElements(jcn_ba, jcf_name, JNI_ABORT); + env->DeleteLocalRef(jcn); + env->ReleaseLongArrayElements(jcolumn_options_handles, jco, JNI_ABORT); + env->ReleaseStringUTFChars(jdb_path, db_path); + return nullptr; + } + const std::string cf_name(reinterpret_cast(jcf_name), jcf_name_len); + const rocksdb::ColumnFamilyOptions* cf_options = + reinterpret_cast(jco[i]); + column_families.push_back( + rocksdb::ColumnFamilyDescriptor(cf_name, *cf_options)); + + env->ReleaseByteArrayElements(jcn_ba, jcf_name, JNI_ABORT); + env->DeleteLocalRef(jcn); + } + env->ReleaseLongArrayElements(jcolumn_options_handles, jco, JNI_ABORT); + + auto* 
db_options = reinterpret_cast(jdb_options_handle); + auto* txn_db_options = + reinterpret_cast(jtxn_db_options_handle); + std::vector handles; + rocksdb::TransactionDB* tdb = nullptr; + const rocksdb::Status s = rocksdb::TransactionDB::Open(*db_options, *txn_db_options, + db_path, column_families, &handles, &tdb); + + // check if open operation was successful + if (s.ok()) { + const jsize resultsLen = 1 + len_cols; // db handle + column family handles + std::unique_ptr results = + std::unique_ptr(new jlong[resultsLen]); + results[0] = reinterpret_cast(tdb); + for (int i = 1; i <= len_cols; i++) { + results[i] = reinterpret_cast(handles[i - 1]); + } + + jlongArray jresults = env->NewLongArray(resultsLen); + if (jresults == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + env->SetLongArrayRegion(jresults, 0, resultsLen, results.get()); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + env->DeleteLocalRef(jresults); + return nullptr; + } + return jresults; + } else { + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + return nullptr; + } +} + +/* + * Class: org_rocksdb_TransactionDB + * Method: beginTransaction + * Signature: (JJ)J + */ +jlong Java_org_rocksdb_TransactionDB_beginTransaction__JJ(JNIEnv* env, + jobject jobj, jlong jhandle, jlong jwrite_options_handle) { + auto* txn_db = reinterpret_cast(jhandle); + auto* write_options = + reinterpret_cast(jwrite_options_handle); + rocksdb::Transaction* txn = txn_db->BeginTransaction(*write_options); + return reinterpret_cast(txn); +} + +/* + * Class: org_rocksdb_TransactionDB + * Method: beginTransaction + * Signature: (JJJ)J + */ +jlong Java_org_rocksdb_TransactionDB_beginTransaction__JJJ(JNIEnv* env, + jobject jobj, jlong jhandle, jlong jwrite_options_handle, + jlong jtxn_options_handle) { + auto* txn_db = reinterpret_cast(jhandle); + auto* write_options = + reinterpret_cast(jwrite_options_handle); + auto* txn_options = + reinterpret_cast(jtxn_options_handle); + rocksdb::Transaction* txn = + txn_db->BeginTransaction(*write_options, *txn_options); + return reinterpret_cast(txn); +} + +/* + * Class: org_rocksdb_TransactionDB + * Method: beginTransaction_withOld + * Signature: (JJJ)J + */ +jlong Java_org_rocksdb_TransactionDB_beginTransaction_1withOld__JJJ( + JNIEnv* env, jobject jobj, jlong jhandle, jlong jwrite_options_handle, + jlong jold_txn_handle) { + auto* txn_db = reinterpret_cast(jhandle); + auto* write_options = + reinterpret_cast(jwrite_options_handle); + auto* old_txn = reinterpret_cast(jold_txn_handle); + rocksdb::TransactionOptions txn_options; + rocksdb::Transaction* txn = + txn_db->BeginTransaction(*write_options, txn_options, old_txn); + + // RocksJava relies on the assumption that + // we do not allocate a new Transaction object + // when providing an old_txn + assert(txn == old_txn); + + return reinterpret_cast(txn); +} + +/* + * Class: org_rocksdb_TransactionDB + * Method: beginTransaction_withOld + * Signature: (JJJJ)J + */ +jlong Java_org_rocksdb_TransactionDB_beginTransaction_1withOld__JJJJ( + JNIEnv* env, jobject jobj, jlong jhandle, jlong jwrite_options_handle, + jlong jtxn_options_handle, jlong jold_txn_handle) { + auto* txn_db = reinterpret_cast(jhandle); + auto* write_options = + reinterpret_cast(jwrite_options_handle); + auto* txn_options = + reinterpret_cast(jtxn_options_handle); + auto* old_txn = reinterpret_cast(jold_txn_handle); + rocksdb::Transaction* txn = txn_db->BeginTransaction(*write_options, + *txn_options, old_txn); + + // RocksJava relies on 
the assumption that + // we do not allocate a new Transaction object + // when providing an old_txn + assert(txn == old_txn); + + return reinterpret_cast(txn); +} + +/* + * Class: org_rocksdb_TransactionDB + * Method: getTransactionByName + * Signature: (JLjava/lang/String;)J + */ +jlong Java_org_rocksdb_TransactionDB_getTransactionByName(JNIEnv* env, + jobject jobj, jlong jhandle, jstring jname) { + auto* txn_db = reinterpret_cast(jhandle); + const char* name = env->GetStringUTFChars(jname, nullptr); + if (name == nullptr) { + // exception thrown: OutOfMemoryError + return 0; + } + rocksdb::Transaction* txn = txn_db->GetTransactionByName(name); + env->ReleaseStringUTFChars(jname, name); + return reinterpret_cast(txn); +} + +/* + * Class: org_rocksdb_TransactionDB + * Method: getAllPreparedTransactions + * Signature: (J)[J + */ +jlongArray Java_org_rocksdb_TransactionDB_getAllPreparedTransactions( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto* txn_db = reinterpret_cast(jhandle); + std::vector txns; + txn_db->GetAllPreparedTransactions(&txns); + + const size_t size = txns.size(); + assert(size < UINT32_MAX); // does it fit in a jint? + + const jsize len = static_cast(size); + jlong tmp[len]; + for (jsize i = 0; i < len; ++i) { + tmp[i] = reinterpret_cast(txns[i]); + } + + jlongArray jtxns = env->NewLongArray(len); + if (jtxns == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + env->SetLongArrayRegion(jtxns, 0, len, tmp); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + env->DeleteLocalRef(jtxns); + return nullptr; + } + + return jtxns; +} + +/* + * Class: org_rocksdb_TransactionDB + * Method: getLockStatusData + * Signature: (J)Ljava/util/Map; + */ +jobject Java_org_rocksdb_TransactionDB_getLockStatusData( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto* txn_db = reinterpret_cast(jhandle); + const std::unordered_multimap lock_status_data = + txn_db->GetLockStatusData(); + const jobject jlock_status_data = rocksdb::HashMapJni::construct(env, + static_cast(lock_status_data.size())); + if (jlock_status_data == nullptr) { + // exception occurred + return nullptr; + } + + const rocksdb::HashMapJni::FnMapKV fn_map_kv = + [env, txn_db, &lock_status_data](const std::pair& pair) { + const jobject jlong_column_family_id = + rocksdb::LongJni::valueOf(env, pair.first); + if (jlong_column_family_id == nullptr) { + // an error occurred + return std::unique_ptr>(nullptr); + } + const jobject jkey_lock_info = + rocksdb::KeyLockInfoJni::construct(env, pair.second); + if (jkey_lock_info == nullptr) { + // an error occurred + return std::unique_ptr>(nullptr); + } + return std::unique_ptr>(new std::pair(jlong_column_family_id, + jkey_lock_info)); + }; + + if(!rocksdb::HashMapJni::putAll(env, jlock_status_data, + lock_status_data.begin(), lock_status_data.end(), fn_map_kv)) { + // exception occcurred + return nullptr; + } + + return jlock_status_data; +} + +/* +* Class: org_rocksdb_TransactionDB +* Method: getDeadlockInfoBuffer +* Signature: (J)[Lorg/rocksdb/TransactionDB/DeadlockPath; +*/ +jobjectArray Java_org_rocksdb_TransactionDB_getDeadlockInfoBuffer( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto* txn_db = reinterpret_cast(jhandle); + const std::vector deadlock_info_buffer = + txn_db->GetDeadlockInfoBuffer(); + + const jsize deadlock_info_buffer_len = + static_cast(deadlock_info_buffer.size()); + jobjectArray jdeadlock_info_buffer = + env->NewObjectArray(deadlock_info_buffer_len, + rocksdb::DeadlockPathJni::getJClass(env), 
nullptr); + if (jdeadlock_info_buffer == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + jsize jdeadlock_info_buffer_offset = 0; + + auto buf_end = deadlock_info_buffer.end(); + for (auto buf_it = deadlock_info_buffer.begin(); buf_it != buf_end; ++buf_it) { + const rocksdb::DeadlockPath deadlock_path = *buf_it; + const std::vector deadlock_infos + = deadlock_path.path; + const jsize deadlock_infos_len = + static_cast(deadlock_info_buffer.size()); + jobjectArray jdeadlock_infos = env->NewObjectArray(deadlock_infos_len, + rocksdb::DeadlockInfoJni::getJClass(env), nullptr); + if (jdeadlock_infos == nullptr) { + // exception thrown: OutOfMemoryError + env->DeleteLocalRef(jdeadlock_info_buffer); + return nullptr; + } + jsize jdeadlock_infos_offset = 0; + + auto infos_end = deadlock_infos.end(); + for (auto infos_it = deadlock_infos.begin(); infos_it != infos_end; ++infos_it) { + const rocksdb::DeadlockInfo deadlock_info = *infos_it; + const jobject jdeadlock_info = rocksdb::TransactionDBJni::newDeadlockInfo( + env, jobj, deadlock_info.m_txn_id, deadlock_info.m_cf_id, + deadlock_info.m_waiting_key, deadlock_info.m_exclusive); + if (jdeadlock_info == nullptr) { + // exception occcurred + env->DeleteLocalRef(jdeadlock_info_buffer); + return nullptr; + } + env->SetObjectArrayElement(jdeadlock_infos, jdeadlock_infos_offset++, jdeadlock_info); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException or ArrayStoreException + env->DeleteLocalRef(jdeadlock_info); + env->DeleteLocalRef(jdeadlock_info_buffer); + return nullptr; + } + } + + const jobject jdeadlock_path = + rocksdb::DeadlockPathJni::construct(env, jdeadlock_infos, + deadlock_path.limit_exceeded); + if(jdeadlock_path == nullptr) { + // exception occcurred + env->DeleteLocalRef(jdeadlock_info_buffer); + return nullptr; + } + env->SetObjectArrayElement(jdeadlock_info_buffer, jdeadlock_info_buffer_offset++, jdeadlock_path); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException or ArrayStoreException + env->DeleteLocalRef(jdeadlock_path); + env->DeleteLocalRef(jdeadlock_info_buffer); + return nullptr; + } + } + + return jdeadlock_info_buffer; +} + +/* +* Class: org_rocksdb_TransactionDB +* Method: setDeadlockInfoBufferSize +* Signature: (JI)V +*/ +void Java_org_rocksdb_TransactionDB_setDeadlockInfoBufferSize( + JNIEnv* env, jobject jobj, jlong jhandle, jint jdeadlock_info_buffer_size) { + auto* txn_db = reinterpret_cast(jhandle); + txn_db->SetDeadlockInfoBufferSize(jdeadlock_info_buffer_size); +} + +/* + * Class: org_rocksdb_TransactionDB + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_TransactionDB_disposeInternal(JNIEnv* env, jobject jobj, + jlong jhandle) { + delete reinterpret_cast(jhandle); +} diff --git a/java/rocksjni/transaction_db_options.cc b/java/rocksjni/transaction_db_options.cc new file mode 100644 index 000000000..600bce18f --- /dev/null +++ b/java/rocksjni/transaction_db_options.cc @@ -0,0 +1,147 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ +// for rocksdb::TransactionDBOptions. 
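The file below is a thin getter/setter bridge for rocksdb::TransactionDBOptions. As a hedged sketch of the Java side, the fluent setter names are inferred from the JNI method names that follow (treat them as assumptions rather than a definitive API), and the values are illustrative only.

import org.rocksdb.TransactionDBOptions;

public class TransactionDBOptionsSketch {
  // Builds options for a TransactionDB; each setter is assumed to map onto
  // the corresponding Java_org_rocksdb_TransactionDBOptions_set* native below.
  static TransactionDBOptions buildTxnDbOptions() {
    return new TransactionDBOptions()
        .setMaxNumLocks(100_000)          // cap on keys locked at once, per column family
        .setNumStripes(32)                // lock table striping
        .setTransactionLockTimeout(1000)  // default per-transaction lock wait, ms
        .setDefaultLockTimeout(1000);     // lock wait for writes made outside a transaction, ms
  }
}

The resulting object is then passed as the second argument of TransactionDB.open(...), as in the samples elsewhere in this change.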
+ +#include + +#include "include/org_rocksdb_TransactionDBOptions.h" + +#include "rocksdb/utilities/transaction_db.h" + +#include "rocksjni/portal.h" + +/* + * Class: org_rocksdb_TransactionDBOptions + * Method: newTransactionDBOptions + * Signature: ()J + */ +jlong Java_org_rocksdb_TransactionDBOptions_newTransactionDBOptions( + JNIEnv* env, jclass jcls) { + rocksdb::TransactionDBOptions* opts = new rocksdb::TransactionDBOptions(); + return reinterpret_cast(opts); +} + +/* + * Class: org_rocksdb_TransactionDBOptions + * Method: getMaxNumLocks + * Signature: (J)J + */ +jlong Java_org_rocksdb_TransactionDBOptions_getMaxNumLocks(JNIEnv* env, + jobject jobj, jlong jhandle) { + auto* opts = reinterpret_cast(jhandle); + return opts->max_num_locks; +} + +/* + * Class: org_rocksdb_TransactionDBOptions + * Method: setMaxNumLocks + * Signature: (JJ)V + */ +void Java_org_rocksdb_TransactionDBOptions_setMaxNumLocks(JNIEnv* env, + jobject jobj, jlong jhandle, jlong jmax_num_locks) { + auto* opts = reinterpret_cast(jhandle); + opts->max_num_locks = jmax_num_locks; +} + +/* + * Class: org_rocksdb_TransactionDBOptions + * Method: getNumStripes + * Signature: (J)J + */ +jlong Java_org_rocksdb_TransactionDBOptions_getNumStripes(JNIEnv* env, + jobject jobj, jlong jhandle) { + auto* opts = reinterpret_cast(jhandle); + return opts->num_stripes; +} + +/* + * Class: org_rocksdb_TransactionDBOptions + * Method: setNumStripes + * Signature: (JJ)V + */ +void Java_org_rocksdb_TransactionDBOptions_setNumStripes(JNIEnv* env, + jobject jobj, jlong jhandle, jlong jnum_stripes) { + auto* opts = reinterpret_cast(jhandle); + opts->num_stripes = jnum_stripes; +} + +/* + * Class: org_rocksdb_TransactionDBOptions + * Method: getTransactionLockTimeout + * Signature: (J)J + */ +jlong Java_org_rocksdb_TransactionDBOptions_getTransactionLockTimeout( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto* opts = reinterpret_cast(jhandle); + return opts->transaction_lock_timeout; +} + +/* + * Class: org_rocksdb_TransactionDBOptions + * Method: setTransactionLockTimeout + * Signature: (JJ)V + */ +void Java_org_rocksdb_TransactionDBOptions_setTransactionLockTimeout( + JNIEnv* env, jobject jobj, jlong jhandle, jlong jtransaction_lock_timeout) { + auto* opts = reinterpret_cast(jhandle); + opts->transaction_lock_timeout = jtransaction_lock_timeout; +} + +/* + * Class: org_rocksdb_TransactionDBOptions + * Method: getDefaultLockTimeout + * Signature: (J)J + */ +jlong Java_org_rocksdb_TransactionDBOptions_getDefaultLockTimeout( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto* opts = reinterpret_cast(jhandle); + return opts->default_lock_timeout; +} + +/* + * Class: org_rocksdb_TransactionDBOptions + * Method: setDefaultLockTimeout + * Signature: (JJ)V + */ +void Java_org_rocksdb_TransactionDBOptions_setDefaultLockTimeout( + JNIEnv* env, jobject jobj, jlong jhandle, jlong jdefault_lock_timeout) { + auto* opts = reinterpret_cast(jhandle); + opts->default_lock_timeout = jdefault_lock_timeout; +} + +/* + * Class: org_rocksdb_TransactionDBOptions + * Method: getWritePolicy + * Signature: (J)B + */ +jbyte Java_org_rocksdb_TransactionDBOptions_getWritePolicy( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto* opts = reinterpret_cast(jhandle); + return rocksdb::TxnDBWritePolicyJni::toJavaTxnDBWritePolicy(opts->write_policy); +} + +/* +* Class: org_rocksdb_TransactionDBOptions +* Method: setWritePolicy +* Signature: (JB)V +*/ +void Java_org_rocksdb_TransactionDBOptions_setWritePolicy( + JNIEnv* env, jobject jobj, jlong jhandle, jbyte 
jwrite_policy) {
+  auto* opts = reinterpret_cast<rocksdb::TransactionDBOptions*>(jhandle);
+  opts->write_policy =
+      rocksdb::TxnDBWritePolicyJni::toCppTxnDBWritePolicy(jwrite_policy);
+}
+
+/*
+ * Class: org_rocksdb_TransactionDBOptions
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_TransactionDBOptions_disposeInternal(JNIEnv* env,
+    jobject jobj, jlong jhandle) {
+  delete reinterpret_cast<rocksdb::TransactionDBOptions*>(jhandle);
+}
diff --git a/java/rocksjni/transaction_notifier.cc b/java/rocksjni/transaction_notifier.cc
new file mode 100644
index 000000000..3fdb4fb17
--- /dev/null
+++ b/java/rocksjni/transaction_notifier.cc
@@ -0,0 +1,42 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++
+// for rocksdb::TransactionNotifier.
+
+#include <jni.h>
+
+#include "include/org_rocksdb_AbstractTransactionNotifier.h"
+#include "rocksjni/transaction_notifier_jnicallback.h"
+
+/*
+ * Class: org_rocksdb_AbstractTransactionNotifier
+ * Method: createNewTransactionNotifier
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_AbstractTransactionNotifier_createNewTransactionNotifier(
+    JNIEnv* env, jobject jobj) {
+  auto* transaction_notifier =
+      new rocksdb::TransactionNotifierJniCallback(env, jobj);
+  auto* sptr_transaction_notifier =
+      new std::shared_ptr<rocksdb::TransactionNotifierJniCallback>(
+          transaction_notifier);
+  return reinterpret_cast<jlong>(sptr_transaction_notifier);
+}
+
+/*
+ * Class: org_rocksdb_AbstractTransactionNotifier
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_AbstractTransactionNotifier_disposeInternal(JNIEnv* env,
+    jobject jobj, jlong jhandle) {
+  // TODO(AR) refactor to use JniCallback::JniCallback
+  // when https://github.com/facebook/rocksdb/pull/1241/ is merged
+  std::shared_ptr<rocksdb::TransactionNotifierJniCallback>* handle =
+      reinterpret_cast<std::shared_ptr<rocksdb::TransactionNotifierJniCallback>*>(jhandle);
+  delete handle;
+}
diff --git a/java/rocksjni/transaction_notifier_jnicallback.cc b/java/rocksjni/transaction_notifier_jnicallback.cc
new file mode 100644
index 000000000..85f2a194b
--- /dev/null
+++ b/java/rocksjni/transaction_notifier_jnicallback.cc
@@ -0,0 +1,39 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the callback "bridge" between Java and C++ for
+// rocksdb::TransactionNotifier.
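The callback class implemented in the next file forwards C++ SnapshotCreated() notifications to a Java org.rocksdb.AbstractTransactionNotifier. Below is a sketch of that Java side: the snapshotCreated method name is taken from the getSnapshotCreatedMethodId lookup that follows, while Transaction#setSnapshotOnNextOperation is assumed to mirror the C++ Transaction::SetSnapshotOnNextOperation API.

import org.rocksdb.AbstractTransactionNotifier;
import org.rocksdb.Snapshot;
import org.rocksdb.Transaction;

public class NotifierSketch {
  // Java-side notifier; the JNI callback below is expected to invoke
  // snapshotCreated(...) whenever the transaction installs a new snapshot.
  static class LoggingNotifier extends AbstractTransactionNotifier {
    @Override
    public void snapshotCreated(final Snapshot newSnapshot) {
      System.out.println("snapshot seq=" + newSnapshot.getSequenceNumber());
    }
  }

  // The notifier must stay alive for as long as the transaction may use it.
  static void watchSnapshots(final Transaction txn,
      final LoggingNotifier notifier) {
    txn.setSnapshotOnNextOperation(notifier);
  }
}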
+
+#include "rocksjni/transaction_notifier_jnicallback.h"
+#include "rocksjni/portal.h"
+
+namespace rocksdb {
+
+TransactionNotifierJniCallback::TransactionNotifierJniCallback(JNIEnv* env,
+    jobject jtransaction_notifier) : JniCallback(env, jtransaction_notifier) {
+  // we cache the method id for the JNI callback
+  m_jsnapshot_created_methodID =
+      AbstractTransactionNotifierJni::getSnapshotCreatedMethodId(env);
+}
+
+void TransactionNotifierJniCallback::SnapshotCreated(
+    const Snapshot* newSnapshot) {
+  jboolean attached_thread = JNI_FALSE;
+  JNIEnv* env = getJniEnv(&attached_thread);
+  assert(env != nullptr);
+
+  env->CallVoidMethod(m_jcallback_obj,
+      m_jsnapshot_created_methodID, reinterpret_cast<jlong>(newSnapshot));
+
+  if(env->ExceptionCheck()) {
+    // exception thrown from CallVoidMethod
+    env->ExceptionDescribe();  // print out exception to stderr
+    releaseJniEnv(attached_thread);
+    return;
+  }
+
+  releaseJniEnv(attached_thread);
+}
+}  // namespace rocksdb
diff --git a/java/rocksjni/transaction_notifier_jnicallback.h b/java/rocksjni/transaction_notifier_jnicallback.h
new file mode 100644
index 000000000..8f67cdb8b
--- /dev/null
+++ b/java/rocksjni/transaction_notifier_jnicallback.h
@@ -0,0 +1,42 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the callback "bridge" between Java and C++ for
+// rocksdb::TransactionNotifier.
+
+#ifndef JAVA_ROCKSJNI_TRANSACTION_NOTIFIER_JNICALLBACK_H_
+#define JAVA_ROCKSJNI_TRANSACTION_NOTIFIER_JNICALLBACK_H_
+
+#include <jni.h>
+
+#include "rocksdb/utilities/transaction.h"
+#include "rocksjni/jnicallback.h"
+
+namespace rocksdb {
+
+/**
+ * This class acts as a bridge between C++
+ * and Java. The methods in this class will be
+ * called back from the RocksDB TransactionDB or OptimisticTransactionDB (C++);
+ * we then call back to the appropriate Java method,
+ * which enables TransactionNotifier to be implemented in Java.
+ *
+ * Unlike RocksJava's Comparator JNI Callback, we do not attempt
+ * to reduce Java object allocations by caching the Snapshot object
+ * presented to the callback. This could be revisited in the future
+ * if performance is lacking.
+ */
+class TransactionNotifierJniCallback: public JniCallback,
+    public TransactionNotifier {
+ public:
+    TransactionNotifierJniCallback(JNIEnv* env, jobject jtransaction_notifier);
+    virtual void SnapshotCreated(const Snapshot* newSnapshot);
+
+ private:
+    jmethodID m_jsnapshot_created_methodID;
+};
+}  // namespace rocksdb
+
+#endif  // JAVA_ROCKSJNI_TRANSACTION_NOTIFIER_JNICALLBACK_H_
diff --git a/java/rocksjni/transaction_options.cc b/java/rocksjni/transaction_options.cc
new file mode 100644
index 000000000..13ec3b9fd
--- /dev/null
+++ b/java/rocksjni/transaction_options.cc
@@ -0,0 +1,166 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++
+// for rocksdb::TransactionOptions.
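The last of the new option bridges, below, covers rocksdb::TransactionOptions. Here is a sketch of per-transaction tuning from Java, again with fluent setter names inferred from the JNI methods that follow and purely illustrative values.

import org.rocksdb.Transaction;
import org.rocksdb.TransactionDB;
import org.rocksdb.TransactionOptions;
import org.rocksdb.WriteOptions;

public class TransactionOptionsSketch {
  // Begins a transaction that takes a snapshot up front and opts in to
  // deadlock detection; the caller commits/rolls back and closes it.
  static Transaction beginTunedTransaction(final TransactionDB txnDb,
      final WriteOptions writeOptions) {
    try (final TransactionOptions txnOptions = new TransactionOptions()
             .setSetSnapshot(true)        // take a snapshot at transaction start
             .setDeadlockDetect(true)     // detect lock cycles while waiting
             .setDeadlockDetectDepth(50)
             .setLockTimeout(1000)        // ms; overrides the DB-wide transaction lock timeout
             .setExpiration(5000)) {      // ms; expired transactions fail to commit
      return txnDb.beginTransaction(writeOptions, txnOptions);
    }
  }
}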
+ +#include + +#include "include/org_rocksdb_TransactionOptions.h" + +#include "rocksdb/utilities/transaction_db.h" + +/* + * Class: org_rocksdb_TransactionOptions + * Method: newTransactionOptions + * Signature: ()J + */ +jlong Java_org_rocksdb_TransactionOptions_newTransactionOptions(JNIEnv* env, + jclass jcls) { + auto* opts = new rocksdb::TransactionOptions(); + return reinterpret_cast(opts); +} + +/* + * Class: org_rocksdb_TransactionOptions + * Method: isSetSnapshot + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_TransactionOptions_isSetSnapshot(JNIEnv* env, + jobject jobj, jlong jhandle) { + auto* opts = reinterpret_cast(jhandle); + return opts->set_snapshot; +} + +/* + * Class: org_rocksdb_TransactionOptions + * Method: setSetSnapshot + * Signature: (JZ)V + */ +void Java_org_rocksdb_TransactionOptions_setSetSnapshot(JNIEnv* env, + jobject jobj, jlong jhandle, jboolean jset_snapshot) { + auto* opts = reinterpret_cast(jhandle); + opts->set_snapshot = jset_snapshot; +} + +/* + * Class: org_rocksdb_TransactionOptions + * Method: isDeadlockDetect + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_TransactionOptions_isDeadlockDetect( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto* opts = reinterpret_cast(jhandle); + return opts->deadlock_detect; +} + +/* +* Class: org_rocksdb_TransactionOptions +* Method: setDeadlockDetect +* Signature: (JZ)V +*/ +void Java_org_rocksdb_TransactionOptions_setDeadlockDetect( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean jdeadlock_detect) { + auto* opts = reinterpret_cast(jhandle); + opts->deadlock_detect = jdeadlock_detect; +} + +/* + * Class: org_rocksdb_TransactionOptions + * Method: getLockTimeout + * Signature: (J)J + */ +jlong Java_org_rocksdb_TransactionOptions_getLockTimeout(JNIEnv* env, + jobject jobj, jlong jhandle) { + auto* opts = reinterpret_cast(jhandle); + return opts->lock_timeout; +} + +/* + * Class: org_rocksdb_TransactionOptions + * Method: setLockTimeout + * Signature: (JJ)V + */ +void Java_org_rocksdb_TransactionOptions_setLockTimeout(JNIEnv* env, + jobject jobj, jlong jhandle, jlong jlock_timeout) { + auto* opts = reinterpret_cast(jhandle); + opts->lock_timeout = jlock_timeout; +} + +/* + * Class: org_rocksdb_TransactionOptions + * Method: getExpiration + * Signature: (J)J + */ +jlong Java_org_rocksdb_TransactionOptions_getExpiration(JNIEnv* env, + jobject jobj, jlong jhandle) { + auto* opts = reinterpret_cast(jhandle); + return opts->expiration; +} + +/* + * Class: org_rocksdb_TransactionOptions + * Method: setExpiration + * Signature: (JJ)V + */ +void Java_org_rocksdb_TransactionOptions_setExpiration(JNIEnv* env, + jobject jobj, jlong jhandle, jlong jexpiration) { + auto* opts = reinterpret_cast(jhandle); + opts->expiration = jexpiration; +} + +/* + * Class: org_rocksdb_TransactionOptions + * Method: getDeadlockDetectDepth + * Signature: (J)J + */ +jlong Java_org_rocksdb_TransactionOptions_getDeadlockDetectDepth( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto* opts = reinterpret_cast(jhandle); + return opts->deadlock_detect_depth; +} + +/* +* Class: org_rocksdb_TransactionOptions +* Method: setDeadlockDetectDepth +* Signature: (JJ)V +*/ +void Java_org_rocksdb_TransactionOptions_setDeadlockDetectDepth( + JNIEnv* env, jobject jobj, jlong jhandle, jlong jdeadlock_detect_depth) { + auto* opts = reinterpret_cast(jhandle); + opts->deadlock_detect_depth = jdeadlock_detect_depth; +} + +/* +* Class: org_rocksdb_TransactionOptions +* Method: getMaxWriteBatchSize +* Signature: (J)J +*/ +jlong 
Java_org_rocksdb_TransactionOptions_getMaxWriteBatchSize( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto* opts = reinterpret_cast(jhandle); + return opts->max_write_batch_size; +} + +/* +* Class: org_rocksdb_TransactionOptions +* Method: setMaxWriteBatchSize +* Signature: (JJ)V +*/ +void Java_org_rocksdb_TransactionOptions_setMaxWriteBatchSize( + JNIEnv* env, jobject jobj, jlong jhandle, jlong jmax_write_batch_size) { + auto* opts = reinterpret_cast(jhandle); + opts->max_write_batch_size = jmax_write_batch_size; +} + +/* + * Class: org_rocksdb_TransactionOptions + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_TransactionOptions_disposeInternal(JNIEnv* env, + jobject jobj, jlong jhandle) { + delete reinterpret_cast(jhandle); +} diff --git a/java/samples/src/main/java/OptimisticTransactionSample.java b/java/samples/src/main/java/OptimisticTransactionSample.java new file mode 100644 index 000000000..1633d1f2b --- /dev/null +++ b/java/samples/src/main/java/OptimisticTransactionSample.java @@ -0,0 +1,184 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +import org.rocksdb.*; + +import static java.nio.charset.StandardCharsets.UTF_8; + +/** + * Demonstrates using Transactions on an OptimisticTransactionDB with + * varying isolation guarantees + */ +public class OptimisticTransactionSample { + private static final String dbPath = "/tmp/rocksdb_optimistic_transaction_example"; + + public static final void main(final String args[]) throws RocksDBException { + + try(final Options options = new Options() + .setCreateIfMissing(true); + final OptimisticTransactionDB txnDb = + OptimisticTransactionDB.open(options, dbPath)) { + + try (final WriteOptions writeOptions = new WriteOptions(); + final ReadOptions readOptions = new ReadOptions()) { + + //////////////////////////////////////////////////////// + // + // Simple OptimisticTransaction Example ("Read Committed") + // + //////////////////////////////////////////////////////// + readCommitted(txnDb, writeOptions, readOptions); + + + //////////////////////////////////////////////////////// + // + // "Repeatable Read" (Snapshot Isolation) Example + // -- Using a single Snapshot + // + //////////////////////////////////////////////////////// + repeatableRead(txnDb, writeOptions, readOptions); + + + //////////////////////////////////////////////////////// + // + // "Read Committed" (Monotonic Atomic Views) Example + // --Using multiple Snapshots + // + //////////////////////////////////////////////////////// + readCommitted_monotonicAtomicViews(txnDb, writeOptions, readOptions); + } + } + } + + /** + * Demonstrates "Read Committed" isolation + */ + private static void readCommitted(final OptimisticTransactionDB txnDb, + final WriteOptions writeOptions, final ReadOptions readOptions) + throws RocksDBException { + final byte key1[] = "abc".getBytes(UTF_8); + final byte value1[] = "def".getBytes(UTF_8); + + final byte key2[] = "xyz".getBytes(UTF_8); + final byte value2[] = "zzz".getBytes(UTF_8); + + // Start a transaction + try(final Transaction txn = txnDb.beginTransaction(writeOptions)) { + // Read a key in this transaction + byte[] value = txn.get(readOptions, key1); + assert(value == null); + + // Write a key in this transaction + txn.put(key1, value1); + + // Read a key OUTSIDE this transaction. 
Does not affect txn. + value = txnDb.get(readOptions, key1); + assert(value == null); + + // Write a key OUTSIDE of this transaction. + // Does not affect txn since this is an unrelated key. + // If we wrote key 'abc' here, the transaction would fail to commit. + txnDb.put(writeOptions, key2, value2); + + // Commit transaction + txn.commit(); + } + } + + /** + * Demonstrates "Repeatable Read" (Snapshot Isolation) isolation + */ + private static void repeatableRead(final OptimisticTransactionDB txnDb, + final WriteOptions writeOptions, final ReadOptions readOptions) + throws RocksDBException { + + final byte key1[] = "ghi".getBytes(UTF_8); + final byte value1[] = "jkl".getBytes(UTF_8); + + // Set a snapshot at start of transaction by setting setSnapshot(true) + try(final OptimisticTransactionOptions txnOptions = + new OptimisticTransactionOptions().setSetSnapshot(true); + final Transaction txn = + txnDb.beginTransaction(writeOptions, txnOptions)) { + + final Snapshot snapshot = txn.getSnapshot(); + + // Write a key OUTSIDE of transaction + txnDb.put(writeOptions, key1, value1); + + // Read a key using the snapshot. + readOptions.setSnapshot(snapshot); + final byte[] value = txn.getForUpdate(readOptions, key1, true); + assert(value == value1); + + try { + // Attempt to commit transaction + txn.commit(); + throw new IllegalStateException(); + } catch(final RocksDBException e) { + // Transaction could not commit since the write outside of the txn + // conflicted with the read! + assert(e.getStatus().getCode() == Status.Code.Busy); + } + + txn.rollback(); + } finally { + // Clear snapshot from read options since it is no longer valid + readOptions.setSnapshot(null); + } + } + + /** + * Demonstrates "Read Committed" (Monotonic Atomic Views) isolation + * + * In this example, we set the snapshot multiple times. This is probably + * only necessary if you have very strict isolation requirements to + * implement. + */ + private static void readCommitted_monotonicAtomicViews( + final OptimisticTransactionDB txnDb, final WriteOptions writeOptions, + final ReadOptions readOptions) throws RocksDBException { + + final byte keyX[] = "x".getBytes(UTF_8); + final byte valueX[] = "x".getBytes(UTF_8); + + final byte keyY[] = "y".getBytes(UTF_8); + final byte valueY[] = "y".getBytes(UTF_8); + + try (final OptimisticTransactionOptions txnOptions = + new OptimisticTransactionOptions().setSetSnapshot(true); + final Transaction txn = + txnDb.beginTransaction(writeOptions, txnOptions)) { + + // Do some reads and writes to key "x" + Snapshot snapshot = txnDb.getSnapshot(); + readOptions.setSnapshot(snapshot); + byte[] value = txn.get(readOptions, keyX); + txn.put(valueX, valueX); + + // Do a write outside of the transaction to key "y" + txnDb.put(writeOptions, keyY, valueY); + + // Set a new snapshot in the transaction + txn.setSnapshot(); + snapshot = txnDb.getSnapshot(); + readOptions.setSnapshot(snapshot); + + // Do some reads and writes to key "y" + // Since the snapshot was advanced, the write done outside of the + // transaction does not conflict. + value = txn.getForUpdate(readOptions, keyY, true); + txn.put(keyY, valueY); + + // Commit. Since the snapshot was advanced, the write done outside of the + // transaction does not prevent this transaction from Committing. 
+ txn.commit(); + + } finally { + // Clear snapshot from read options since it is no longer valid + readOptions.setSnapshot(null); + } + } +} diff --git a/java/samples/src/main/java/TransactionSample.java b/java/samples/src/main/java/TransactionSample.java new file mode 100644 index 000000000..b88a68f12 --- /dev/null +++ b/java/samples/src/main/java/TransactionSample.java @@ -0,0 +1,183 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +import org.rocksdb.*; + +import static java.nio.charset.StandardCharsets.UTF_8; + +/** + * Demonstrates using Transactions on a TransactionDB with + * varying isolation guarantees + */ +public class TransactionSample { + private static final String dbPath = "/tmp/rocksdb_transaction_example"; + + public static final void main(final String args[]) throws RocksDBException { + + try(final Options options = new Options() + .setCreateIfMissing(true); + final TransactionDBOptions txnDbOptions = new TransactionDBOptions(); + final TransactionDB txnDb = + TransactionDB.open(options, txnDbOptions, dbPath)) { + + try (final WriteOptions writeOptions = new WriteOptions(); + final ReadOptions readOptions = new ReadOptions()) { + + //////////////////////////////////////////////////////// + // + // Simple Transaction Example ("Read Committed") + // + //////////////////////////////////////////////////////// + readCommitted(txnDb, writeOptions, readOptions); + + + //////////////////////////////////////////////////////// + // + // "Repeatable Read" (Snapshot Isolation) Example + // -- Using a single Snapshot + // + //////////////////////////////////////////////////////// + repeatableRead(txnDb, writeOptions, readOptions); + + + //////////////////////////////////////////////////////// + // + // "Read Committed" (Monotonic Atomic Views) Example + // --Using multiple Snapshots + // + //////////////////////////////////////////////////////// + readCommitted_monotonicAtomicViews(txnDb, writeOptions, readOptions); + } + } + } + + /** + * Demonstrates "Read Committed" isolation + */ + private static void readCommitted(final TransactionDB txnDb, + final WriteOptions writeOptions, final ReadOptions readOptions) + throws RocksDBException { + final byte key1[] = "abc".getBytes(UTF_8); + final byte value1[] = "def".getBytes(UTF_8); + + final byte key2[] = "xyz".getBytes(UTF_8); + final byte value2[] = "zzz".getBytes(UTF_8); + + // Start a transaction + try(final Transaction txn = txnDb.beginTransaction(writeOptions)) { + // Read a key in this transaction + byte[] value = txn.get(readOptions, key1); + assert(value == null); + + // Write a key in this transaction + txn.put(key1, value1); + + // Read a key OUTSIDE this transaction. Does not affect txn. + value = txnDb.get(readOptions, key1); + assert(value == null); + + // Write a key OUTSIDE of this transaction. + // Does not affect txn since this is an unrelated key. + // If we wrote key 'abc' here, the transaction would fail to commit. 
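+ // (More precisely for this pessimistic TransactionDB: a conflicting write to
+ // 'abc' here would block on the key lock already held by txn and then fail
+ // with a TimedOut status, rather than only surfacing at commit time as it
+ // would with an OptimisticTransactionDB.)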
+ txnDb.put(writeOptions, key2, value2); + + // Commit transaction + txn.commit(); + } + } + + /** + * Demonstrates "Repeatable Read" (Snapshot Isolation) isolation + */ + private static void repeatableRead(final TransactionDB txnDb, + final WriteOptions writeOptions, final ReadOptions readOptions) + throws RocksDBException { + + final byte key1[] = "ghi".getBytes(UTF_8); + final byte value1[] = "jkl".getBytes(UTF_8); + + // Set a snapshot at start of transaction by setting setSnapshot(true) + try(final TransactionOptions txnOptions = new TransactionOptions() + .setSetSnapshot(true); + final Transaction txn = + txnDb.beginTransaction(writeOptions, txnOptions)) { + + final Snapshot snapshot = txn.getSnapshot(); + + // Write a key OUTSIDE of transaction + txnDb.put(writeOptions, key1, value1); + + // Attempt to read a key using the snapshot. This will fail since + // the previous write outside this txn conflicts with this read. + readOptions.setSnapshot(snapshot); + + try { + final byte[] value = txn.getForUpdate(readOptions, key1, true); + throw new IllegalStateException(); + } catch(final RocksDBException e) { + assert(e.getStatus().getCode() == Status.Code.Busy); + } + + txn.rollback(); + } finally { + // Clear snapshot from read options since it is no longer valid + readOptions.setSnapshot(null); + } + } + + /** + * Demonstrates "Read Committed" (Monotonic Atomic Views) isolation + * + * In this example, we set the snapshot multiple times. This is probably + * only necessary if you have very strict isolation requirements to + * implement. + */ + private static void readCommitted_monotonicAtomicViews( + final TransactionDB txnDb, final WriteOptions writeOptions, + final ReadOptions readOptions) throws RocksDBException { + + final byte keyX[] = "x".getBytes(UTF_8); + final byte valueX[] = "x".getBytes(UTF_8); + + final byte keyY[] = "y".getBytes(UTF_8); + final byte valueY[] = "y".getBytes(UTF_8); + + try (final TransactionOptions txnOptions = new TransactionOptions() + .setSetSnapshot(true); + final Transaction txn = + txnDb.beginTransaction(writeOptions, txnOptions)) { + + // Do some reads and writes to key "x" + Snapshot snapshot = txnDb.getSnapshot(); + readOptions.setSnapshot(snapshot); + byte[] value = txn.get(readOptions, keyX); + txn.put(valueX, valueX); + + // Do a write outside of the transaction to key "y" + txnDb.put(writeOptions, keyY, valueY); + + // Set a new snapshot in the transaction + txn.setSnapshot(); + txn.setSavePoint(); + snapshot = txnDb.getSnapshot(); + readOptions.setSnapshot(snapshot); + + // Do some reads and writes to key "y" + // Since the snapshot was advanced, the write done outside of the + // transaction does not conflict. + value = txn.getForUpdate(readOptions, keyY, true); + txn.put(keyY, valueY); + + // Decide we want to revert the last write from this transaction. + txn.rollbackToSavePoint(); + + // Commit. + txn.commit(); + } finally { + // Clear snapshot from read options since it is no longer valid + readOptions.setSnapshot(null); + } + } +} diff --git a/java/src/main/java/org/rocksdb/AbstractTransactionNotifier.java b/java/src/main/java/org/rocksdb/AbstractTransactionNotifier.java new file mode 100644 index 000000000..cbb49836d --- /dev/null +++ b/java/src/main/java/org/rocksdb/AbstractTransactionNotifier.java @@ -0,0 +1,54 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
+// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * Provides notification to the caller of SetSnapshotOnNextOperation when + * the actual snapshot gets created + */ +public abstract class AbstractTransactionNotifier + extends RocksCallbackObject { + + protected AbstractTransactionNotifier() { + super(); + } + + /** + * Implement this method to receive notification when a snapshot is + * requested via {@link Transaction#setSnapshotOnNextOperation()}. + * + * @param newSnapshot the snapshot that has been created. + */ + public abstract void snapshotCreated(final Snapshot newSnapshot); + + /** + * This is intentionally private as it is the callback hook + * from JNI + */ + private void snapshotCreated(final long snapshotHandle) { + snapshotCreated(new Snapshot(snapshotHandle)); + } + + @Override + protected long initializeNative(final long... nativeParameterHandles) { + return createNewTransactionNotifier(); + } + + private native long createNewTransactionNotifier(); + + /** + * Deletes underlying C++ TransactionNotifier pointer. + * + * Note that this function should be called only after all + * Transactions referencing the comparator are closed. + * Otherwise an undefined behavior will occur. + */ + @Override + protected void disposeInternal() { + disposeInternal(nativeHandle_); + } + protected final native void disposeInternal(final long handle); +} diff --git a/java/src/main/java/org/rocksdb/ColumnFamilyDescriptor.java b/java/src/main/java/org/rocksdb/ColumnFamilyDescriptor.java index d932fd9a9..8bb570e5d 100644 --- a/java/src/main/java/org/rocksdb/ColumnFamilyDescriptor.java +++ b/java/src/main/java/org/rocksdb/ColumnFamilyDescriptor.java @@ -5,6 +5,8 @@ package org.rocksdb; +import java.util.Arrays; + /** *

Describes a column family with a * name and respective Options.

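The hunks that follow rename the descriptor accessors to getName()/getOptions() (keeping columnFamilyName()/columnFamilyOptions() as deprecated aliases) and add equals()/hashCode() based on the name bytes and the options' native handle. A minimal, hedged sketch of the renamed accessors, assuming they land as in the following hunks (the "users" column family name is an illustrative placeholder):

import java.nio.charset.StandardCharsets;
import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.ColumnFamilyOptions;

public class ColumnFamilyDescriptorSketch {
  public static void main(final String[] args) {
    try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
      final ColumnFamilyDescriptor descriptor = new ColumnFamilyDescriptor(
          "users".getBytes(StandardCharsets.UTF_8), cfOpts);

      // Renamed accessors; columnFamilyName()/columnFamilyOptions() remain
      // available but are marked @Deprecated by this change.
      final byte[] name = descriptor.getName();
      final ColumnFamilyOptions options = descriptor.getOptions();

      // equals() compares the name bytes and the native handle of the options,
      // so a descriptor built from the same name and options instance is equal.
      assert descriptor.equals(new ColumnFamilyDescriptor(name, options));
    }
  }
}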
@@ -32,7 +34,7 @@ public class ColumnFamilyDescriptor { * @since 3.10.0 */ public ColumnFamilyDescriptor(final byte[] columnFamilyName, - final ColumnFamilyOptions columnFamilyOptions) { + final ColumnFamilyOptions columnFamilyOptions) { columnFamilyName_ = columnFamilyName; columnFamilyOptions_ = columnFamilyOptions; } @@ -43,19 +45,65 @@ public class ColumnFamilyDescriptor { * @return column family name. * @since 3.10.0 */ - public byte[] columnFamilyName() { + public byte[] getName() { return columnFamilyName_; } + /** + * Retrieve name of column family. + * + * @return column family name. + * @since 3.10.0 + * + * @deprecated Use {@link #getName()} instead. + */ + @Deprecated + public byte[] columnFamilyName() { + return getName(); + } + /** * Retrieve assigned options instance. * * @return Options instance assigned to this instance. */ - public ColumnFamilyOptions columnFamilyOptions() { + public ColumnFamilyOptions getOptions() { return columnFamilyOptions_; } + /** + * Retrieve assigned options instance. + * + * @return Options instance assigned to this instance. + * + * @deprecated Use {@link #getOptions()} instead. + */ + @Deprecated + public ColumnFamilyOptions columnFamilyOptions() { + return getOptions(); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + final ColumnFamilyDescriptor that = (ColumnFamilyDescriptor) o; + return Arrays.equals(columnFamilyName_, that.columnFamilyName_) + && columnFamilyOptions_.nativeHandle_ == that.columnFamilyOptions_.nativeHandle_; + } + + @Override + public int hashCode() { + int result = (int) (columnFamilyOptions_.nativeHandle_ ^ (columnFamilyOptions_.nativeHandle_ >>> 32)); + result = 31 * result + Arrays.hashCode(columnFamilyName_); + return result; + } + private final byte[] columnFamilyName_; private final ColumnFamilyOptions columnFamilyOptions_; } diff --git a/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java b/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java index 7726cc62d..16b9c609b 100644 --- a/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java +++ b/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java @@ -5,6 +5,9 @@ package org.rocksdb; +import java.util.Arrays; +import java.util.Objects; + /** * ColumnFamilyHandle class to hold handles to underlying rocksdb * ColumnFamily Pointers. @@ -21,6 +24,63 @@ public class ColumnFamilyHandle extends RocksObject { this.rocksDB_ = rocksDB; } + /** + * Gets the name of the Column Family. + * + * @return The name of the Column Family. + */ + public byte[] getName() { + return getName(nativeHandle_); + } + + /** + * Gets the ID of the Column Family. + * + * @return the ID of the Column Family. + */ + public int getID() { + return getID(nativeHandle_); + } + + /** + * Gets the up-to-date descriptor of the column family + * associated with this handle. Since it fills "*desc" with the up-to-date + * information, this call might internally lock and release DB mutex to + * access the up-to-date CF options. In addition, all the pointer-typed + * options cannot be referenced any longer than the original options exist. + * + * Note that this function is not supported in RocksDBLite. + * + * @return the up-to-date descriptor. + * + * @throws RocksDBException if an error occurs whilst retrieving the + * descriptor. 
+ */ + public ColumnFamilyDescriptor getDescriptor() throws RocksDBException { + assert(isOwningHandle()); + return getDescriptor(nativeHandle_); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + final ColumnFamilyHandle that = (ColumnFamilyHandle) o; + return rocksDB_.nativeHandle_ == that.rocksDB_.nativeHandle_ && + getID() == that.getID() && + Arrays.equals(getName(), that.getName()); + } + + @Override + public int hashCode() { + return Objects.hash(getName(), getID(), rocksDB_.nativeHandle_); + } + /** *

Deletes underlying C++ column family handle pointer.

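A quick, hedged sketch of the ColumnFamilyHandle accessors added above (getName(), getID(), getDescriptor()); the database path and the "users" column family are placeholders, and getDescriptor() may throw RocksDBException (it is also not supported in RocksDBLite):

import static java.nio.charset.StandardCharsets.UTF_8;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.rocksdb.*;

public class ColumnFamilyHandleSketch {
  public static void main(final String[] args) throws RocksDBException {
    final List<ColumnFamilyDescriptor> descriptors = Arrays.asList(
        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
        new ColumnFamilyDescriptor("users".getBytes(UTF_8)));
    final List<ColumnFamilyHandle> handles = new ArrayList<>();

    try (final DBOptions dbOptions = new DBOptions()
             .setCreateIfMissing(true)
             .setCreateMissingColumnFamilies(true);
         final RocksDB db = RocksDB.open(dbOptions,
             "/tmp/rocksdbjni_cfh_sketch", descriptors, handles)) {
      for (final ColumnFamilyHandle handle : handles) {
        // getName()/getID() mirror the underlying C++ ColumnFamilyHandle.
        System.out.println(new String(handle.getName(), UTF_8)
            + " has id " + handle.getID());

        // getDescriptor() fetches the up-to-date descriptor from the DB.
        final ColumnFamilyDescriptor descriptor = handle.getDescriptor();
      }

      // Close the column family handles before the database itself.
      for (final ColumnFamilyHandle handle : handles) {
        handle.close();
      }
    }
  }
}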
* @@ -36,6 +96,9 @@ public class ColumnFamilyHandle extends RocksObject { } } + private native byte[] getName(final long handle); + private native int getID(final long handle); + private native ColumnFamilyDescriptor getDescriptor(final long handle) throws RocksDBException; @Override protected final native void disposeInternal(final long handle); private final RocksDB rocksDB_; diff --git a/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java b/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java index b3890ed81..6c693cb44 100644 --- a/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java +++ b/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java @@ -53,6 +53,18 @@ public class ColumnFamilyOptions extends RocksObject this.compressionOptions_ = other.compressionOptions_; } + /** + *

Constructor to be used by + * {@link #getColumnFamilyOptionsFromProps(java.util.Properties)}, + * {@link ColumnFamilyDescriptor#columnFamilyOptions()} + * and also called via JNI.

+ * + * @param handle native handle to ColumnFamilyOptions instance. + */ + ColumnFamilyOptions(final long handle) { + super(handle); + } + /** *

Method to get an options instance by using pre-configured * property values. If one or many values are undefined in @@ -788,17 +800,6 @@ public class ColumnFamilyOptions extends RocksObject return forceConsistencyChecks(nativeHandle_); } - /** - *

Constructor to be used by - * {@link #getColumnFamilyOptionsFromProps(java.util.Properties)}

- * and also called via JNI. - * - * @param handle native handle to ColumnFamilyOptions instance. - */ - public ColumnFamilyOptions(final long handle) { - super(handle); - } - private static native long getColumnFamilyOptionsFromProps( String optString); diff --git a/java/src/main/java/org/rocksdb/DBOptionsInterface.java b/java/src/main/java/org/rocksdb/DBOptionsInterface.java index f42670190..572131199 100644 --- a/java/src/main/java/org/rocksdb/DBOptionsInterface.java +++ b/java/src/main/java/org/rocksdb/DBOptionsInterface.java @@ -269,7 +269,10 @@ public interface DBOptionsInterface { * Statistics objects should not be shared between DB instances as * it does not use any locks to prevent concurrent updates.

* + * @param statistics The statistics to set + * * @return the instance of the current object. + * * @see RocksDB#open(org.rocksdb.Options, String) */ T setStatistics(final Statistics statistics); @@ -277,7 +280,9 @@ public interface DBOptionsInterface { /** *

Returns statistics object.

* - * @return the instance of the statistics object or null if there is no statistics object. + * @return the instance of the statistics object or null if there is no + * statistics object. + * * @see #setStatistics(Statistics) */ Statistics statistics(); diff --git a/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java b/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java new file mode 100644 index 000000000..1610dc739 --- /dev/null +++ b/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java @@ -0,0 +1,175 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.util.List; + +/** + * Database with Transaction support. + */ +public class OptimisticTransactionDB extends RocksDB + implements TransactionalDB { + + /** + * Private constructor. + * + * @param nativeHandle The native handle of the C++ OptimisticTransactionDB + * object + */ + private OptimisticTransactionDB(final long nativeHandle) { + super(nativeHandle); + } + + /** + * Open an OptimisticTransactionDB similar to + * {@link RocksDB#open(Options, String)}. + * + * @param options {@link org.rocksdb.Options} instance. + * @param path the path to the rocksdb. + * + * @return a {@link OptimisticTransactionDB} instance on success, null if the + * specified {@link OptimisticTransactionDB} can not be opened. + * + * @throws RocksDBException if an error occurs whilst opening the database. + */ + public static OptimisticTransactionDB open(final Options options, + final String path) throws RocksDBException { + final OptimisticTransactionDB otdb = new OptimisticTransactionDB(open( + options.nativeHandle_, path)); + + // when non-default Options is used, keeping an Options reference + // in RocksDB can prevent Java to GC during the life-time of + // the currently-created RocksDB. + otdb.storeOptionsInstance(options); + + return otdb; + } + + /** + * Open an OptimisticTransactionDB similar to + * {@link RocksDB#open(DBOptions, String, List, List)}. + * + * @param dbOptions {@link org.rocksdb.DBOptions} instance. + * @param path the path to the rocksdb. + * @param columnFamilyDescriptors list of column family descriptors + * @param columnFamilyHandles will be filled with ColumnFamilyHandle instances + * + * @return a {@link OptimisticTransactionDB} instance on success, null if the + * specified {@link OptimisticTransactionDB} can not be opened. + * + * @throws RocksDBException if an error occurs whilst opening the database. 
+ */ + public static OptimisticTransactionDB open(final DBOptions dbOptions, + final String path, + final List columnFamilyDescriptors, + final List columnFamilyHandles) + throws RocksDBException { + + final byte[][] cfNames = new byte[columnFamilyDescriptors.size()][]; + final long[] cfOptionHandles = new long[columnFamilyDescriptors.size()]; + for (int i = 0; i < columnFamilyDescriptors.size(); i++) { + final ColumnFamilyDescriptor cfDescriptor = columnFamilyDescriptors + .get(i); + cfNames[i] = cfDescriptor.columnFamilyName(); + cfOptionHandles[i] = cfDescriptor.columnFamilyOptions().nativeHandle_; + } + + final long[] handles = open(dbOptions.nativeHandle_, path, cfNames, + cfOptionHandles); + final OptimisticTransactionDB otdb = + new OptimisticTransactionDB(handles[0]); + + // when non-default Options is used, keeping an Options reference + // in RocksDB can prevent Java to GC during the life-time of + // the currently-created RocksDB. + otdb.storeOptionsInstance(dbOptions); + + for (int i = 1; i < handles.length; i++) { + columnFamilyHandles.add(new ColumnFamilyHandle(otdb, handles[i])); + } + + return otdb; + } + + @Override + public Transaction beginTransaction(final WriteOptions writeOptions) { + return new Transaction(this, beginTransaction(nativeHandle_, + writeOptions.nativeHandle_)); + } + + @Override + public Transaction beginTransaction(final WriteOptions writeOptions, + final OptimisticTransactionOptions optimisticTransactionOptions) { + return new Transaction(this, beginTransaction(nativeHandle_, + writeOptions.nativeHandle_, + optimisticTransactionOptions.nativeHandle_)); + } + + // TODO(AR) consider having beingTransaction(... oldTransaction) set a + // reference count inside Transaction, so that we can always call + // Transaction#close but the object is only disposed when there are as many + // closes as beginTransaction. Makes the try-with-resources paradigm easier for + // java developers + + @Override + public Transaction beginTransaction(final WriteOptions writeOptions, + final Transaction oldTransaction) { + final long jtxn_handle = beginTransaction_withOld(nativeHandle_, + writeOptions.nativeHandle_, oldTransaction.nativeHandle_); + + // RocksJava relies on the assumption that + // we do not allocate a new Transaction object + // when providing an old_txn + assert(jtxn_handle == oldTransaction.nativeHandle_); + + return oldTransaction; + } + + @Override + public Transaction beginTransaction(final WriteOptions writeOptions, + final OptimisticTransactionOptions optimisticTransactionOptions, + final Transaction oldTransaction) { + final long jtxn_handle = beginTransaction_withOld(nativeHandle_, + writeOptions.nativeHandle_, optimisticTransactionOptions.nativeHandle_, + oldTransaction.nativeHandle_); + + // RocksJava relies on the assumption that + // we do not allocate a new Transaction object + // when providing an old_txn + assert(jtxn_handle == oldTransaction.nativeHandle_); + + return oldTransaction; + } + + /** + * Get the underlying database that was opened. + * + * @return The underlying database that was opened. 
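+ * Note that the returned {@link RocksDB} wraps the same native database and
+ * does not own its native handle (the handle is disowned before the instance
+ * is returned), so closing it will not close this OptimisticTransactionDB.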
+ */ + public RocksDB getBaseDB() { + final RocksDB db = new RocksDB(getBaseDB(nativeHandle_)); + db.disOwnNativeHandle(); + return db; + } + + protected static native long open(final long optionsHandle, + final String path) throws RocksDBException; + protected static native long[] open(final long handle, final String path, + final byte[][] columnFamilyNames, final long[] columnFamilyOptions); + private native long beginTransaction(final long handle, + final long writeOptionsHandle); + private native long beginTransaction(final long handle, + final long writeOptionsHandle, + final long optimisticTransactionOptionsHandle); + private native long beginTransaction_withOld(final long handle, + final long writeOptionsHandle, final long oldTransactionHandle); + private native long beginTransaction_withOld(final long handle, + final long writeOptionsHandle, + final long optimisticTransactionOptionsHandle, + final long oldTransactionHandle); + private native long getBaseDB(final long handle); + @Override protected final native void disposeInternal(final long handle); +} diff --git a/java/src/main/java/org/rocksdb/OptimisticTransactionOptions.java b/java/src/main/java/org/rocksdb/OptimisticTransactionOptions.java new file mode 100644 index 000000000..650ee2255 --- /dev/null +++ b/java/src/main/java/org/rocksdb/OptimisticTransactionOptions.java @@ -0,0 +1,53 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +public class OptimisticTransactionOptions extends RocksObject + implements TransactionalOptions { + + public OptimisticTransactionOptions() { + super(newOptimisticTransactionOptions()); + } + + @Override + public boolean isSetSnapshot() { + assert(isOwningHandle()); + return isSetSnapshot(nativeHandle_); + } + + @Override + public OptimisticTransactionOptions setSetSnapshot( + final boolean setSnapshot) { + assert(isOwningHandle()); + setSetSnapshot(nativeHandle_, setSnapshot); + return this; + } + + /** + * Should be set if the DB has a non-default comparator. + * See comment in + * {@link WriteBatchWithIndex#WriteBatchWithIndex(AbstractComparator, int, boolean)} + * constructor. + * + * @param comparator The comparator to use for the transaction. 
+ * + * @return this OptimisticTransactionOptions instance + */ + public OptimisticTransactionOptions setComparator( + final AbstractComparator> comparator) { + assert(isOwningHandle()); + setComparator(nativeHandle_, comparator.nativeHandle_); + return this; + } + + private native static long newOptimisticTransactionOptions(); + private native boolean isSetSnapshot(final long handle); + private native void setSetSnapshot(final long handle, + final boolean setSnapshot); + private native void setComparator(final long handle, + final long comparatorHandle); + @Override protected final native void disposeInternal(final long handle); +} diff --git a/java/src/main/java/org/rocksdb/RocksDB.java b/java/src/main/java/org/rocksdb/RocksDB.java index 592c7f9ad..3b398631d 100644 --- a/java/src/main/java/org/rocksdb/RocksDB.java +++ b/java/src/main/java/org/rocksdb/RocksDB.java @@ -435,7 +435,7 @@ public class RocksDB extends RocksObject { path)); } - private void storeOptionsInstance(DBOptionsInterface options) { + protected void storeOptionsInstance(DBOptionsInterface options) { options_ = options; } @@ -1683,7 +1683,7 @@ public class RocksDB extends RocksObject { * @return The handle of the default column family */ public ColumnFamilyHandle getDefaultColumnFamily() { - ColumnFamilyHandle cfHandle = new ColumnFamilyHandle(this, + final ColumnFamilyHandle cfHandle = new ColumnFamilyHandle(this, getDefaultColumnFamily(nativeHandle_)); cfHandle.disOwnNativeHandle(); return cfHandle; @@ -2359,8 +2359,9 @@ public class RocksDB extends RocksObject { final long[] columnFamilyHandles, final long readOptHandle) throws RocksDBException; protected native long getSnapshot(long nativeHandle); - protected native void releaseSnapshot(long nativeHandle, long snapshotHandle); - @Override protected final native void disposeInternal(final long handle); + protected native void releaseSnapshot( + long nativeHandle, long snapshotHandle); + @Override protected native void disposeInternal(final long handle); private native long getDefaultColumnFamily(long handle); private native long createColumnFamily(final long handle, final byte[] columnFamilyName, final long columnFamilyOptions) diff --git a/java/src/main/java/org/rocksdb/Snapshot.java b/java/src/main/java/org/rocksdb/Snapshot.java index a6b53f495..39cdf0c2d 100644 --- a/java/src/main/java/org/rocksdb/Snapshot.java +++ b/java/src/main/java/org/rocksdb/Snapshot.java @@ -11,6 +11,10 @@ package org.rocksdb; public class Snapshot extends RocksObject { Snapshot(final long nativeHandle) { super(nativeHandle); + + // The pointer to the snapshot is always released + // by the database instance. + disOwnNativeHandle(); } /** @@ -20,17 +24,17 @@ public class Snapshot extends RocksObject { * this snapshot. */ public long getSequenceNumber() { - assert(isOwningHandle()); return getSequenceNumber(nativeHandle_); } - /** - * Dont release C++ Snapshot pointer. The pointer - * to the snapshot is released by the database - * instance. - */ @Override protected final void disposeInternal(final long handle) { + /** + * Nothing to release, we never own the pointer for a + * Snapshot. The pointer + * to the snapshot is released by the database + * instance. 
+ */ } private native long getSequenceNumber(long handle); diff --git a/java/src/main/java/org/rocksdb/Statistics.java b/java/src/main/java/org/rocksdb/Statistics.java index 10c072c89..0938a6d58 100644 --- a/java/src/main/java/org/rocksdb/Statistics.java +++ b/java/src/main/java/org/rocksdb/Statistics.java @@ -117,6 +117,8 @@ public class Statistics extends RocksObject { /** * Resets all ticker and histogram stats. + * + * @throws RocksDBException if an error occurs when resetting the statistics. */ public void reset() throws RocksDBException { assert(isOwningHandle()); @@ -126,6 +128,7 @@ public class Statistics extends RocksObject { /** * String representation of the statistic object. */ + @Override public String toString() { assert(isOwningHandle()); return toString(nativeHandle_); diff --git a/java/src/main/java/org/rocksdb/Transaction.java b/java/src/main/java/org/rocksdb/Transaction.java new file mode 100644 index 000000000..c619bb105 --- /dev/null +++ b/java/src/main/java/org/rocksdb/Transaction.java @@ -0,0 +1,1761 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.util.List; + +/** + * Provides BEGIN/COMMIT/ROLLBACK transactions. + * + * To use transactions, you must first create either an + * {@link OptimisticTransactionDB} or a {@link TransactionDB} + * + * To create a transaction, use + * {@link OptimisticTransactionDB#beginTransaction(org.rocksdb.WriteOptions)} or + * {@link TransactionDB#beginTransaction(org.rocksdb.WriteOptions)} + * + * It is up to the caller to synchronize access to this object. + * + * See samples/src/main/java/OptimisticTransactionSample.java and + * samples/src/main/java/TransactionSample.java for some simple + * examples. + */ +public class Transaction extends RocksObject { + + private final RocksDB parent; + + /** + * Intentionally package private + * as this is called from + * {@link OptimisticTransactionDB#beginTransaction(org.rocksdb.WriteOptions)} + * or {@link TransactionDB#beginTransaction(org.rocksdb.WriteOptions)} + * + * @param parent This must be either {@link TransactionDB} or + * {@link OptimisticTransactionDB} + * @param transactionHandle The native handle to the underlying C++ + * transaction object + */ + Transaction(final RocksDB parent, final long transactionHandle) { + super(transactionHandle); + this.parent = parent; + } + + /** + * If a transaction has a snapshot set, the transaction will ensure that + * any keys successfully written(or fetched via {@link #getForUpdate}) have + * not been modified outside of this transaction since the time the snapshot + * was set. + * + * If a snapshot has not been set, the transaction guarantees that keys have + * not been modified since the time each key was first written (or fetched via + * {@link #getForUpdate}). + * + * Using {@link #setSnapshot()} will provide stricter isolation guarantees + * at the expense of potentially more transaction failures due to conflicts + * with other writes. + * + * Calling {@link #setSnapshot()} has no effect on keys written before this + * function has been called. + * + * {@link #setSnapshot()} may be called multiple times if you would like to + * change the snapshot used for different operations in this transaction. 
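+ *
+ * A hedged sketch of the stricter checking this enables (key and value names
+ * are illustrative only, with UTF_8 imported as in the samples):
+ * <pre>{@code
+ *   txn.setSnapshot();
+ *   // ... another writer commits a change to key "k" here ...
+ *   txn.put("k".getBytes(UTF_8), "v".getBytes(UTF_8));
+ *   // With a TransactionDB the put above fails (Status.Code.Busy); with an
+ *   // OptimisticTransactionDB the conflict surfaces when commit() is called.
+ * }</pre>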
+ * + * Calling {@link #setSnapshot()} will not affect the version of Data returned + * by get(...) methods. See {@link #get} for more details. + */ + public void setSnapshot() { + assert(isOwningHandle()); + setSnapshot(nativeHandle_); + } + + /** + * Similar to {@link #setSnapshot()}, but will not change the current snapshot + * until put/merge/delete/getForUpdate/multiGetForUpdate is called. + * By calling this function, the transaction will essentially call + * {@link #setSnapshot()} for you right before performing the next + * write/getForUpdate. + * + * Calling {@link #setSnapshotOnNextOperation()} will not affect what + * snapshot is returned by {@link #getSnapshot} until the next + * write/getForUpdate is executed. + * + * When the snapshot is created the notifier's snapshotCreated method will + * be called so that the caller can get access to the snapshot. + * + * This is an optimization to reduce the likelihood of conflicts that + * could occur in between the time {@link #setSnapshot()} is called and the + * first write/getForUpdate operation. i.e. this prevents the following + * race-condition: + * + * txn1->setSnapshot(); + * txn2->put("A", ...); + * txn2->commit(); + * txn1->getForUpdate(opts, "A", ...); * FAIL! + */ + public void setSnapshotOnNextOperation() { + assert(isOwningHandle()); + setSnapshotOnNextOperation(nativeHandle_); + } + + /** + * Similar to {@link #setSnapshot()}, but will not change the current snapshot + * until put/merge/delete/getForUpdate/multiGetForUpdate is called. + * By calling this function, the transaction will essentially call + * {@link #setSnapshot()} for you right before performing the next + * write/getForUpdate. + * + * Calling {@link #setSnapshotOnNextOperation()} will not affect what + * snapshot is returned by {@link #getSnapshot} until the next + * write/getForUpdate is executed. + * + * When the snapshot is created the + * {@link AbstractTransactionNotifier#snapshotCreated(Snapshot)} method will + * be called so that the caller can get access to the snapshot. + * + * This is an optimization to reduce the likelihood of conflicts that + * could occur in between the time {@link #setSnapshot()} is called and the + * first write/getForUpdate operation. i.e. this prevents the following + * race-condition: + * + * txn1->setSnapshot(); + * txn2->put("A", ...); + * txn2->commit(); + * txn1->getForUpdate(opts, "A", ...); * FAIL! + * + * @param transactionNotifier A handler for receiving snapshot notifications + * for the transaction + * + */ + public void setSnapshotOnNextOperation( + final AbstractTransactionNotifier transactionNotifier) { + assert(isOwningHandle()); + setSnapshotOnNextOperation(nativeHandle_, transactionNotifier.nativeHandle_); + } + + /** + * Returns the Snapshot created by the last call to {@link #setSnapshot()}. + * + * REQUIRED: The returned Snapshot is only valid up until the next time + * {@link #setSnapshot()}/{@link #setSnapshotOnNextOperation()} is called, + * {@link #clearSnapshot()} is called, or the Transaction is deleted. + * + * @return The snapshot or null if there is no snapshot + */ + public Snapshot getSnapshot() { + assert(isOwningHandle()); + final long snapshotNativeHandle = getSnapshot(nativeHandle_); + if(snapshotNativeHandle == 0) { + return null; + } else { + final Snapshot snapshot = new Snapshot(snapshotNativeHandle); + return snapshot; + } + } + + /** + * Clears the current snapshot (i.e. 
no snapshot will be 'set') + * + * This removes any snapshot that currently exists or is set to be created + * on the next update operation ({@link #setSnapshotOnNextOperation()}). + * + * Calling {@link #clearSnapshot()} has no effect on keys written before this + * function has been called. + * + * If a reference to a snapshot was retrieved via {@link #getSnapshot()}, it + * will no longer be valid and should be discarded after a call to + * {@link #clearSnapshot()}. + */ + public void clearSnapshot() { + assert(isOwningHandle()); + clearSnapshot(nativeHandle_); + } + + /** + * Prepare the current transaction for 2PC + */ + void prepare() throws RocksDBException { + //TODO(AR) consider a Java'ish version of this function, which returns an AutoCloseable (commit) + assert(isOwningHandle()); + prepare(nativeHandle_); + } + + /** + * Write all batched keys to the db atomically. + * + * Returns OK on success. + * + * May return any error status that could be returned by DB:Write(). + * + * If this transaction was created by an {@link OptimisticTransactionDB} + * Status::Busy() may be returned if the transaction could not guarantee + * that there are no write conflicts. Status::TryAgain() may be returned + * if the memtable history size is not large enough + * (See max_write_buffer_number_to_maintain). + * + * If this transaction was created by a {@link TransactionDB}, + * Status::Expired() may be returned if this transaction has lived for + * longer than {@link TransactionOptions#getExpiration()}. + * + * @throws RocksDBException if an error occurs when committing the transaction + */ + public void commit() throws RocksDBException { + assert(isOwningHandle()); + commit(nativeHandle_); + } + + /** + * Discard all batched writes in this transaction. + * + * @throws RocksDBException if an error occurs when rolling back the transaction + */ + public void rollback() throws RocksDBException { + assert(isOwningHandle()); + rollback(nativeHandle_); + } + + /** + * Records the state of the transaction for future calls to + * {@link #rollbackToSavePoint()}. + * + * May be called multiple times to set multiple save points. + * + * @throws RocksDBException if an error occurs whilst setting a save point + */ + public void setSavePoint() throws RocksDBException { + assert(isOwningHandle()); + setSavePoint(nativeHandle_); + } + + /** + * Undo all operations in this transaction (put, merge, delete, putLogData) + * since the most recent call to {@link #setSavePoint()} and removes the most + * recent {@link #setSavePoint()}. + * + * If there is no previous call to {@link #setSavePoint()}, + * returns Status::NotFound() + * + * @throws RocksDBException if an error occurs when rolling back to a save point + */ + public void rollbackToSavePoint() throws RocksDBException { + assert(isOwningHandle()); + rollbackToSavePoint(nativeHandle_); + } + + /** + * This function is similar to + * {@link RocksDB#get(ColumnFamilyHandle, ReadOptions, byte[])} except it will + * also read pending changes in this transaction. + * Currently, this function will return Status::MergeInProgress if the most + * recent write to the queried key in this batch is a Merge. + * + * If {@link ReadOptions#snapshot()} is not set, the current version of the + * key will be read. Calling {@link #setSnapshot()} does not affect the + * version of the data returned. 
+ * + * Note that setting {@link ReadOptions#setSnapshot(Snapshot)} will affect + * what is read from the DB but will NOT change which keys are read from this + * transaction (the keys in this transaction do not yet belong to any snapshot + * and will be fetched regardless). + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} instance + * @param readOptions Read options. + * @param key the key to retrieve the value for. + * + * @return a byte array storing the value associated with the input key if + * any. null if it does not find the specified key. + * + * @throws RocksDBException thrown if error happens in underlying native + * library. + */ + public byte[] get(final ColumnFamilyHandle columnFamilyHandle, + final ReadOptions readOptions, final byte[] key) throws RocksDBException { + assert(isOwningHandle()); + return get(nativeHandle_, readOptions.nativeHandle_, key, key.length, + columnFamilyHandle.nativeHandle_); + } + + /** + * This function is similar to + * {@link RocksDB#get(ReadOptions, byte[])} except it will + * also read pending changes in this transaction. + * Currently, this function will return Status::MergeInProgress if the most + * recent write to the queried key in this batch is a Merge. + * + * If {@link ReadOptions#snapshot()} is not set, the current version of the + * key will be read. Calling {@link #setSnapshot()} does not affect the + * version of the data returned. + * + * Note that setting {@link ReadOptions#setSnapshot(Snapshot)} will affect + * what is read from the DB but will NOT change which keys are read from this + * transaction (the keys in this transaction do not yet belong to any snapshot + * and will be fetched regardless). + * + * @param readOptions Read options. + * @param key the key to retrieve the value for. + * + * @return a byte array storing the value associated with the input key if + * any. null if it does not find the specified key. + * + * @throws RocksDBException thrown if error happens in underlying native + * library. + */ + public byte[] get(final ReadOptions readOptions, final byte[] key) + throws RocksDBException { + assert(isOwningHandle()); + return get(nativeHandle_, readOptions.nativeHandle_, key, key.length); + } + + /** + * This function is similar to + * {@link RocksDB#multiGet(ReadOptions, List, List)} except it will + * also read pending changes in this transaction. + * Currently, this function will return Status::MergeInProgress if the most + * recent write to the queried key in this batch is a Merge. + * + * If {@link ReadOptions#snapshot()} is not set, the current version of the + * key will be read. Calling {@link #setSnapshot()} does not affect the + * version of the data returned. + * + * Note that setting {@link ReadOptions#setSnapshot(Snapshot)} will affect + * what is read from the DB but will NOT change which keys are read from this + * transaction (the keys in this transaction do not yet belong to any snapshot + * and will be fetched regardless). + * + * @param readOptions Read options. + * @param columnFamilyHandles {@link java.util.List} containing + * {@link org.rocksdb.ColumnFamilyHandle} instances. + * @param keys of keys for which values need to be retrieved. + * + * @return Array of values, one for each key + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + * @throws IllegalArgumentException thrown if the size of passed keys is not + * equal to the amount of passed column family handles. 
+ */ + public byte[][] multiGet(final ReadOptions readOptions, + final List columnFamilyHandles, + final byte[][] keys) throws RocksDBException { + assert(isOwningHandle()); + // Check if key size equals cfList size. If not a exception must be + // thrown. If not a Segmentation fault happens. + if (keys.length != columnFamilyHandles.size()) { + throw new IllegalArgumentException( + "For each key there must be a ColumnFamilyHandle."); + } + if(keys.length == 0) { + return new byte[0][0]; + } + final long[] cfHandles = new long[columnFamilyHandles.size()]; + for (int i = 0; i < columnFamilyHandles.size(); i++) { + cfHandles[i] = columnFamilyHandles.get(i).nativeHandle_; + } + + return multiGet(nativeHandle_, readOptions.nativeHandle_, + keys, cfHandles); + } + + /** + * This function is similar to + * {@link RocksDB#multiGet(ReadOptions, List)} except it will + * also read pending changes in this transaction. + * Currently, this function will return Status::MergeInProgress if the most + * recent write to the queried key in this batch is a Merge. + * + * If {@link ReadOptions#snapshot()} is not set, the current version of the + * key will be read. Calling {@link #setSnapshot()} does not affect the + * version of the data returned. + * + * Note that setting {@link ReadOptions#setSnapshot(Snapshot)} will affect + * what is read from the DB but will NOT change which keys are read from this + * transaction (the keys in this transaction do not yet belong to any snapshot + * and will be fetched regardless). + * + * @param readOptions Read options.= + * {@link org.rocksdb.ColumnFamilyHandle} instances. + * @param keys of keys for which values need to be retrieved. + * + * @return Array of values, one for each key + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public byte[][] multiGet(final ReadOptions readOptions, + final byte[][] keys) throws RocksDBException { + assert(isOwningHandle()); + if(keys.length == 0) { + return new byte[0][0]; + } + + return multiGet(nativeHandle_, readOptions.nativeHandle_, + keys); + } + + /** + * Read this key and ensure that this transaction will only + * be able to be committed if this key is not written outside this + * transaction after it has first been read (or after the snapshot if a + * snapshot is set in this transaction). The transaction behavior is the + * same regardless of whether the key exists or not. + * + * Note: Currently, this function will return Status::MergeInProgress + * if the most recent write to the queried key in this batch is a Merge. + * + * The values returned by this function are similar to + * {@link RocksDB#get(ColumnFamilyHandle, ReadOptions, byte[])}. + * If value==nullptr, then this function will not read any data, but will + * still ensure that this key cannot be written to by outside of this + * transaction. + * + * If this transaction was created by an {@link OptimisticTransactionDB}, + * {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)} + * could cause {@link #commit()} to fail. Otherwise, it could return any error + * that could be returned by + * {@link RocksDB#get(ColumnFamilyHandle, ReadOptions, byte[])}. 
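+ *
+ * A hedged read-modify-write sketch (the column family handle, key and
+ * newValue are illustrative placeholders):
+ * <pre>{@code
+ *   final byte[] current = txn.getForUpdate(readOptions, cfHandle,
+ *       "count".getBytes(UTF_8), true);
+ *   // "count" now takes part in this transaction's conflict checking (and is
+ *   // locked when the transaction comes from a TransactionDB).
+ *   txn.put(cfHandle, "count".getBytes(UTF_8), newValue); // derived from current
+ *   txn.commit();
+ * }</pre>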
+ * + * If this transaction was created on a {@link TransactionDB}, an + * {@link RocksDBException} may be thrown with an accompanying {@link Status} + * when: + * {@link Status.Code#Busy} if there is a write conflict, + * {@link Status.Code#TimedOut} if a lock could not be acquired, + * {@link Status.Code#TryAgain} if the memtable history size is not large + * enough. See + * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()} + * {@link Status.Code#MergeInProgress} if merge operations cannot be + * resolved. + * + * @param readOptions Read options. + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param key the key to retrieve the value for. + * @param exclusive true if the transaction should have exclusive access to + * the key, otherwise false for shared access. + * + * @return a byte array storing the value associated with the input key if + * any. null if it does not find the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public byte[] getForUpdate(final ReadOptions readOptions, + final ColumnFamilyHandle columnFamilyHandle, final byte[] key, + final boolean exclusive) throws RocksDBException { + assert(isOwningHandle()); + return getForUpdate(nativeHandle_, readOptions.nativeHandle_, key, + key.length, columnFamilyHandle.nativeHandle_, exclusive); + } + + /** + * Read this key and ensure that this transaction will only + * be able to be committed if this key is not written outside this + * transaction after it has first been read (or after the snapshot if a + * snapshot is set in this transaction). The transaction behavior is the + * same regardless of whether the key exists or not. + * + * Note: Currently, this function will return Status::MergeInProgress + * if the most recent write to the queried key in this batch is a Merge. + * + * The values returned by this function are similar to + * {@link RocksDB#get(ReadOptions, byte[])}. + * If value==nullptr, then this function will not read any data, but will + * still ensure that this key cannot be written to by outside of this + * transaction. + * + * If this transaction was created on an {@link OptimisticTransactionDB}, + * {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)} + * could cause {@link #commit()} to fail. Otherwise, it could return any error + * that could be returned by + * {@link RocksDB#get(ReadOptions, byte[])}. + * + * If this transaction was created on a {@link TransactionDB}, an + * {@link RocksDBException} may be thrown with an accompanying {@link Status} + * when: + * {@link Status.Code#Busy} if there is a write conflict, + * {@link Status.Code#TimedOut} if a lock could not be acquired, + * {@link Status.Code#TryAgain} if the memtable history size is not large + * enough. See + * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()} + * {@link Status.Code#MergeInProgress} if merge operations cannot be + * resolved. + * + * @param readOptions Read options. + * @param key the key to retrieve the value for. + * @param exclusive true if the transaction should have exclusive access to + * the key, otherwise false for shared access. + * + * @return a byte array storing the value associated with the input key if + * any. null if it does not find the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. 
+ */ + public byte[] getForUpdate(final ReadOptions readOptions, final byte[] key, + final boolean exclusive) throws RocksDBException { + assert(isOwningHandle()); + return getForUpdate(nativeHandle_, readOptions.nativeHandle_, key, + key.length, exclusive); + } + + /** + * A multi-key version of + * {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)}. + * + * + * @param readOptions Read options. + * @param columnFamilyHandles {@link org.rocksdb.ColumnFamilyHandle} + * instances + * @param keys the keys to retrieve the values for. + * + * @return Array of values, one for each key + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public byte[][] multiGetForUpdate(final ReadOptions readOptions, + final List columnFamilyHandles, + final byte[][] keys) throws RocksDBException { + assert(isOwningHandle()); + // Check if key size equals cfList size. If not a exception must be + // thrown. If not a Segmentation fault happens. + if (keys.length != columnFamilyHandles.size()){ + throw new IllegalArgumentException( + "For each key there must be a ColumnFamilyHandle."); + } + if(keys.length == 0) { + return new byte[0][0]; + } + final long[] cfHandles = new long[columnFamilyHandles.size()]; + for (int i = 0; i < columnFamilyHandles.size(); i++) { + cfHandles[i] = columnFamilyHandles.get(i).nativeHandle_; + } + return multiGetForUpdate(nativeHandle_, readOptions.nativeHandle_, + keys, cfHandles); + } + + /** + * A multi-key version of {@link #getForUpdate(ReadOptions, byte[], boolean)}. + * + * + * @param readOptions Read options. + * @param keys the keys to retrieve the values for. + * + * @return Array of values, one for each key + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public byte[][] multiGetForUpdate(final ReadOptions readOptions, + final byte[][] keys) throws RocksDBException { + assert(isOwningHandle()); + if(keys.length == 0) { + return new byte[0][0]; + } + + return multiGetForUpdate(nativeHandle_, + readOptions.nativeHandle_, keys); + } + + /** + * Returns an iterator that will iterate on all keys in the default + * column family including both keys in the DB and uncommitted keys in this + * transaction. + * + * Setting {@link ReadOptions#setSnapshot(Snapshot)} will affect what is read + * from the DB but will NOT change which keys are read from this transaction + * (the keys in this transaction do not yet belong to any snapshot and will be + * fetched regardless). + * + * Caller is responsible for deleting the returned Iterator. + * + * The returned iterator is only valid until {@link #commit()}, + * {@link #rollback()}, or {@link #rollbackToSavePoint()} is called. + * + * @param readOptions Read options. + * + * @return instance of iterator object. + */ + public RocksIterator getIterator(final ReadOptions readOptions) { + assert(isOwningHandle()); + return new RocksIterator(parent, getIterator(nativeHandle_, + readOptions.nativeHandle_)); + } + + /** + * Returns an iterator that will iterate on all keys in the default + * column family including both keys in the DB and uncommitted keys in this + * transaction. + * + * Setting {@link ReadOptions#setSnapshot(Snapshot)} will affect what is read + * from the DB but will NOT change which keys are read from this transaction + * (the keys in this transaction do not yet belong to any snapshot and will be + * fetched regardless). 
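+ *
+ * A hedged sketch (the handle and key names are illustrative only):
+ * <pre>{@code
+ *   txn.put(cfHandle, "k2".getBytes(UTF_8), "v2".getBytes(UTF_8)); // not yet committed
+ *   try (final RocksIterator it = txn.getIterator(readOptions, cfHandle)) {
+ *     for (it.seekToFirst(); it.isValid(); it.next()) {
+ *       // visits committed keys from the DB as well as the uncommitted "k2"
+ *     }
+ *   }
+ * }</pre>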
+ * + * Caller is responsible for calling {@link RocksIterator#close()} on + * the returned Iterator. + * + * The returned iterator is only valid until {@link #commit()}, + * {@link #rollback()}, or {@link #rollbackToSavePoint()} is called. + * + * @param readOptions Read options. + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * + * @return instance of iterator object. + */ + public RocksIterator getIterator(final ReadOptions readOptions, + final ColumnFamilyHandle columnFamilyHandle) { + assert(isOwningHandle()); + return new RocksIterator(parent, getIterator(nativeHandle_, + readOptions.nativeHandle_, columnFamilyHandle.nativeHandle_)); + } + + /** + * Similar to {@link RocksDB#put(ColumnFamilyHandle, byte[], byte[])}, but + * will also perform conflict checking on the keys be written. + * + * If this Transaction was created on an {@link OptimisticTransactionDB}, + * these functions should always succeed. + * + * If this Transaction was created on a {@link TransactionDB}, an + * {@link RocksDBException} may be thrown with an accompanying {@link Status} + * when: + * {@link Status.Code#Busy} if there is a write conflict, + * {@link Status.Code#TimedOut} if a lock could not be acquired, + * {@link Status.Code#TryAgain} if the memtable history size is not large + * enough. See + * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()} + * + * @param columnFamilyHandle The column family to put the key/value into + * @param key the specified key to be inserted. + * @param value the value associated with the specified key. + * + * @throws RocksDBException when one of the TransactionalDB conditions + * described above occurs, or in the case of an unexpected error + */ + public void put(final ColumnFamilyHandle columnFamilyHandle, final byte[] key, + final byte[] value) throws RocksDBException { + assert(isOwningHandle()); + put(nativeHandle_, key, key.length, value, value.length, + columnFamilyHandle.nativeHandle_); + } + + /** + * Similar to {@link RocksDB#put(byte[], byte[])}, but + * will also perform conflict checking on the keys be written. + * + * If this Transaction was created on an {@link OptimisticTransactionDB}, + * these functions should always succeed. + * + * If this Transaction was created on a {@link TransactionDB}, an + * {@link RocksDBException} may be thrown with an accompanying {@link Status} + * when: + * {@link Status.Code#Busy} if there is a write conflict, + * {@link Status.Code#TimedOut} if a lock could not be acquired, + * {@link Status.Code#TryAgain} if the memtable history size is not large + * enough. See + * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()} + * + * @param key the specified key to be inserted. + * @param value the value associated with the specified key. + * + * @throws RocksDBException when one of the TransactionalDB conditions + * described above occurs, or in the case of an unexpected error + */ + public void put(final byte[] key, final byte[] value) + throws RocksDBException { + assert(isOwningHandle()); + put(nativeHandle_, key, key.length, value, value.length); + } + + //TODO(AR) refactor if we implement org.rocksdb.SliceParts in future + /** + * Similar to {@link #put(ColumnFamilyHandle, byte[], byte[])} but allows + * you to specify the key and value in several parts that will be + * concatenated together. + * + * @param columnFamilyHandle The column family to put the key/value into + * @param keyParts the specified key to be inserted. 
+ * @param valueParts the value associated with the specified key. + * + * @throws RocksDBException when one of the TransactionalDB conditions + * described above occurs, or in the case of an unexpected error + */ + public void put(final ColumnFamilyHandle columnFamilyHandle, + final byte[][] keyParts, final byte[][] valueParts) + throws RocksDBException { + assert(isOwningHandle()); + put(nativeHandle_, keyParts, keyParts.length, valueParts, valueParts.length, + columnFamilyHandle.nativeHandle_); + } + + //TODO(AR) refactor if we implement org.rocksdb.SliceParts in future + /** + * Similar to {@link #put(byte[], byte[])} but allows + * you to specify the key and value in several parts that will be + * concatenated together + * + * @param keyParts the specified key to be inserted. + * @param valueParts the value associated with the specified key. + * + * @throws RocksDBException when one of the TransactionalDB conditions + * described above occurs, or in the case of an unexpected error + */ + public void put(final byte[][] keyParts, final byte[][] valueParts) + throws RocksDBException { + assert(isOwningHandle()); + put(nativeHandle_, keyParts, keyParts.length, valueParts, + valueParts.length); + } + + /** + * Similar to {@link RocksDB#merge(ColumnFamilyHandle, byte[], byte[])}, but + * will also perform conflict checking on the keys be written. + * + * If this Transaction was created on an {@link OptimisticTransactionDB}, + * these functions should always succeed. + * + * If this Transaction was created on a {@link TransactionDB}, an + * {@link RocksDBException} may be thrown with an accompanying {@link Status} + * when: + * {@link Status.Code#Busy} if there is a write conflict, + * {@link Status.Code#TimedOut} if a lock could not be acquired, + * {@link Status.Code#TryAgain} if the memtable history size is not large + * enough. See + * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()} + * + * @param columnFamilyHandle The column family to merge the key/value into + * @param key the specified key to be merged. + * @param value the value associated with the specified key. + * + * @throws RocksDBException when one of the TransactionalDB conditions + * described above occurs, or in the case of an unexpected error + */ + public void merge(final ColumnFamilyHandle columnFamilyHandle, + final byte[] key, final byte[] value) throws RocksDBException { + assert(isOwningHandle()); + merge(nativeHandle_, key, key.length, value, value.length, + columnFamilyHandle.nativeHandle_); + } + + /** + * Similar to {@link RocksDB#merge(byte[], byte[])}, but + * will also perform conflict checking on the keys be written. + * + * If this Transaction was created on an {@link OptimisticTransactionDB}, + * these functions should always succeed. + * + * If this Transaction was created on a {@link TransactionDB}, an + * {@link RocksDBException} may be thrown with an accompanying {@link Status} + * when: + * {@link Status.Code#Busy} if there is a write conflict, + * {@link Status.Code#TimedOut} if a lock could not be acquired, + * {@link Status.Code#TryAgain} if the memtable history size is not large + * enough. See + * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()} + * + * @param key the specified key to be merged. + * @param value the value associated with the specified key. 
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ *     described above occurs, or in the case of an unexpected error
+ */
+ public void merge(final byte[] key, final byte[] value)
+     throws RocksDBException {
+   assert(isOwningHandle());
+   merge(nativeHandle_, key, key.length, value, value.length);
+ }
+
+ /**
+ * Similar to {@link RocksDB#delete(ColumnFamilyHandle, byte[])}, but
+ * will also perform conflict checking on the keys being written.
+ *
+ * If this Transaction was created on an {@link OptimisticTransactionDB},
+ * these functions should always succeed.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, a
+ * {@link RocksDBException} may be thrown with an accompanying {@link Status}
+ * when:
+ * {@link Status.Code#Busy} if there is a write conflict,
+ * {@link Status.Code#TimedOut} if a lock could not be acquired,
+ * {@link Status.Code#TryAgain} if the memtable history size is not large
+ * enough. See
+ * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()}
+ *
+ * @param columnFamilyHandle The column family to delete the key/value from
+ * @param key the specified key to be deleted.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ *     described above occurs, or in the case of an unexpected error
+ */
+ public void delete(final ColumnFamilyHandle columnFamilyHandle,
+     final byte[] key) throws RocksDBException {
+   assert(isOwningHandle());
+   delete(nativeHandle_, key, key.length, columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Similar to {@link RocksDB#delete(byte[])}, but
+ * will also perform conflict checking on the keys being written.
+ *
+ * If this Transaction was created on an {@link OptimisticTransactionDB},
+ * these functions should always succeed.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, a
+ * {@link RocksDBException} may be thrown with an accompanying {@link Status}
+ * when:
+ * {@link Status.Code#Busy} if there is a write conflict,
+ * {@link Status.Code#TimedOut} if a lock could not be acquired,
+ * {@link Status.Code#TryAgain} if the memtable history size is not large
+ * enough. See
+ * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()}
+ *
+ * @param key the specified key to be deleted.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ *     described above occurs, or in the case of an unexpected error
+ */
+ public void delete(final byte[] key) throws RocksDBException {
+   assert(isOwningHandle());
+   delete(nativeHandle_, key, key.length);
+ }
+
+ //TODO(AR) refactor if we implement org.rocksdb.SliceParts in future
+ /**
+ * Similar to {@link #delete(ColumnFamilyHandle, byte[])} but allows
+ * you to specify the key in several parts that will be
+ * concatenated together.
+ *
+ * @param columnFamilyHandle The column family to delete the key/value from
+ * @param keyParts the specified key to be deleted.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ *     described above occurs, or in the case of an unexpected error
+ */
+ public void delete(final ColumnFamilyHandle columnFamilyHandle,
+     final byte[][] keyParts) throws RocksDBException {
+   assert(isOwningHandle());
+   delete(nativeHandle_, keyParts, keyParts.length,
+       columnFamilyHandle.nativeHandle_);
+ }
+
+ //TODO(AR) refactor if we implement org.rocksdb.SliceParts in future
+ /**
+ * Similar to {@link #delete(byte[])} but allows
+ * you to specify the key in several parts that will be
+ * concatenated together.
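+ *
+ * <p>Illustrative usage only ({@code keyPrefix} and {@code keySuffix} are
+ * placeholder {@code byte[]} variables; the parts are concatenated and
+ * treated as a single key):
+ * <pre>{@code
+ *   txn.delete(new byte[][]{keyPrefix, keySuffix});
+ * }</pre>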
+ *
+ * @param keyParts the specified key to be deleted
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ *     described above occurs, or in the case of an unexpected error
+ */
+ public void delete(final byte[][] keyParts) throws RocksDBException {
+   assert(isOwningHandle());
+   delete(nativeHandle_, keyParts, keyParts.length);
+ }
+
+ /**
+ * Similar to {@link RocksDB#singleDelete(ColumnFamilyHandle, byte[])}, but
+ * will also perform conflict checking on the keys being written.
+ *
+ * If this Transaction was created on an {@link OptimisticTransactionDB},
+ * these functions should always succeed.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, a
+ * {@link RocksDBException} may be thrown with an accompanying {@link Status}
+ * when:
+ * {@link Status.Code#Busy} if there is a write conflict,
+ * {@link Status.Code#TimedOut} if a lock could not be acquired,
+ * {@link Status.Code#TryAgain} if the memtable history size is not large
+ * enough. See
+ * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()}
+ *
+ * @param columnFamilyHandle The column family to delete the key/value from
+ * @param key the specified key to be deleted.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ *     described above occurs, or in the case of an unexpected error
+ */
+ @Experimental("Performance optimization for a very specific workload")
+ public void singleDelete(final ColumnFamilyHandle columnFamilyHandle,
+     final byte[] key) throws RocksDBException {
+   assert(isOwningHandle());
+   singleDelete(nativeHandle_, key, key.length,
+       columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Similar to {@link RocksDB#singleDelete(byte[])}, but
+ * will also perform conflict checking on the keys being written.
+ *
+ * If this Transaction was created on an {@link OptimisticTransactionDB},
+ * these functions should always succeed.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, a
+ * {@link RocksDBException} may be thrown with an accompanying {@link Status}
+ * when:
+ * {@link Status.Code#Busy} if there is a write conflict,
+ * {@link Status.Code#TimedOut} if a lock could not be acquired,
+ * {@link Status.Code#TryAgain} if the memtable history size is not large
+ * enough. See
+ * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()}
+ *
+ * @param key the specified key to be deleted.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ *     described above occurs, or in the case of an unexpected error
+ */
+ @Experimental("Performance optimization for a very specific workload")
+ public void singleDelete(final byte[] key) throws RocksDBException {
+   assert(isOwningHandle());
+   singleDelete(nativeHandle_, key, key.length);
+ }
+
+ //TODO(AR) refactor if we implement org.rocksdb.SliceParts in future
+ /**
+ * Similar to {@link #singleDelete(ColumnFamilyHandle, byte[])} but allows
+ * you to specify the key in several parts that will be
+ * concatenated together.
+ *
+ * @param columnFamilyHandle The column family to delete the key/value from
+ * @param keyParts the specified key to be deleted.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ *     described above occurs, or in the case of an unexpected error
+ */
+ @Experimental("Performance optimization for a very specific workload")
+ public void singleDelete(final ColumnFamilyHandle columnFamilyHandle,
+     final byte[][] keyParts) throws RocksDBException {
+   assert(isOwningHandle());
+   singleDelete(nativeHandle_, keyParts, keyParts.length,
+       columnFamilyHandle.nativeHandle_);
+ }
+
+ //TODO(AR) refactor if we implement org.rocksdb.SliceParts in future
+ /**
+ * Similar to {@link #singleDelete(byte[])} but allows
+ * you to specify the key in several parts that will be
+ * concatenated together.
+ *
+ * @param keyParts the specified key to be deleted.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ *     described above occurs, or in the case of an unexpected error
+ */
+ @Experimental("Performance optimization for a very specific workload")
+ public void singleDelete(final byte[][] keyParts) throws RocksDBException {
+   assert(isOwningHandle());
+   singleDelete(nativeHandle_, keyParts, keyParts.length);
+ }
+
+ /**
+ * Similar to {@link RocksDB#put(ColumnFamilyHandle, byte[], byte[])},
+ * but operates on the transaction's write batch. This write will only happen
+ * if this transaction gets committed successfully.
+ *
+ * Unlike {@link #put(ColumnFamilyHandle, byte[], byte[])} no conflict
+ * checking will be performed for this key.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, this function
+ * will still acquire locks necessary to make sure this write doesn't cause
+ * conflicts in other transactions; this may cause a {@link RocksDBException}
+ * with associated {@link Status.Code#Busy}.
+ *
+ * @param columnFamilyHandle The column family to put the key/value into
+ * @param key the specified key to be inserted.
+ * @param value the value associated with the specified key.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ *     described above occurs, or in the case of an unexpected error
+ */
+ public void putUntracked(final ColumnFamilyHandle columnFamilyHandle,
+     final byte[] key, final byte[] value) throws RocksDBException {
+   assert(isOwningHandle());
+   putUntracked(nativeHandle_, key, key.length, value, value.length,
+       columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Similar to {@link RocksDB#put(byte[], byte[])},
+ * but operates on the transaction's write batch. This write will only happen
+ * if this transaction gets committed successfully.
+ *
+ * Unlike {@link #put(byte[], byte[])} no conflict
+ * checking will be performed for this key.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, this function
+ * will still acquire locks necessary to make sure this write doesn't cause
+ * conflicts in other transactions; this may cause a {@link RocksDBException}
+ * with associated {@link Status.Code#Busy}.
+ *
+ * @param key the specified key to be inserted.
+ * @param value the value associated with the specified key.
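+ *
+ * <p>Illustrative usage only ({@code key} and {@code value} are placeholder
+ * {@code byte[]} variables); suitable when the caller already knows this key
+ * cannot conflict with another transaction:
+ * <pre>{@code
+ *   txn.putUntracked(key, value);  // written to the batch, no conflict check
+ * }</pre>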
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ *     described above occurs, or in the case of an unexpected error
+ */
+ public void putUntracked(final byte[] key, final byte[] value)
+     throws RocksDBException {
+   assert(isOwningHandle());
+   putUntracked(nativeHandle_, key, key.length, value, value.length);
+ }
+
+ //TODO(AR) refactor if we implement org.rocksdb.SliceParts in future
+ /**
+ * Similar to {@link #putUntracked(ColumnFamilyHandle, byte[], byte[])} but
+ * allows you to specify the key and value in several parts that will be
+ * concatenated together.
+ *
+ * @param columnFamilyHandle The column family to put the key/value into
+ * @param keyParts the specified key to be inserted.
+ * @param valueParts the value associated with the specified key.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ *     described above occurs, or in the case of an unexpected error
+ */
+ public void putUntracked(final ColumnFamilyHandle columnFamilyHandle,
+     final byte[][] keyParts, final byte[][] valueParts)
+     throws RocksDBException {
+   assert(isOwningHandle());
+   putUntracked(nativeHandle_, keyParts, keyParts.length, valueParts,
+       valueParts.length, columnFamilyHandle.nativeHandle_);
+ }
+
+ //TODO(AR) refactor if we implement org.rocksdb.SliceParts in future
+ /**
+ * Similar to {@link #putUntracked(byte[], byte[])} but
+ * allows you to specify the key and value in several parts that will be
+ * concatenated together.
+ *
+ * @param keyParts the specified key to be inserted.
+ * @param valueParts the value associated with the specified key.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ *     described above occurs, or in the case of an unexpected error
+ */
+ public void putUntracked(final byte[][] keyParts, final byte[][] valueParts)
+     throws RocksDBException {
+   assert(isOwningHandle());
+   putUntracked(nativeHandle_, keyParts, keyParts.length, valueParts,
+       valueParts.length);
+ }
+
+ /**
+ * Similar to {@link RocksDB#merge(ColumnFamilyHandle, byte[], byte[])},
+ * but operates on the transaction's write batch. This write will only happen
+ * if this transaction gets committed successfully.
+ *
+ * Unlike {@link #merge(ColumnFamilyHandle, byte[], byte[])} no conflict
+ * checking will be performed for this key.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, this function
+ * will still acquire locks necessary to make sure this write doesn't cause
+ * conflicts in other transactions; this may cause a {@link RocksDBException}
+ * with associated {@link Status.Code#Busy}.
+ *
+ * @param columnFamilyHandle The column family to merge the key/value into
+ * @param key the specified key to be merged.
+ * @param value the value associated with the specified key.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ *     described above occurs, or in the case of an unexpected error
+ */
+ public void mergeUntracked(final ColumnFamilyHandle columnFamilyHandle,
+     final byte[] key, final byte[] value) throws RocksDBException {
+   assert(isOwningHandle());
+   mergeUntracked(nativeHandle_, key, key.length, value, value.length,
+       columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Similar to {@link RocksDB#merge(byte[], byte[])},
+ * but operates on the transaction's write batch. This write will only happen
+ * if this transaction gets committed successfully.
+ *
+ * Unlike {@link #merge(byte[], byte[])} no conflict
+ * checking will be performed for this key.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, this function
+ * will still acquire locks necessary to make sure this write doesn't cause
+ * conflicts in other transactions; this may cause a {@link RocksDBException}
+ * with associated {@link Status.Code#Busy}.
+ *
+ * @param key the specified key to be merged.
+ * @param value the value associated with the specified key.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ *     described above occurs, or in the case of an unexpected error
+ */
+ public void mergeUntracked(final byte[] key, final byte[] value)
+     throws RocksDBException {
+   assert(isOwningHandle());
+   mergeUntracked(nativeHandle_, key, key.length, value, value.length);
+ }
+
+ /**
+ * Similar to {@link RocksDB#delete(ColumnFamilyHandle, byte[])},
+ * but operates on the transaction's write batch. This write will only happen
+ * if this transaction gets committed successfully.
+ *
+ * Unlike {@link #delete(ColumnFamilyHandle, byte[])} no conflict
+ * checking will be performed for this key.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, this function
+ * will still acquire locks necessary to make sure this write doesn't cause
+ * conflicts in other transactions; this may cause a {@link RocksDBException}
+ * with associated {@link Status.Code#Busy}.
+ *
+ * @param columnFamilyHandle The column family to delete the key/value from
+ * @param key the specified key to be deleted.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ *     described above occurs, or in the case of an unexpected error
+ */
+ public void deleteUntracked(final ColumnFamilyHandle columnFamilyHandle,
+     final byte[] key) throws RocksDBException {
+   assert(isOwningHandle());
+   deleteUntracked(nativeHandle_, key, key.length,
+       columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Similar to {@link RocksDB#delete(byte[])},
+ * but operates on the transaction's write batch. This write will only happen
+ * if this transaction gets committed successfully.
+ *
+ * Unlike {@link #delete(byte[])} no conflict
+ * checking will be performed for this key.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, this function
+ * will still acquire locks necessary to make sure this write doesn't cause
+ * conflicts in other transactions; this may cause a {@link RocksDBException}
+ * with associated {@link Status.Code#Busy}.
+ *
+ * @param key the specified key to be deleted.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ *     described above occurs, or in the case of an unexpected error
+ */
+ public void deleteUntracked(final byte[] key) throws RocksDBException {
+   assert(isOwningHandle());
+   deleteUntracked(nativeHandle_, key, key.length);
+ }
+
+ //TODO(AR) refactor if we implement org.rocksdb.SliceParts in future
+ /**
+ * Similar to {@link #deleteUntracked(ColumnFamilyHandle, byte[])} but allows
+ * you to specify the key in several parts that will be
+ * concatenated together.
+ *
+ * @param columnFamilyHandle The column family to delete the key/value from
+ * @param keyParts the specified key to be deleted.
+ * + * @throws RocksDBException when one of the TransactionalDB conditions + * described above occurs, or in the case of an unexpected error + */ + public void deleteUntracked(final ColumnFamilyHandle columnFamilyHandle, + final byte[][] keyParts) throws RocksDBException { + assert(isOwningHandle()); + deleteUntracked(nativeHandle_, keyParts, keyParts.length, + columnFamilyHandle.nativeHandle_); + } + + //TODO(AR) refactor if we implement org.rocksdb.SliceParts in future + /** + * Similar to {@link #deleteUntracked(byte[])} but allows + * you to specify the key in several parts that will be + * concatenated together. + * + * @param keyParts the specified key to be deleted. + * + * @throws RocksDBException when one of the TransactionalDB conditions + * described above occurs, or in the case of an unexpected error + */ + public void deleteUntracked(final byte[][] keyParts) throws RocksDBException { + assert(isOwningHandle()); + deleteUntracked(nativeHandle_, keyParts, keyParts.length); + } + + /** + * Similar to {@link WriteBatch#putLogData(byte[])} + * + * @param blob binary object to be inserted + */ + public void putLogData(final byte[] blob) { + assert(isOwningHandle()); + putLogData(nativeHandle_, blob, blob.length); + } + + /** + * By default, all put/merge/delete operations will be indexed in the + * transaction so that get/getForUpdate/getIterator can search for these + * keys. + * + * If the caller does not want to fetch the keys about to be written, + * they may want to avoid indexing as a performance optimization. + * Calling {@link #disableIndexing()} will turn off indexing for all future + * put/merge/delete operations until {@link #enableIndexing()} is called. + * + * If a key is put/merge/deleted after {@link #disableIndexing()} is called + * and then is fetched via get/getForUpdate/getIterator, the result of the + * fetch is undefined. + */ + public void disableIndexing() { + assert(isOwningHandle()); + disableIndexing(nativeHandle_); + } + + /** + * Re-enables indexing after a previous call to {@link #disableIndexing()} + */ + public void enableIndexing() { + assert(isOwningHandle()); + enableIndexing(nativeHandle_); + } + + /** + * Returns the number of distinct Keys being tracked by this transaction. + * If this transaction was created by a {@link TransactionDB}, this is the + * number of keys that are currently locked by this transaction. + * If this transaction was created by an {@link OptimisticTransactionDB}, + * this is the number of keys that need to be checked for conflicts at commit + * time. + * + * @return the number of distinct Keys being tracked by this transaction + */ + public long getNumKeys() { + assert(isOwningHandle()); + return getNumKeys(nativeHandle_); + } + + /** + * Returns the number of puts that have been applied to this + * transaction so far. + * + * @return the number of puts that have been applied to this transaction + */ + public long getNumPuts() { + assert(isOwningHandle()); + return getNumPuts(nativeHandle_); + } + + /** + * Returns the number of deletes that have been applied to this + * transaction so far. + * + * @return the number of deletes that have been applied to this transaction + */ + public long getNumDeletes() { + assert(isOwningHandle()); + return getNumDeletes(nativeHandle_); + } + + /** + * Returns the number of merges that have been applied to this + * transaction so far. 
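+ *
+ * <p>For example (illustrative only), the write counters may be combined to
+ * gauge the size of the pending transaction:
+ * <pre>{@code
+ *   final long pendingWrites =
+ *       txn.getNumPuts() + txn.getNumMerges() + txn.getNumDeletes();
+ * }</pre>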
+ * + * @return the number of merges that have been applied to this transaction + */ + public long getNumMerges() { + assert(isOwningHandle()); + return getNumMerges(nativeHandle_); + } + + /** + * Returns the elapsed time in milliseconds since this Transaction began. + * + * @return the elapsed time in milliseconds since this transaction began. + */ + public long getElapsedTime() { + assert(isOwningHandle()); + return getElapsedTime(nativeHandle_); + } + + /** + * Fetch the underlying write batch that contains all pending changes to be + * committed. + * + * Note: You should not write or delete anything from the batch directly and + * should only use the functions in the {@link Transaction} class to + * write to this transaction. + * + * @return The write batch + */ + public WriteBatchWithIndex getWriteBatch() { + assert(isOwningHandle()); + final WriteBatchWithIndex writeBatchWithIndex = + new WriteBatchWithIndex(getWriteBatch(nativeHandle_)); + return writeBatchWithIndex; + } + + /** + * Change the value of {@link TransactionOptions#getLockTimeout()} + * (in milliseconds) for this transaction. + * + * Has no effect on OptimisticTransactions. + * + * @param lockTimeout the timeout (in milliseconds) for locks used by this + * transaction. + */ + public void setLockTimeout(final long lockTimeout) { + assert(isOwningHandle()); + setLockTimeout(nativeHandle_, lockTimeout); + } + + /** + * Return the WriteOptions that will be used during {@link #commit()}. + * + * @return the WriteOptions that will be used + */ + public WriteOptions getWriteOptions() { + assert(isOwningHandle()); + final WriteOptions writeOptions = + new WriteOptions(getWriteOptions(nativeHandle_)); + return writeOptions; + } + + /** + * Reset the WriteOptions that will be used during {@link #commit()}. + * + * @param writeOptions The new WriteOptions + */ + public void setWriteOptions(final WriteOptions writeOptions) { + assert(isOwningHandle()); + setWriteOptions(nativeHandle_, writeOptions.nativeHandle_); + } + + /** + * If this key was previously fetched in this transaction using + * {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)}/ + * {@link #multiGetForUpdate(ReadOptions, List, byte[][])}, calling + * {@link #undoGetForUpdate(ColumnFamilyHandle, byte[])} will tell + * the transaction that it no longer needs to do any conflict checking + * for this key. + * + * If a key has been fetched N times via + * {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)}/ + * {@link #multiGetForUpdate(ReadOptions, List, byte[][])}, then + * {@link #undoGetForUpdate(ColumnFamilyHandle, byte[])} will only have an + * effect if it is also called N times. If this key has been written to in + * this transaction, {@link #undoGetForUpdate(ColumnFamilyHandle, byte[])} + * will have no effect. + * + * If {@link #setSavePoint()} has been called after the + * {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)}, + * {@link #undoGetForUpdate(ColumnFamilyHandle, byte[])} will not have any + * effect. + * + * If this Transaction was created by an {@link OptimisticTransactionDB}, + * calling {@link #undoGetForUpdate(ColumnFamilyHandle, byte[])} can affect + * whether this key is conflict checked at commit time. + * If this Transaction was created by a {@link TransactionDB}, + * calling {@link #undoGetForUpdate(ColumnFamilyHandle, byte[])} may release + * any held locks for this key. 
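+ *
+ * <p>Illustrative usage only ({@code readOptions}, {@code columnFamilyHandle}
+ * and {@code key} are placeholders):
+ * <pre>{@code
+ *   final byte[] value = txn.getForUpdate(readOptions, columnFamilyHandle,
+ *       key, true);  // true = exclusive
+ *   // ... later decide the key no longer needs conflict protection ...
+ *   txn.undoGetForUpdate(columnFamilyHandle, key);
+ * }</pre>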
+ * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param key the key to retrieve the value for. + */ + public void undoGetForUpdate(final ColumnFamilyHandle columnFamilyHandle, + final byte[] key) { + assert(isOwningHandle()); + undoGetForUpdate(nativeHandle_, key, key.length, columnFamilyHandle.nativeHandle_); + } + + /** + * If this key was previously fetched in this transaction using + * {@link #getForUpdate(ReadOptions, byte[], boolean)}/ + * {@link #multiGetForUpdate(ReadOptions, List, byte[][])}, calling + * {@link #undoGetForUpdate(byte[])} will tell + * the transaction that it no longer needs to do any conflict checking + * for this key. + * + * If a key has been fetched N times via + * {@link #getForUpdate(ReadOptions, byte[], boolean)}/ + * {@link #multiGetForUpdate(ReadOptions, List, byte[][])}, then + * {@link #undoGetForUpdate(byte[])} will only have an + * effect if it is also called N times. If this key has been written to in + * this transaction, {@link #undoGetForUpdate(byte[])} + * will have no effect. + * + * If {@link #setSavePoint()} has been called after the + * {@link #getForUpdate(ReadOptions, byte[], boolean)}, + * {@link #undoGetForUpdate(byte[])} will not have any + * effect. + * + * If this Transaction was created by an {@link OptimisticTransactionDB}, + * calling {@link #undoGetForUpdate(byte[])} can affect + * whether this key is conflict checked at commit time. + * If this Transaction was created by a {@link TransactionDB}, + * calling {@link #undoGetForUpdate(byte[])} may release + * any held locks for this key. + * + * @param key the key to retrieve the value for. + */ + public void undoGetForUpdate(final byte[] key) { + assert(isOwningHandle()); + undoGetForUpdate(nativeHandle_, key, key.length); + } + + /** + * Adds the keys from the WriteBatch to the transaction + * + * @param writeBatch The write batch to read from + * + * @throws RocksDBException if an error occurs whilst rebuilding from the + * write batch. + */ + public void rebuildFromWriteBatch(final WriteBatch writeBatch) + throws RocksDBException { + assert(isOwningHandle()); + rebuildFromWriteBatch(nativeHandle_, writeBatch.nativeHandle_); + } + + /** + * Get the Commit time Write Batch. + * + * @return the commit time write batch. + */ + public WriteBatch getCommitTimeWriteBatch() { + assert(isOwningHandle()); + final WriteBatch writeBatch = + new WriteBatch(getCommitTimeWriteBatch(nativeHandle_)); + return writeBatch; + } + + /** + * Set the log number. + * + * @param logNumber the log number + */ + public void setLogNumber(final long logNumber) { + assert(isOwningHandle()); + setLogNumber(nativeHandle_, logNumber); + } + + /** + * Get the log number. + * + * @return the log number + */ + public long getLogNumber() { + assert(isOwningHandle()); + return getLogNumber(nativeHandle_); + } + + /** + * Set the name of the transaction. + * + * @param transactionName the name of the transaction + * + * @throws RocksDBException if an error occurs when setting the transaction + * name. + */ + public void setName(final String transactionName) throws RocksDBException { + assert(isOwningHandle()); + setName(nativeHandle_, transactionName); + } + + /** + * Get the name of the transaction. + * + * @return the name of the transaction + */ + public String getName() { + assert(isOwningHandle()); + return getName(nativeHandle_); + } + + /** + * Get the ID of the transaction. + * + * @return the ID of the transaction. 
+ */ + public long getID() { + assert(isOwningHandle()); + return getID(nativeHandle_); + } + + /** + * Determine if a deadlock has been detected. + * + * @return true if a deadlock has been detected. + */ + public boolean isDeadlockDetect() { + assert(isOwningHandle()); + return isDeadlockDetect(nativeHandle_); + } + + /** + * Get the list of waiting transactions. + * + * @return The list of waiting transactions. + */ + public WaitingTransactions getWaitingTxns() { + assert(isOwningHandle()); + return getWaitingTxns(nativeHandle_); + } + + /** + * Get the execution status of the transaction. + * + * NOTE: The execution status of an Optimistic Transaction + * never changes. This is only useful for non-optimistic transactions! + * + * @return The execution status of the transaction + */ + public TransactionState getState() { + assert(isOwningHandle()); + return TransactionState.getTransactionState( + getState(nativeHandle_)); + } + + /** + * The globally unique id with which the transaction is identified. This id + * might or might not be set depending on the implementation. Similarly the + * implementation decides the point in lifetime of a transaction at which it + * assigns the id. Although currently it is the case, the id is not guaranteed + * to remain the same across restarts. + * + * @return the transaction id. + */ + @Experimental("NOTE: Experimental feature") + public long getId() { + assert(isOwningHandle()); + return getId(nativeHandle_); + } + + public enum TransactionState { + STARTED((byte)0), + AWAITING_PREPARE((byte)1), + PREPARED((byte)2), + AWAITING_COMMIT((byte)3), + COMMITED((byte)4), + AWAITING_ROLLBACK((byte)5), + ROLLEDBACK((byte)6), + LOCKS_STOLEN((byte)7); + + private final byte value; + + TransactionState(final byte value) { + this.value = value; + } + + /** + * Get TransactionState by byte value. + * + * @param value byte representation of TransactionState. + * + * @return {@link org.rocksdb.Transaction.TransactionState} instance or null. + * @throws java.lang.IllegalArgumentException if an invalid + * value is provided. + */ + public static TransactionState getTransactionState(final byte value) { + for (final TransactionState transactionState : TransactionState.values()) { + if (transactionState.value == value){ + return transactionState; + } + } + throw new IllegalArgumentException( + "Illegal value provided for TransactionState."); + } + } + + /** + * Called from C++ native method {@link #getWaitingTxns(long)} + * to construct a WaitingTransactions object. + * + * @param columnFamilyId The id of the {@link ColumnFamilyHandle} + * @param key The key + * @param transactionIds The transaction ids + * + * @return The waiting transactions + */ + private WaitingTransactions newWaitingTransactions( + final long columnFamilyId, final String key, + final long[] transactionIds) { + return new WaitingTransactions(columnFamilyId, key, transactionIds); + } + + public static class WaitingTransactions { + private final long columnFamilyId; + private final String key; + private final long[] transactionIds; + + private WaitingTransactions(final long columnFamilyId, final String key, + final long[] transactionIds) { + this.columnFamilyId = columnFamilyId; + this.key = key; + this.transactionIds = transactionIds; + } + + /** + * Get the Column Family ID. + * + * @return The column family ID + */ + public long getColumnFamilyId() { + return columnFamilyId; + } + + /** + * Get the key on which the transactions are waiting. 
+ * + * @return The key + */ + public String getKey() { + return key; + } + + /** + * Get the IDs of the waiting transactions. + * + * @return The IDs of the waiting transactions + */ + public long[] getTransactionIds() { + return transactionIds; + } + } + + private native void setSnapshot(final long handle); + private native void setSnapshotOnNextOperation(final long handle); + private native void setSnapshotOnNextOperation(final long handle, + final long transactionNotifierHandle); + private native long getSnapshot(final long handle); + private native void clearSnapshot(final long handle); + private native void prepare(final long handle) throws RocksDBException; + private native void commit(final long handle) throws RocksDBException; + private native void rollback(final long handle) throws RocksDBException; + private native void setSavePoint(final long handle) throws RocksDBException; + private native void rollbackToSavePoint(final long handle) + throws RocksDBException; + private native byte[] get(final long handle, final long readOptionsHandle, + final byte key[], final int keyLength, final long columnFamilyHandle) + throws RocksDBException; + private native byte[] get(final long handle, final long readOptionsHandle, + final byte key[], final int keyLen) throws RocksDBException; + private native byte[][] multiGet(final long handle, + final long readOptionsHandle, final byte[][] keys, + final long[] columnFamilyHandles) throws RocksDBException; + private native byte[][] multiGet(final long handle, + final long readOptionsHandle, final byte[][] keys) + throws RocksDBException; + private native byte[] getForUpdate(final long handle, + final long readOptionsHandle, final byte key[], final int keyLength, + final long columnFamilyHandle, final boolean exclusive) + throws RocksDBException; + private native byte[] getForUpdate(final long handle, + final long readOptionsHandle, final byte key[], final int keyLen, + final boolean exclusive) throws RocksDBException; + private native byte[][] multiGetForUpdate(final long handle, + final long readOptionsHandle, final byte[][] keys, + final long[] columnFamilyHandles) throws RocksDBException; + private native byte[][] multiGetForUpdate(final long handle, + final long readOptionsHandle, final byte[][] keys) + throws RocksDBException; + private native long getIterator(final long handle, + final long readOptionsHandle); + private native long getIterator(final long handle, + final long readOptionsHandle, final long columnFamilyHandle); + private native void put(final long handle, final byte[] key, + final int keyLength, final byte[] value, final int valueLength, + final long columnFamilyHandle) throws RocksDBException; + private native void put(final long handle, final byte[] key, + final int keyLength, final byte[] value, final int valueLength) + throws RocksDBException; + private native void put(final long handle, final byte[][] keys, + final int keysLength, final byte[][] values, final int valuesLength, + final long columnFamilyHandle) throws RocksDBException; + private native void put(final long handle, final byte[][] keys, + final int keysLength, final byte[][] values, final int valuesLength) + throws RocksDBException; + private native void merge(final long handle, final byte[] key, + final int keyLength, final byte[] value, final int valueLength, + final long columnFamilyHandle) throws RocksDBException; + private native void merge(final long handle, final byte[] key, + final int keyLength, final byte[] value, final int valueLength) + throws 
RocksDBException; + private native void delete(final long handle, final byte[] key, + final int keyLength, final long columnFamilyHandle) + throws RocksDBException; + private native void delete(final long handle, final byte[] key, + final int keyLength) throws RocksDBException; + private native void delete(final long handle, final byte[][] keys, + final int keysLength, final long columnFamilyHandle) + throws RocksDBException; + private native void delete(final long handle, final byte[][] keys, + final int keysLength) throws RocksDBException; + private native void singleDelete(final long handle, final byte[] key, + final int keyLength, final long columnFamilyHandle) + throws RocksDBException; + private native void singleDelete(final long handle, final byte[] key, + final int keyLength) throws RocksDBException; + private native void singleDelete(final long handle, final byte[][] keys, + final int keysLength, final long columnFamilyHandle) + throws RocksDBException; + private native void singleDelete(final long handle, final byte[][] keys, + final int keysLength) throws RocksDBException; + private native void putUntracked(final long handle, final byte[] key, + final int keyLength, final byte[] value, final int valueLength, + final long columnFamilyHandle) throws RocksDBException; + private native void putUntracked(final long handle, final byte[] key, + final int keyLength, final byte[] value, final int valueLength) + throws RocksDBException; + private native void putUntracked(final long handle, final byte[][] keys, + final int keysLength, final byte[][] values, final int valuesLength, + final long columnFamilyHandle) throws RocksDBException; + private native void putUntracked(final long handle, final byte[][] keys, + final int keysLength, final byte[][] values, final int valuesLength) + throws RocksDBException; + private native void mergeUntracked(final long handle, final byte[] key, + final int keyLength, final byte[] value, final int valueLength, + final long columnFamilyHandle) throws RocksDBException; + private native void mergeUntracked(final long handle, final byte[] key, + final int keyLength, final byte[] value, final int valueLength) + throws RocksDBException; + private native void deleteUntracked(final long handle, final byte[] key, + final int keyLength, final long columnFamilyHandle) + throws RocksDBException; + private native void deleteUntracked(final long handle, final byte[] key, + final int keyLength) throws RocksDBException; + private native void deleteUntracked(final long handle, final byte[][] keys, + final int keysLength, final long columnFamilyHandle) + throws RocksDBException; + private native void deleteUntracked(final long handle, final byte[][] keys, + final int keysLength) throws RocksDBException; + private native void putLogData(final long handle, final byte[] blob, + final int blobLength); + private native void disableIndexing(final long handle); + private native void enableIndexing(final long handle); + private native long getNumKeys(final long handle); + private native long getNumPuts(final long handle); + private native long getNumDeletes(final long handle); + private native long getNumMerges(final long handle); + private native long getElapsedTime(final long handle); + private native long getWriteBatch(final long handle); + private native void setLockTimeout(final long handle, final long lockTimeout); + private native long getWriteOptions(final long handle); + private native void setWriteOptions(final long handle, + final long writeOptionsHandle); + private 
native void undoGetForUpdate(final long handle, final byte[] key,
+     final int keyLength, final long columnFamilyHandle);
+ private native void undoGetForUpdate(final long handle, final byte[] key,
+     final int keyLength);
+ private native void rebuildFromWriteBatch(final long handle,
+     final long writeBatchHandle) throws RocksDBException;
+ private native long getCommitTimeWriteBatch(final long handle);
+ private native void setLogNumber(final long handle, final long logNumber);
+ private native long getLogNumber(final long handle);
+ private native void setName(final long handle, final String name)
+     throws RocksDBException;
+ private native String getName(final long handle);
+ private native long getID(final long handle);
+ private native boolean isDeadlockDetect(final long handle);
+ private native WaitingTransactions getWaitingTxns(final long handle);
+ private native byte getState(final long handle);
+ private native long getId(final long handle);
+
+ @Override protected final native void disposeInternal(final long handle);
+}
diff --git a/java/src/main/java/org/rocksdb/TransactionDB.java b/java/src/main/java/org/rocksdb/TransactionDB.java
new file mode 100644
index 000000000..fcecf3faf
--- /dev/null
+++ b/java/src/main/java/org/rocksdb/TransactionDB.java
@@ -0,0 +1,354 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Database with Transaction support
+ */
+public class TransactionDB extends RocksDB
+    implements TransactionalDB<TransactionOptions> {
+
+  private TransactionDBOptions transactionDbOptions_;
+
+  /**
+   * Private constructor.
+   *
+   * @param nativeHandle The native handle of the C++ TransactionDB object
+   */
+  private TransactionDB(final long nativeHandle) {
+    super(nativeHandle);
+  }
+
+  /**
+   * Open a TransactionDB, similar to {@link RocksDB#open(Options, String)}.
+   *
+   * @param options {@link org.rocksdb.Options} instance.
+   * @param transactionDbOptions {@link org.rocksdb.TransactionDBOptions}
+   *     instance.
+   * @param path the path to the rocksdb.
+   *
+   * @return a {@link TransactionDB} instance on success, null if the specified
+   *     {@link TransactionDB} cannot be opened.
+   *
+   * @throws RocksDBException if an error occurs whilst opening the database.
+   */
+  public static TransactionDB open(final Options options,
+      final TransactionDBOptions transactionDbOptions, final String path)
+      throws RocksDBException {
+    final TransactionDB tdb = new TransactionDB(open(options.nativeHandle_,
+        transactionDbOptions.nativeHandle_, path));
+
+    // When a non-default Options instance is used, keeping an Options
+    // reference in RocksDB prevents Java from garbage collecting it during
+    // the lifetime of the currently-created RocksDB.
+    tdb.storeOptionsInstance(options);
+    tdb.storeTransactionDbOptions(transactionDbOptions);
+
+    return tdb;
+  }
+
+  /**
+   * Open a TransactionDB, similar to
+   * {@link RocksDB#open(DBOptions, String, List, List)}.
+   *
+   * @param dbOptions {@link org.rocksdb.DBOptions} instance.
+   * @param transactionDbOptions {@link org.rocksdb.TransactionDBOptions}
+   *     instance.
+   * @param path the path to the rocksdb.
+ * @param columnFamilyDescriptors list of column family descriptors
+ * @param columnFamilyHandles will be filled with ColumnFamilyHandle instances
+ *
+ * @return a {@link TransactionDB} instance on success, null if the specified
+ *     {@link TransactionDB} cannot be opened.
+ *
+ * @throws RocksDBException if an error occurs whilst opening the database.
+ */
+ public static TransactionDB open(final DBOptions dbOptions,
+     final TransactionDBOptions transactionDbOptions,
+     final String path,
+     final List<ColumnFamilyDescriptor> columnFamilyDescriptors,
+     final List<ColumnFamilyHandle> columnFamilyHandles)
+     throws RocksDBException {
+
+   final byte[][] cfNames = new byte[columnFamilyDescriptors.size()][];
+   final long[] cfOptionHandles = new long[columnFamilyDescriptors.size()];
+   for (int i = 0; i < columnFamilyDescriptors.size(); i++) {
+     final ColumnFamilyDescriptor cfDescriptor = columnFamilyDescriptors
+         .get(i);
+     cfNames[i] = cfDescriptor.columnFamilyName();
+     cfOptionHandles[i] = cfDescriptor.columnFamilyOptions().nativeHandle_;
+   }
+
+   final long[] handles = open(dbOptions.nativeHandle_,
+       transactionDbOptions.nativeHandle_, path, cfNames, cfOptionHandles);
+   final TransactionDB tdb = new TransactionDB(handles[0]);
+
+   // When a non-default Options instance is used, keeping an Options
+   // reference in RocksDB prevents Java from garbage collecting it during
+   // the lifetime of the currently-created RocksDB.
+   tdb.storeOptionsInstance(dbOptions);
+   tdb.storeTransactionDbOptions(transactionDbOptions);
+
+   for (int i = 1; i < handles.length; i++) {
+     columnFamilyHandles.add(new ColumnFamilyHandle(tdb, handles[i]));
+   }
+
+   return tdb;
+ }
+
+ @Override
+ public Transaction beginTransaction(final WriteOptions writeOptions) {
+   return new Transaction(this, beginTransaction(nativeHandle_,
+       writeOptions.nativeHandle_));
+ }
+
+ @Override
+ public Transaction beginTransaction(final WriteOptions writeOptions,
+     final TransactionOptions transactionOptions) {
+   return new Transaction(this, beginTransaction(nativeHandle_,
+       writeOptions.nativeHandle_, transactionOptions.nativeHandle_));
+ }
+
+ // TODO(AR) consider having beginTransaction(... oldTransaction) set a
+ // reference count inside Transaction, so that we can always call
+ // Transaction#close but the object is only disposed when there are as many
+ // closes as beginTransaction.
Makes the try-with-resources paradigm easier for
+ // Java developers
+
+ @Override
+ public Transaction beginTransaction(final WriteOptions writeOptions,
+     final Transaction oldTransaction) {
+   final long jtxnHandle = beginTransaction_withOld(nativeHandle_,
+       writeOptions.nativeHandle_, oldTransaction.nativeHandle_);
+
+   // RocksJava relies on the assumption that
+   // we do not allocate a new Transaction object
+   // when providing an old_txn
+   assert(jtxnHandle == oldTransaction.nativeHandle_);
+
+   return oldTransaction;
+ }
+
+ @Override
+ public Transaction beginTransaction(final WriteOptions writeOptions,
+     final TransactionOptions transactionOptions,
+     final Transaction oldTransaction) {
+   final long jtxnHandle = beginTransaction_withOld(nativeHandle_,
+       writeOptions.nativeHandle_, transactionOptions.nativeHandle_,
+       oldTransaction.nativeHandle_);
+
+   // RocksJava relies on the assumption that
+   // we do not allocate a new Transaction object
+   // when providing an old_txn
+   assert(jtxnHandle == oldTransaction.nativeHandle_);
+
+   return oldTransaction;
+ }
+
+ public Transaction getTransactionByName(final String transactionName) {
+   final long jtxnHandle = getTransactionByName(nativeHandle_,
+       transactionName);
+   if (jtxnHandle == 0) {
+     return null;
+   }
+
+   final Transaction txn = new Transaction(this, jtxnHandle);
+
+   // this instance doesn't own the underlying C++ object
+   txn.disOwnNativeHandle();
+
+   return txn;
+ }
+
+ public List<Transaction> getAllPreparedTransactions() {
+   final long[] jtxnHandles = getAllPreparedTransactions(nativeHandle_);
+
+   final List<Transaction> txns = new ArrayList<>();
+   for (final long jtxnHandle : jtxnHandles) {
+     final Transaction txn = new Transaction(this, jtxnHandle);
+
+     // this instance doesn't own the underlying C++ object
+     txn.disOwnNativeHandle();
+
+     txns.add(txn);
+   }
+   return txns;
+ }
+
+ public static class KeyLockInfo {
+   private final String key;
+   private final long[] transactionIDs;
+   private final boolean exclusive;
+
+   public KeyLockInfo(final String key, final long[] transactionIDs,
+       final boolean exclusive) {
+     this.key = key;
+     this.transactionIDs = transactionIDs;
+     this.exclusive = exclusive;
+   }
+
+   /**
+    * Get the key.
+    *
+    * @return the key
+    */
+   public String getKey() {
+     return key;
+   }
+
+   /**
+    * Get the Transaction IDs.
+    *
+    * @return the Transaction IDs.
+    */
+   public long[] getTransactionIDs() {
+     return transactionIDs;
+   }
+
+   /**
+    * Get the Lock status.
+    *
+    * @return true if the lock is exclusive, false if the lock is shared.
+    */
+   public boolean isExclusive() {
+     return exclusive;
+   }
+ }
+
+ /**
+ * Returns a map of all locks held.
+ *
+ * @return a map of all the locks held.
+ */
+ public Map<Long, KeyLockInfo> getLockStatusData() {
+   return getLockStatusData(nativeHandle_);
+ }
+
+ /**
+ * Called from C++ native method {@link #getDeadlockInfoBuffer(long)}
+ * to construct a DeadlockInfo object.
+ * + * @param transactionID The transaction id + * @param columnFamilyId The id of the {@link ColumnFamilyHandle} + * @param waitingKey the key that we are waiting on + * @param exclusive true if the lock is exclusive, false if the lock is shared + * + * @return The waiting transactions + */ + private DeadlockInfo newDeadlockInfo( + final long transactionID, final long columnFamilyId, + final String waitingKey, final boolean exclusive) { + return new DeadlockInfo(transactionID, columnFamilyId, + waitingKey, exclusive); + } + + public static class DeadlockInfo { + private final long transactionID; + private final long columnFamilyId; + private final String waitingKey; + private final boolean exclusive; + + private DeadlockInfo(final long transactionID, final long columnFamilyId, + final String waitingKey, final boolean exclusive) { + this.transactionID = transactionID; + this.columnFamilyId = columnFamilyId; + this.waitingKey = waitingKey; + this.exclusive = exclusive; + } + + /** + * Get the Transaction ID. + * + * @return the transaction ID + */ + public long getTransactionID() { + return transactionID; + } + + /** + * Get the Column Family ID. + * + * @return The column family ID + */ + public long getColumnFamilyId() { + return columnFamilyId; + } + + /** + * Get the key that we are waiting on. + * + * @return the key that we are waiting on + */ + public String getWaitingKey() { + return waitingKey; + } + + /** + * Get the Lock status. + * + * @return true if the lock is exclusive, false if the lock is shared. + */ + public boolean isExclusive() { + return exclusive; + } + } + + public static class DeadlockPath { + final DeadlockInfo[] path; + final boolean limitExceeded; + + public DeadlockPath(final DeadlockInfo[] path, final boolean limitExceeded) { + this.path = path; + this.limitExceeded = limitExceeded; + } + + public boolean isEmpty() { + return path.length == 0 && !limitExceeded; + } + } + + public DeadlockPath[] getDeadlockInfoBuffer() { + return getDeadlockInfoBuffer(nativeHandle_); + } + + public void setDeadlockInfoBufferSize(final int targetSize) { + setDeadlockInfoBufferSize(nativeHandle_, targetSize); + } + + private void storeTransactionDbOptions( + final TransactionDBOptions transactionDbOptions) { + this.transactionDbOptions_ = transactionDbOptions; + } + + private static native long open(final long optionsHandle, + final long transactionDbOptionsHandle, final String path) + throws RocksDBException; + private static native long[] open(final long dbOptionsHandle, + final long transactionDbOptionsHandle, final String path, + final byte[][] columnFamilyNames, final long[] columnFamilyOptions); + private native long beginTransaction(final long handle, + final long writeOptionsHandle); + private native long beginTransaction(final long handle, + final long writeOptionsHandle, final long transactionOptionsHandle); + private native long beginTransaction_withOld(final long handle, + final long writeOptionsHandle, final long oldTransactionHandle); + private native long beginTransaction_withOld(final long handle, + final long writeOptionsHandle, final long transactionOptionsHandle, + final long oldTransactionHandle); + private native long getTransactionByName(final long handle, + final String name); + private native long[] getAllPreparedTransactions(final long handle); + private native Map getLockStatusData( + final long handle); + private native DeadlockPath[] getDeadlockInfoBuffer(final long handle); + private native void setDeadlockInfoBufferSize(final long handle, + final 
int targetSize); + @Override protected final native void disposeInternal(final long handle); +} diff --git a/java/src/main/java/org/rocksdb/TransactionDBOptions.java b/java/src/main/java/org/rocksdb/TransactionDBOptions.java new file mode 100644 index 000000000..76f545cde --- /dev/null +++ b/java/src/main/java/org/rocksdb/TransactionDBOptions.java @@ -0,0 +1,217 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +public class TransactionDBOptions extends RocksObject { + + public TransactionDBOptions() { + super(newTransactionDBOptions()); + } + + /** + * Specifies the maximum number of keys that can be locked at the same time + * per column family. + * + * If the number of locked keys is greater than {@link #getMaxNumLocks()}, + * transaction writes (or GetForUpdate) will return an error. + * + * @return The maximum number of keys that can be locked + */ + public long getMaxNumLocks() { + assert(isOwningHandle()); + return getMaxNumLocks(nativeHandle_); + } + + /** + * Specifies the maximum number of keys that can be locked at the same time + * per column family. + * + * If the number of locked keys is greater than {@link #getMaxNumLocks()}, + * transaction writes (or GetForUpdate) will return an error. + * + * @param maxNumLocks The maximum number of keys that can be locked; + * If this value is not positive, no limit will be enforced. + * + * @return this TransactionDBOptions instance + */ + public TransactionDBOptions setMaxNumLocks(final long maxNumLocks) { + assert(isOwningHandle()); + setMaxNumLocks(nativeHandle_, maxNumLocks); + return this; + } + + /** + * The number of sub-tables per lock table (per column family) + * + * @return The number of sub-tables + */ + public long getNumStripes() { + assert(isOwningHandle()); + return getNumStripes(nativeHandle_); + } + + /** + * Increasing this value will increase the concurrency by dividing the lock + * table (per column family) into more sub-tables, each with their own + * separate mutex. + * + * Default: 16 + * + * @param numStripes The number of sub-tables + * + * @return this TransactionDBOptions instance + */ + public TransactionDBOptions setNumStripes(final long numStripes) { + assert(isOwningHandle()); + setNumStripes(nativeHandle_, numStripes); + return this; + } + + /** + * The default wait timeout in milliseconds when + * a transaction attempts to lock a key if not specified by + * {@link TransactionOptions#setLockTimeout(long)} + * + * If 0, no waiting is done if a lock cannot instantly be acquired. + * If negative, there is no timeout. + * + * @return the default wait timeout in milliseconds + */ + public long getTransactionLockTimeout() { + assert(isOwningHandle()); + return getTransactionLockTimeout(nativeHandle_); + } + + /** + * If positive, specifies the default wait timeout in milliseconds when + * a transaction attempts to lock a key if not specified by + * {@link TransactionOptions#setLockTimeout(long)} + * + * If 0, no waiting is done if a lock cannot instantly be acquired. + * If negative, there is no timeout. Not using a timeout is not recommended + * as it can lead to deadlocks. Currently, there is no deadlock-detection to + * recover from a deadlock. 
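+ *
+ * <p>For example (illustrative value only), a 250 millisecond default wait
+ * for transaction locks could be configured as:
+ * <pre>{@code
+ *   final TransactionDBOptions txnDbOptions = new TransactionDBOptions()
+ *       .setTransactionLockTimeout(250);
+ * }</pre>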
+ * + * Default: 1000 + * + * @param transactionLockTimeout the default wait timeout in milliseconds + * + * @return this TransactionDBOptions instance + */ + public TransactionDBOptions setTransactionLockTimeout( + final long transactionLockTimeout) { + assert(isOwningHandle()); + setTransactionLockTimeout(nativeHandle_, transactionLockTimeout); + return this; + } + + /** + * The wait timeout in milliseconds when writing a key + * OUTSIDE of a transaction (ie by calling {@link RocksDB#put}, + * {@link RocksDB#merge}, {@link RocksDB#remove} or {@link RocksDB#write} + * directly). + * + * If 0, no waiting is done if a lock cannot instantly be acquired. + * If negative, there is no timeout and will block indefinitely when acquiring + * a lock. + * + * @return the timeout in milliseconds when writing a key OUTSIDE of a + * transaction + */ + public long getDefaultLockTimeout() { + assert(isOwningHandle()); + return getDefaultLockTimeout(nativeHandle_); + } + + /** + * If positive, specifies the wait timeout in milliseconds when writing a key + * OUTSIDE of a transaction (ie by calling {@link RocksDB#put}, + * {@link RocksDB#merge}, {@link RocksDB#remove} or {@link RocksDB#write} + * directly). + * + * If 0, no waiting is done if a lock cannot instantly be acquired. + * If negative, there is no timeout and will block indefinitely when acquiring + * a lock. + * + * Not using a timeout can lead to deadlocks. Currently, there + * is no deadlock-detection to recover from a deadlock. While DB writes + * cannot deadlock with other DB writes, they can deadlock with a transaction. + * A negative timeout should only be used if all transactions have a small + * expiration set. + * + * Default: 1000 + * + * @param defaultLockTimeout the timeout in milliseconds when writing a key + * OUTSIDE of a transaction + * @return this TransactionDBOptions instance + */ + public TransactionDBOptions setDefaultLockTimeout( + final long defaultLockTimeout) { + assert(isOwningHandle()); + setDefaultLockTimeout(nativeHandle_, defaultLockTimeout); + return this; + } + +// /** +// * If set, the {@link TransactionDB} will use this implementation of a mutex +// * and condition variable for all transaction locking instead of the default +// * mutex/condvar implementation. +// * +// * @param transactionDbMutexFactory the mutex factory for the transactions +// * +// * @return this TransactionDBOptions instance +// */ +// public TransactionDBOptions setCustomMutexFactory( +// final TransactionDBMutexFactory transactionDbMutexFactory) { +// +// } + + /** + * The policy for when to write the data into the DB. The default policy is to + * write only the committed data {@link TxnDBWritePolicy#WRITE_COMMITTED}. + * The data could be written before the commit phase. The DB then needs to + * provide the mechanisms to tell apart committed from uncommitted data. + * + * @return The write policy. + */ + public TxnDBWritePolicy getWritePolicy() { + assert(isOwningHandle()); + return TxnDBWritePolicy.getTxnDBWritePolicy(getWritePolicy(nativeHandle_)); + } + + /** + * The policy for when to write the data into the DB. The default policy is to + * write only the committed data {@link TxnDBWritePolicy#WRITE_COMMITTED}. + * The data could be written before the commit phase. The DB then needs to + * provide the mechanisms to tell apart committed from uncommitted data. + * + * @param writePolicy The write policy. 
+ *
+ * @return this TransactionDBOptions instance
+ */
+ public TransactionDBOptions setWritePolicy(
+     final TxnDBWritePolicy writePolicy) {
+   assert(isOwningHandle());
+   setWritePolicy(nativeHandle_, writePolicy.getValue());
+   return this;
+ }
+
+ private native static long newTransactionDBOptions();
+ private native long getMaxNumLocks(final long handle);
+ private native void setMaxNumLocks(final long handle,
+     final long maxNumLocks);
+ private native long getNumStripes(final long handle);
+ private native void setNumStripes(final long handle, final long numStripes);
+ private native long getTransactionLockTimeout(final long handle);
+ private native void setTransactionLockTimeout(final long handle,
+     final long transactionLockTimeout);
+ private native long getDefaultLockTimeout(final long handle);
+ private native void setDefaultLockTimeout(final long handle,
+     final long transactionLockTimeout);
+ private native byte getWritePolicy(final long handle);
+ private native void setWritePolicy(final long handle, final byte writePolicy);
+ @Override protected final native void disposeInternal(final long handle);
+}
diff --git a/java/src/main/java/org/rocksdb/TransactionOptions.java b/java/src/main/java/org/rocksdb/TransactionOptions.java
new file mode 100644
index 000000000..2c63bf723
--- /dev/null
+++ b/java/src/main/java/org/rocksdb/TransactionOptions.java
@@ -0,0 +1,189 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+public class TransactionOptions extends RocksObject
+    implements TransactionalOptions {
+
+  public TransactionOptions() {
+    super(newTransactionOptions());
+  }
+
+  @Override
+  public boolean isSetSnapshot() {
+    assert(isOwningHandle());
+    return isSetSnapshot(nativeHandle_);
+  }
+
+  @Override
+  public TransactionOptions setSetSnapshot(final boolean setSnapshot) {
+    assert(isOwningHandle());
+    setSetSnapshot(nativeHandle_, setSnapshot);
+    return this;
+  }
+
+  /**
+   * True means that before acquiring locks, this transaction will
+   * check if doing so will cause a deadlock. If so, it will return with
+   * {@link Status.Code#Busy}. The user should retry their transaction.
+   *
+   * @return true if this transaction will check for deadlocks before
+   *     acquiring locks.
+   */
+  public boolean isDeadlockDetect() {
+    assert(isOwningHandle());
+    return isDeadlockDetect(nativeHandle_);
+  }
+
+  /**
+   * Setting to true means that before acquiring locks, this transaction will
+   * check if doing so will cause a deadlock. If so, it will return with
+   * {@link Status.Code#Busy}. The user should retry their transaction.
+   *
+   * @param deadlockDetect true if we should detect deadlocks.
+   *
+   * @return this TransactionOptions instance
+   */
+  public TransactionOptions setDeadlockDetect(final boolean deadlockDetect) {
+    assert(isOwningHandle());
+    setDeadlockDetect(nativeHandle_, deadlockDetect);
+    return this;
+  }
+
+  /**
+   * The wait timeout in milliseconds when a transaction attempts to lock a key.
+   *
+   * If 0, no waiting is done if a lock cannot instantly be acquired.
+ * If negative, {@link TransactionDBOptions#getTransactionLockTimeout(long)} + * will be used. + * + * @return the lock timeout in milliseconds + */ + public long getLockTimeout() { + assert(isOwningHandle()); + return getLockTimeout(nativeHandle_); + } + + /** + * If positive, specifies the wait timeout in milliseconds when + * a transaction attempts to lock a key. + * + * If 0, no waiting is done if a lock cannot instantly be acquired. + * If negative, {@link TransactionDBOptions#getTransactionLockTimeout(long)} + * will be used. + * + * Default: -1 + * + * @param lockTimeout the lock timeout in milliseconds + * + * @return this TransactionOptions instance + */ + public TransactionOptions setLockTimeout(final long lockTimeout) { + assert(isOwningHandle()); + setLockTimeout(nativeHandle_, lockTimeout); + return this; + } + + /** + * Expiration duration in milliseconds. + * + * If non-negative, transactions that last longer than this many milliseconds + * will fail to commit. If not set, a forgotten transaction that is never + * committed, rolled back, or deleted will never relinquish any locks it + * holds. This could prevent keys from being written by other writers. + * + * @return the expiration duration in milliseconds + */ + public long getExpiration() { + assert(isOwningHandle()); + return getExpiration(nativeHandle_); + } + + /** + * Expiration duration in milliseconds. + * + * If non-negative, transactions that last longer than this many milliseconds + * will fail to commit. If not set, a forgotten transaction that is never + * committed, rolled back, or deleted will never relinquish any locks it + * holds. This could prevent keys from being written by other writers. + * + * Default: -1 + * + * @param expiration the expiration duration in milliseconds + * + * @return this TransactionOptions instance + */ + public TransactionOptions setExpiration(final long expiration) { + assert(isOwningHandle()); + setExpiration(nativeHandle_, expiration); + return this; + } + + /** + * Gets the number of traversals to make during deadlock detection. + * + * @return the number of traversals to make during + * deadlock detection + */ + public long getDeadlockDetectDepth() { + return getDeadlockDetectDepth(nativeHandle_); + } + + /** + * Sets the number of traversals to make during deadlock detection. + * + * Default: 50 + * + * @param deadlockDetectDepth the number of traversals to make during + * deadlock detection + * + * @return this TransactionOptions instance + */ + public TransactionOptions setDeadlockDetectDepth( + final long deadlockDetectDepth) { + setDeadlockDetectDepth(nativeHandle_, deadlockDetectDepth); + return this; + } + + /** + * Get the maximum number of bytes that may be used for the write batch. + * + * @return the maximum number of bytes, 0 means no limit. + */ + public long getMaxWriteBatchSize() { + return getMaxWriteBatchSize(nativeHandle_); + } + + /** + * Set the maximum number of bytes that may be used for the write batch. + * + * @param maxWriteBatchSize the maximum number of bytes, 0 means no limit.
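As a minimal sketch (not part of the diff itself), the per-transaction settings documented above chain naturally; the resulting options object would then be handed to one of the beginTransaction overloads.

import org.rocksdb.*;

public class TransactionOptionsSketch {
  public static void main(final String[] args) {
    RocksDB.loadLibrary();
    try (final TransactionOptions txnOptions = new TransactionOptions()
            .setSetSnapshot(true)        // same effect as calling Transaction#setSnapshot()
            .setDeadlockDetect(true)     // fail with Status.Code.Busy rather than deadlocking
            .setDeadlockDetectDepth(50)  // documented default number of traversals
            .setLockTimeout(1000)        // per-key lock wait in milliseconds
            .setExpiration(60_000)       // commits fail once the transaction is a minute old
            .setMaxWriteBatchSize(0)) {  // 0 means no limit
      // pass txnOptions to beginTransaction(writeOptions, txnOptions)
    }
  }
}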
+ * + * @return this TransactionOptions instance + */ + public TransactionOptions setMaxWriteBatchSize(final long maxWriteBatchSize) { + setMaxWriteBatchSize(nativeHandle_, maxWriteBatchSize); + return this; + } + + private native static long newTransactionOptions(); + private native boolean isSetSnapshot(final long handle); + private native void setSetSnapshot(final long handle, + final boolean setSnapshot); + private native boolean isDeadlockDetect(final long handle); + private native void setDeadlockDetect(final long handle, + final boolean deadlockDetect); + private native long getLockTimeout(final long handle); + private native void setLockTimeout(final long handle, final long lockTimeout); + private native long getExpiration(final long handle); + private native void setExpiration(final long handle, final long expiration); + private native long getDeadlockDetectDepth(final long handle); + private native void setDeadlockDetectDepth(final long handle, + final long deadlockDetectDepth); + private native long getMaxWriteBatchSize(final long handle); + private native void setMaxWriteBatchSize(final long handle, + final long maxWriteBatchSize); + @Override protected final native void disposeInternal(final long handle); +} diff --git a/java/src/main/java/org/rocksdb/TransactionalDB.java b/java/src/main/java/org/rocksdb/TransactionalDB.java new file mode 100644 index 000000000..3f0eceda8 --- /dev/null +++ b/java/src/main/java/org/rocksdb/TransactionalDB.java @@ -0,0 +1,68 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + + +interface TransactionalDB + extends AutoCloseable { + + /** + * Starts a new Transaction. + * + * Caller is responsible for calling {@link #close()} on the returned + * transaction when it is no longer needed. + * + * @param writeOptions Any write options for the transaction + * @return a new transaction + */ + Transaction beginTransaction(final WriteOptions writeOptions); + + /** + * Starts a new Transaction. + * + * Caller is responsible for calling {@link #close()} on the returned + * transaction when it is no longer needed. + * + * @param writeOptions Any write options for the transaction + * @param transactionOptions Any options for the transaction + * @return a new transaction + */ + Transaction beginTransaction(final WriteOptions writeOptions, + final T transactionOptions); + + /** + * Starts a new Transaction. + * + * Caller is responsible for calling {@link #close()} on the returned + * transaction when it is no longer needed. + * + * @param writeOptions Any write options for the transaction + * @param oldTransaction this Transaction will be reused instead of allocating + * a new one. This is an optimization to avoid extra allocations + * when repeatedly creating transactions. + * @return The oldTransaction which has been reinitialized as a new + * transaction + */ + Transaction beginTransaction(final WriteOptions writeOptions, + final Transaction oldTransaction); + + /** + * Starts a new Transaction. + * + * Caller is responsible for calling {@link #close()} on the returned + * transaction when it is no longer needed. 
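The oldTransaction overloads described above are intended for hot loops that would otherwise allocate a new Transaction per iteration. A minimal sketch of the reuse pattern (not part of the diff itself), shown here against an OptimisticTransactionDB with a hypothetical path; the same shape applies to TransactionDB.

import org.rocksdb.*;

public class TransactionReuseSketch {
  public static void main(final String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final Options options = new Options().setCreateIfMissing(true);
         final OptimisticTransactionDB otdb = OptimisticTransactionDB.open(
             options, "/tmp/rocksdbjni-txn-reuse-sketch");  // hypothetical path
         final WriteOptions writeOptions = new WriteOptions()) {

      Transaction txn = otdb.beginTransaction(writeOptions);
      try {
        for (int i = 0; i < 100; i++) {
          // beginTransaction(writeOptions, oldTransaction) reinitialises and returns
          // the same Transaction object instead of allocating a new one
          txn = otdb.beginTransaction(writeOptions, txn);
          txn.put(("key" + i).getBytes(), ("value" + i).getBytes());
          txn.commit();
        }
      } finally {
        txn.close();
      }
    }
  }
}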
+ * + * @param writeOptions Any write options for the transaction + * @param transactionOptions Any options for the transaction + * @param oldTransaction this Transaction will be reused instead of allocating + * a new one. This is an optimization to avoid extra allocations + * when repeatedly creating transactions. + * @return The oldTransaction which has been reinitialized as a new + * transaction + */ + Transaction beginTransaction(final WriteOptions writeOptions, + final T transactionOptions, final Transaction oldTransaction); +} diff --git a/java/src/main/java/org/rocksdb/TransactionalOptions.java b/java/src/main/java/org/rocksdb/TransactionalOptions.java new file mode 100644 index 000000000..87aaa7986 --- /dev/null +++ b/java/src/main/java/org/rocksdb/TransactionalOptions.java @@ -0,0 +1,31 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + + +interface TransactionalOptions extends AutoCloseable { + + /** + * True indicates snapshots will be set, just like if + * {@link Transaction#setSnapshot()} had been called + * + * @return whether a snapshot will be set + */ + boolean isSetSnapshot(); + + /** + * Setting the setSnapshot to true is the same as calling + * {@link Transaction#setSnapshot()}. + * + * Default: false + * + * @param The type of transactional options. + * @param setSnapshot Whether to set a snapshot + * + * @return this TransactionalOptions instance + */ + T setSetSnapshot(final boolean setSnapshot); +} diff --git a/java/src/main/java/org/rocksdb/TxnDBWritePolicy.java b/java/src/main/java/org/rocksdb/TxnDBWritePolicy.java new file mode 100644 index 000000000..837ce6157 --- /dev/null +++ b/java/src/main/java/org/rocksdb/TxnDBWritePolicy.java @@ -0,0 +1,62 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +package org.rocksdb; + +/** + * The transaction db write policy. + */ +public enum TxnDBWritePolicy { + /** + * Write only the committed data. + */ + WRITE_COMMITTED((byte)0x00), + + /** + * Write data after the prepare phase of 2pc. + */ + WRITE_PREPARED((byte)0x1), + + /** + * Write data before the prepare phase of 2pc. + */ + WRITE_UNPREPARED((byte)0x2); + + private byte value; + + TxnDBWritePolicy(final byte value) { + this.value = value; + } + + /** + *

Returns the byte value of the enumeration value.

+ * + * @return byte representation + */ + public byte getValue() { + return value; + } + + /** + *

Get the TxnDBWritePolicy enumeration value by + * passing the byte identifier to this method.

+ * + * @param byteIdentifier of TxnDBWritePolicy. + * + * @return TxnDBWritePolicy instance. + * + * @throws IllegalArgumentException If TxnDBWritePolicy cannot be found for + * the provided byteIdentifier + */ + public static TxnDBWritePolicy getTxnDBWritePolicy(final byte byteIdentifier) { + for (final TxnDBWritePolicy txnDBWritePolicy : TxnDBWritePolicy.values()) { + if (txnDBWritePolicy.getValue() == byteIdentifier) { + return txnDBWritePolicy; + } + } + + throw new IllegalArgumentException( + "Illegal value provided for TxnDBWritePolicy."); + } +} diff --git a/java/src/main/java/org/rocksdb/WALRecoveryMode.java b/java/src/main/java/org/rocksdb/WALRecoveryMode.java index d3fc47b63..d8b9eeced 100644 --- a/java/src/main/java/org/rocksdb/WALRecoveryMode.java +++ b/java/src/main/java/org/rocksdb/WALRecoveryMode.java @@ -65,7 +65,7 @@ public enum WALRecoveryMode { * * @param byteIdentifier of WALRecoveryMode. * - * @return CompressionType instance. + * @return WALRecoveryMode instance. * * @throws IllegalArgumentException If WALRecoveryMode cannot be found for the * provided byteIdentifier diff --git a/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java b/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java index f3d49c92e..e9c3dd9e5 100644 --- a/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java +++ b/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java @@ -64,6 +64,18 @@ public class WriteBatchWithIndex extends AbstractWriteBatch { fallbackIndexComparator instanceof DirectComparator, reservedBytes, overwriteKey)); } + /** + *

Private WriteBatchWithIndex constructor which is used to construct + * WriteBatchWithIndex instances from the C++ side. As the reference to this + * object is also managed from the C++ side, the handle will be disowned.

+ * + * @param nativeHandle address of native instance. + */ + WriteBatchWithIndex(final long nativeHandle) { + super(nativeHandle); + disOwnNativeHandle(); + } + /** * Create an iterator of a column family. User can call * {@link org.rocksdb.RocksIteratorInterface#seek(byte[])} to diff --git a/java/src/main/java/org/rocksdb/WriteOptions.java b/java/src/main/java/org/rocksdb/WriteOptions.java index f3c5aa667..db662aa50 100644 --- a/java/src/main/java/org/rocksdb/WriteOptions.java +++ b/java/src/main/java/org/rocksdb/WriteOptions.java @@ -20,6 +20,12 @@ public class WriteOptions extends RocksObject { } + // TODO(AR) consider ownership + WriteOptions(final long nativeHandle) { + super(nativeHandle); + disOwnNativeHandle(); + } + /** * Copy constructor for WriteOptions. * diff --git a/java/src/test/java/org/rocksdb/AbstractTransactionTest.java b/java/src/test/java/org/rocksdb/AbstractTransactionTest.java new file mode 100644 index 000000000..08f3dbf58 --- /dev/null +++ b/java/src/test/java/org/rocksdb/AbstractTransactionTest.java @@ -0,0 +1,903 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Random; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; + +/** + * Base class of {@link TransactionTest} and {@link OptimisticTransactionTest} + */ +public abstract class AbstractTransactionTest { + + protected final static byte[] TXN_TEST_COLUMN_FAMILY = "txn_test_cf" + .getBytes(); + + protected static final Random rand = PlatformRandomHelper. 
+ getPlatformSpecificRandomFactory(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + public abstract DBContainer startDb() + throws RocksDBException; + + @Test + public void setSnapshot() throws RocksDBException { + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + txn.setSnapshot(); + } + } + + @Test + public void setSnapshotOnNextOperation() throws RocksDBException { + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + txn.setSnapshotOnNextOperation(); + txn.put("key1".getBytes(), "value1".getBytes()); + } + } + + @Test + public void setSnapshotOnNextOperation_transactionNotifier() throws RocksDBException { + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + + try(final TestTransactionNotifier notifier = new TestTransactionNotifier()) { + txn.setSnapshotOnNextOperation(notifier); + txn.put("key1".getBytes(), "value1".getBytes()); + + txn.setSnapshotOnNextOperation(notifier); + txn.put("key2".getBytes(), "value2".getBytes()); + + assertThat(notifier.getCreatedSnapshots().size()).isEqualTo(2); + } + } + } + + @Test + public void getSnapshot() throws RocksDBException { + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + txn.setSnapshot(); + final Snapshot snapshot = txn.getSnapshot(); + assertThat(snapshot.isOwningHandle()).isFalse(); + } + } + + @Test + public void getSnapshot_null() throws RocksDBException { + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + final Snapshot snapshot = txn.getSnapshot(); + assertThat(snapshot).isNull(); + } + } + + @Test + public void clearSnapshot() throws RocksDBException { + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + txn.setSnapshot(); + txn.clearSnapshot(); + } + } + + @Test + public void clearSnapshot_none() throws RocksDBException { + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + txn.clearSnapshot(); + } + } + + @Test + public void commit() throws RocksDBException { + final byte k1[] = "rollback-key1".getBytes(UTF_8); + final byte v1[] = "rollback-value1".getBytes(UTF_8); + try(final DBContainer dbContainer = startDb()) { + try(final Transaction txn = dbContainer.beginTransaction()) { + txn.put(k1, v1); + txn.commit(); + } + + try(final ReadOptions readOptions = new ReadOptions(); + final Transaction txn2 = dbContainer.beginTransaction()) { + assertThat(txn2.get(readOptions, k1)).isEqualTo(v1); + } + } + } + + @Test + public void rollback() throws RocksDBException { + final byte k1[] = "rollback-key1".getBytes(UTF_8); + final byte v1[] = "rollback-value1".getBytes(UTF_8); + try(final DBContainer dbContainer = startDb()) { + try(final Transaction txn = dbContainer.beginTransaction()) { + txn.put(k1, v1); + txn.rollback(); + } + + try(final ReadOptions readOptions = new ReadOptions(); + final Transaction txn2 = dbContainer.beginTransaction()) { + assertThat(txn2.get(readOptions, k1)).isNull(); + } + } + } + + @Test + public void savePoint() throws RocksDBException { + final byte k1[] = "savePoint-key1".getBytes(UTF_8); + final byte v1[] = "savePoint-value1".getBytes(UTF_8); + final byte k2[] = "savePoint-key2".getBytes(UTF_8); + final byte v2[] = "savePoint-value2".getBytes(UTF_8); + + try(final DBContainer 
dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions()) { + + + try(final Transaction txn = dbContainer.beginTransaction()) { + txn.put(k1, v1); + + assertThat(txn.get(readOptions, k1)).isEqualTo(v1); + + txn.setSavePoint(); + + txn.put(k2, v2); + + assertThat(txn.get(readOptions, k1)).isEqualTo(v1); + assertThat(txn.get(readOptions, k2)).isEqualTo(v2); + + txn.rollbackToSavePoint(); + + assertThat(txn.get(readOptions, k1)).isEqualTo(v1); + assertThat(txn.get(readOptions, k2)).isNull(); + + txn.commit(); + } + + try(final Transaction txn2 = dbContainer.beginTransaction()) { + assertThat(txn2.get(readOptions, k1)).isEqualTo(v1); + assertThat(txn2.get(readOptions, k2)).isNull(); + } + } + } + + @Test + public void getPut_cf() throws RocksDBException { + final byte k1[] = "key1".getBytes(UTF_8); + final byte v1[] = "value1".getBytes(UTF_8); + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily(); + assertThat(txn.get(testCf, readOptions, k1)).isNull(); + txn.put(testCf, k1, v1); + assertThat(txn.get(testCf, readOptions, k1)).isEqualTo(v1); + } + } + + @Test + public void getPut() throws RocksDBException { + final byte k1[] = "key1".getBytes(UTF_8); + final byte v1[] = "value1".getBytes(UTF_8); + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + assertThat(txn.get(readOptions, k1)).isNull(); + txn.put(k1, v1); + assertThat(txn.get(readOptions, k1)).isEqualTo(v1); + } + } + + @Test + public void multiGetPut_cf() throws RocksDBException { + final byte keys[][] = new byte[][] { + "key1".getBytes(UTF_8), + "key2".getBytes(UTF_8)}; + final byte values[][] = new byte[][] { + "value1".getBytes(UTF_8), + "value2".getBytes(UTF_8)}; + + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily(); + final List cfList = Arrays.asList(testCf, testCf); + + assertThat(txn.multiGet(readOptions, cfList, keys)).isEqualTo(new byte[][] { null, null }); + + txn.put(testCf, keys[0], values[0]); + txn.put(testCf, keys[1], values[1]); + assertThat(txn.multiGet(readOptions, cfList, keys)).isEqualTo(values); + } + } + + @Test + public void multiGetPut() throws RocksDBException { + final byte keys[][] = new byte[][] { + "key1".getBytes(UTF_8), + "key2".getBytes(UTF_8)}; + final byte values[][] = new byte[][] { + "value1".getBytes(UTF_8), + "value2".getBytes(UTF_8)}; + + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + + assertThat(txn.multiGet(readOptions, keys)).isEqualTo(new byte[][] { null, null }); + + txn.put(keys[0], values[0]); + txn.put(keys[1], values[1]); + assertThat(txn.multiGet(readOptions, keys)).isEqualTo(values); + } + } + + @Test + public void getForUpdate_cf() throws RocksDBException { + final byte k1[] = "key1".getBytes(UTF_8); + final byte v1[] = "value1".getBytes(UTF_8); + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily(); + 
assertThat(txn.getForUpdate(readOptions, testCf, k1, true)).isNull(); + txn.put(testCf, k1, v1); + assertThat(txn.getForUpdate(readOptions, testCf, k1, true)).isEqualTo(v1); + } + } + + @Test + public void getForUpdate() throws RocksDBException { + final byte k1[] = "key1".getBytes(UTF_8); + final byte v1[] = "value1".getBytes(UTF_8); + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + assertThat(txn.getForUpdate(readOptions, k1, true)).isNull(); + txn.put(k1, v1); + assertThat(txn.getForUpdate(readOptions, k1, true)).isEqualTo(v1); + } + } + + @Test + public void multiGetForUpdate_cf() throws RocksDBException { + final byte keys[][] = new byte[][] { + "key1".getBytes(UTF_8), + "key2".getBytes(UTF_8)}; + final byte values[][] = new byte[][] { + "value1".getBytes(UTF_8), + "value2".getBytes(UTF_8)}; + + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily(); + final List cfList = Arrays.asList(testCf, testCf); + + assertThat(txn.multiGetForUpdate(readOptions, cfList, keys)) + .isEqualTo(new byte[][] { null, null }); + + txn.put(testCf, keys[0], values[0]); + txn.put(testCf, keys[1], values[1]); + assertThat(txn.multiGetForUpdate(readOptions, cfList, keys)) + .isEqualTo(values); + } + } + + @Test + public void multiGetForUpdate() throws RocksDBException { + final byte keys[][] = new byte[][]{ + "key1".getBytes(UTF_8), + "key2".getBytes(UTF_8)}; + final byte values[][] = new byte[][]{ + "value1".getBytes(UTF_8), + "value2".getBytes(UTF_8)}; + + try (final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + assertThat(txn.multiGetForUpdate(readOptions, keys)).isEqualTo(new byte[][]{null, null}); + + txn.put(keys[0], values[0]); + txn.put(keys[1], values[1]); + assertThat(txn.multiGetForUpdate(readOptions, keys)).isEqualTo(values); + } + } + + @Test + public void getIterator() throws RocksDBException { + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + + final byte[] k1 = "key1".getBytes(UTF_8); + final byte[] v1 = "value1".getBytes(UTF_8); + + txn.put(k1, v1); + + try(final RocksIterator iterator = txn.getIterator(readOptions)) { + iterator.seek(k1); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo(k1); + assertThat(iterator.value()).isEqualTo(v1); + } + } + } + + @Test + public void getIterator_cf() throws RocksDBException { + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily(); + + final byte[] k1 = "key1".getBytes(UTF_8); + final byte[] v1 = "value1".getBytes(UTF_8); + + txn.put(testCf, k1, v1); + + try(final RocksIterator iterator = txn.getIterator(readOptions, testCf)) { + iterator.seek(k1); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo(k1); + assertThat(iterator.value()).isEqualTo(v1); + } + } + } + + @Test + public void merge_cf() throws RocksDBException { + final byte[] k1 = "key1".getBytes(UTF_8); + final byte[] v1 = "value1".getBytes(UTF_8); + + 
try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily(); + txn.merge(testCf, k1, v1); + } + } + + @Test + public void merge() throws RocksDBException { + final byte[] k1 = "key1".getBytes(UTF_8); + final byte[] v1 = "value1".getBytes(UTF_8); + + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + txn.merge(k1, v1); + } + } + + + @Test + public void delete_cf() throws RocksDBException { + final byte[] k1 = "key1".getBytes(UTF_8); + final byte[] v1 = "value1".getBytes(UTF_8); + + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily(); + txn.put(testCf, k1, v1); + assertThat(txn.get(testCf, readOptions, k1)).isEqualTo(v1); + + txn.delete(testCf, k1); + assertThat(txn.get(testCf, readOptions, k1)).isNull(); + } + } + + @Test + public void delete() throws RocksDBException { + final byte[] k1 = "key1".getBytes(UTF_8); + final byte[] v1 = "value1".getBytes(UTF_8); + + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + txn.put(k1, v1); + assertThat(txn.get(readOptions, k1)).isEqualTo(v1); + + txn.delete(k1); + assertThat(txn.get(readOptions, k1)).isNull(); + } + } + + @Test + public void delete_parts_cf() throws RocksDBException { + final byte keyParts[][] = new byte[][] { + "ke".getBytes(UTF_8), + "y1".getBytes(UTF_8)}; + final byte valueParts[][] = new byte[][] { + "val".getBytes(UTF_8), + "ue1".getBytes(UTF_8)}; + final byte[] key = concat(keyParts); + final byte[] value = concat(valueParts); + + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily(); + txn.put(testCf, keyParts, valueParts); + assertThat(txn.get(testCf, readOptions, key)).isEqualTo(value); + + txn.delete(testCf, keyParts); + + assertThat(txn.get(testCf, readOptions, key)) + .isNull(); + } + } + + @Test + public void delete_parts() throws RocksDBException { + final byte keyParts[][] = new byte[][] { + "ke".getBytes(UTF_8), + "y1".getBytes(UTF_8)}; + final byte valueParts[][] = new byte[][] { + "val".getBytes(UTF_8), + "ue1".getBytes(UTF_8)}; + final byte[] key = concat(keyParts); + final byte[] value = concat(valueParts); + + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + + txn.put(keyParts, valueParts); + + assertThat(txn.get(readOptions, key)).isEqualTo(value); + + txn.delete(keyParts); + + assertThat(txn.get(readOptions, key)).isNull(); + } + } + + @Test + public void getPutUntracked_cf() throws RocksDBException { + final byte k1[] = "key1".getBytes(UTF_8); + final byte v1[] = "value1".getBytes(UTF_8); + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily(); + assertThat(txn.get(testCf, readOptions, k1)).isNull(); + txn.putUntracked(testCf, k1, v1); + assertThat(txn.get(testCf, readOptions, k1)).isEqualTo(v1); + } 
+ } + + @Test + public void getPutUntracked() throws RocksDBException { + final byte k1[] = "key1".getBytes(UTF_8); + final byte v1[] = "value1".getBytes(UTF_8); + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + assertThat(txn.get(readOptions, k1)).isNull(); + txn.putUntracked(k1, v1); + assertThat(txn.get(readOptions, k1)).isEqualTo(v1); + } + } + + @Test + public void multiGetPutUntracked_cf() throws RocksDBException { + final byte keys[][] = new byte[][] { + "key1".getBytes(UTF_8), + "key2".getBytes(UTF_8)}; + final byte values[][] = new byte[][] { + "value1".getBytes(UTF_8), + "value2".getBytes(UTF_8)}; + + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily(); + + final List cfList = Arrays.asList(testCf, testCf); + + assertThat(txn.multiGet(readOptions, cfList, keys)).isEqualTo(new byte[][] { null, null }); + txn.putUntracked(testCf, keys[0], values[0]); + txn.putUntracked(testCf, keys[1], values[1]); + assertThat(txn.multiGet(readOptions, cfList, keys)).isEqualTo(values); + } + } + + @Test + public void multiGetPutUntracked() throws RocksDBException { + final byte keys[][] = new byte[][] { + "key1".getBytes(UTF_8), + "key2".getBytes(UTF_8)}; + final byte values[][] = new byte[][] { + "value1".getBytes(UTF_8), + "value2".getBytes(UTF_8)}; + + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + + assertThat(txn.multiGet(readOptions, keys)).isEqualTo(new byte[][] { null, null }); + txn.putUntracked(keys[0], values[0]); + txn.putUntracked(keys[1], values[1]); + assertThat(txn.multiGet(readOptions, keys)).isEqualTo(values); + } + } + + @Test + public void mergeUntracked_cf() throws RocksDBException { + final byte[] k1 = "key1".getBytes(UTF_8); + final byte[] v1 = "value1".getBytes(UTF_8); + + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily(); + txn.mergeUntracked(testCf, k1, v1); + } + } + + @Test + public void mergeUntracked() throws RocksDBException { + final byte[] k1 = "key1".getBytes(UTF_8); + final byte[] v1 = "value1".getBytes(UTF_8); + + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + txn.mergeUntracked(k1, v1); + } + } + + @Test + public void deleteUntracked_cf() throws RocksDBException { + final byte[] k1 = "key1".getBytes(UTF_8); + final byte[] v1 = "value1".getBytes(UTF_8); + + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily(); + txn.put(testCf, k1, v1); + assertThat(txn.get(testCf, readOptions, k1)).isEqualTo(v1); + + txn.deleteUntracked(testCf, k1); + assertThat(txn.get(testCf, readOptions, k1)).isNull(); + } + } + + @Test + public void deleteUntracked() throws RocksDBException { + final byte[] k1 = "key1".getBytes(UTF_8); + final byte[] v1 = "value1".getBytes(UTF_8); + + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = 
dbContainer.beginTransaction()) { + txn.put(k1, v1); + assertThat(txn.get(readOptions, k1)).isEqualTo(v1); + + txn.deleteUntracked(k1); + assertThat(txn.get(readOptions, k1)).isNull(); + } + } + + @Test + public void deleteUntracked_parts_cf() throws RocksDBException { + final byte keyParts[][] = new byte[][] { + "ke".getBytes(UTF_8), + "y1".getBytes(UTF_8)}; + final byte valueParts[][] = new byte[][] { + "val".getBytes(UTF_8), + "ue1".getBytes(UTF_8)}; + final byte[] key = concat(keyParts); + final byte[] value = concat(valueParts); + + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily(); + txn.put(testCf, keyParts, valueParts); + assertThat(txn.get(testCf, readOptions, key)).isEqualTo(value); + + txn.deleteUntracked(testCf, keyParts); + assertThat(txn.get(testCf, readOptions, key)).isNull(); + } + } + + @Test + public void deleteUntracked_parts() throws RocksDBException { + final byte keyParts[][] = new byte[][] { + "ke".getBytes(UTF_8), + "y1".getBytes(UTF_8)}; + final byte valueParts[][] = new byte[][] { + "val".getBytes(UTF_8), + "ue1".getBytes(UTF_8)}; + final byte[] key = concat(keyParts); + final byte[] value = concat(valueParts); + + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + txn.put(keyParts, valueParts); + assertThat(txn.get(readOptions, key)).isEqualTo(value); + + txn.deleteUntracked(keyParts); + assertThat(txn.get(readOptions, key)).isNull(); + } + } + + @Test + public void putLogData() throws RocksDBException { + final byte[] blob = "blobby".getBytes(UTF_8); + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + txn.putLogData(blob); + } + } + + @Test + public void enabledDisableIndexing() throws RocksDBException { + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + txn.disableIndexing(); + txn.enableIndexing(); + txn.disableIndexing(); + txn.enableIndexing(); + } + } + + @Test + public void numKeys() throws RocksDBException { + final byte k1[] = "key1".getBytes(UTF_8); + final byte v1[] = "value1".getBytes(UTF_8); + final byte k2[] = "key2".getBytes(UTF_8); + final byte v2[] = "value2".getBytes(UTF_8); + final byte k3[] = "key3".getBytes(UTF_8); + final byte v3[] = "value3".getBytes(UTF_8); + + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily(); + txn.put(k1, v1); + txn.put(testCf, k2, v2); + txn.merge(k3, v3); + txn.delete(testCf, k2); + + assertThat(txn.getNumKeys()).isEqualTo(3); + assertThat(txn.getNumPuts()).isEqualTo(2); + assertThat(txn.getNumMerges()).isEqualTo(1); + assertThat(txn.getNumDeletes()).isEqualTo(1); + } + } + + @Test + public void elapsedTime() throws RocksDBException, InterruptedException { + final long preStartTxnTime = System.currentTimeMillis(); + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + Thread.sleep(1); + + final long txnElapsedTime = txn.getElapsedTime(); + assertThat(txnElapsedTime).isLessThan(System.currentTimeMillis() + - preStartTxnTime); + assertThat(txnElapsedTime).isGreaterThan(0); + } + } + + @Test + public void getWriteBatch() throws 
RocksDBException { + final byte k1[] = "key1".getBytes(UTF_8); + final byte v1[] = "value1".getBytes(UTF_8); + + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + + txn.put(k1, v1); + + final WriteBatchWithIndex writeBatch = txn.getWriteBatch(); + assertThat(writeBatch).isNotNull(); + assertThat(writeBatch.isOwningHandle()).isFalse(); + assertThat(writeBatch.count()).isEqualTo(1); + } + } + + @Test + public void setLockTimeout() throws RocksDBException { + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + txn.setLockTimeout(1000); + } + } + + @Test + public void writeOptions() throws RocksDBException { + final byte k1[] = "key1".getBytes(UTF_8); + final byte v1[] = "value1".getBytes(UTF_8); + + try(final DBContainer dbContainer = startDb(); + final WriteOptions writeOptions = new WriteOptions() + .setDisableWAL(true) + .setSync(true); + final Transaction txn = dbContainer.beginTransaction(writeOptions)) { + + txn.put(k1, v1); + + WriteOptions txnWriteOptions = txn.getWriteOptions(); + assertThat(txnWriteOptions).isNotNull(); + assertThat(txnWriteOptions.isOwningHandle()).isFalse(); + assertThat(txnWriteOptions).isNotSameAs(writeOptions); + assertThat(txnWriteOptions.disableWAL()).isTrue(); + assertThat(txnWriteOptions.sync()).isTrue(); + + txn.setWriteOptions(txnWriteOptions.setSync(false)); + txnWriteOptions = txn.getWriteOptions(); + assertThat(txnWriteOptions).isNotNull(); + assertThat(txnWriteOptions.isOwningHandle()).isFalse(); + assertThat(txnWriteOptions).isNotSameAs(writeOptions); + assertThat(txnWriteOptions.disableWAL()).isTrue(); + assertThat(txnWriteOptions.sync()).isFalse(); + } + } + + @Test + public void undoGetForUpdate_cf() throws RocksDBException { + final byte k1[] = "key1".getBytes(UTF_8); + final byte v1[] = "value1".getBytes(UTF_8); + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily(); + assertThat(txn.getForUpdate(readOptions, testCf, k1, true)).isNull(); + txn.put(testCf, k1, v1); + assertThat(txn.getForUpdate(readOptions, testCf, k1, true)).isEqualTo(v1); + txn.undoGetForUpdate(testCf, k1); + } + } + + @Test + public void undoGetForUpdate() throws RocksDBException { + final byte k1[] = "key1".getBytes(UTF_8); + final byte v1[] = "value1".getBytes(UTF_8); + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + assertThat(txn.getForUpdate(readOptions, k1, true)).isNull(); + txn.put(k1, v1); + assertThat(txn.getForUpdate(readOptions, k1, true)).isEqualTo(v1); + txn.undoGetForUpdate(k1); + } + } + + @Test + public void rebuildFromWriteBatch() throws RocksDBException { + final byte k1[] = "key1".getBytes(UTF_8); + final byte v1[] = "value1".getBytes(UTF_8); + final byte k2[] = "key2".getBytes(UTF_8); + final byte v2[] = "value2".getBytes(UTF_8); + final byte k3[] = "key3".getBytes(UTF_8); + final byte v3[] = "value3".getBytes(UTF_8); + + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + + txn.put(k1, v1); + + assertThat(txn.get(readOptions, k1)).isEqualTo(v1); + assertThat(txn.getNumKeys()).isEqualTo(1); + + try(final WriteBatch writeBatch = new WriteBatch()) { + 
writeBatch.put(k2, v2); + writeBatch.put(k3, v3); + txn.rebuildFromWriteBatch(writeBatch); + + assertThat(txn.get(readOptions, k1)).isEqualTo(v1); + assertThat(txn.get(readOptions, k2)).isEqualTo(v2); + assertThat(txn.get(readOptions, k3)).isEqualTo(v3); + assertThat(txn.getNumKeys()).isEqualTo(3); + } + } + } + + @Test + public void getCommitTimeWriteBatch() throws RocksDBException { + final byte k1[] = "key1".getBytes(UTF_8); + final byte v1[] = "value1".getBytes(UTF_8); + + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + + txn.put(k1, v1); + final WriteBatch writeBatch = txn.getCommitTimeWriteBatch(); + + assertThat(writeBatch).isNotNull(); + assertThat(writeBatch.isOwningHandle()).isFalse(); + assertThat(writeBatch.count()).isEqualTo(0); + } + } + + @Test + public void logNumber() throws RocksDBException { + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + assertThat(txn.getLogNumber()).isEqualTo(0); + final long logNumber = rand.nextLong(); + txn.setLogNumber(logNumber); + assertThat(txn.getLogNumber()).isEqualTo(logNumber); + } + } + + private static byte[] concat(final byte[][] bufs) { + int resultLength = 0; + for(final byte[] buf : bufs) { + resultLength += buf.length; + } + + final byte[] result = new byte[resultLength]; + int resultOffset = 0; + for(final byte[] buf : bufs) { + final int srcLength = buf.length; + System.arraycopy(buf, 0, result, resultOffset, srcLength); + resultOffset += srcLength; + } + + return result; + } + + private static class TestTransactionNotifier + extends AbstractTransactionNotifier { + private final List createdSnapshots = new ArrayList<>(); + + @Override + public void snapshotCreated(final Snapshot newSnapshot) { + createdSnapshots.add(newSnapshot); + } + + public List getCreatedSnapshots() { + return createdSnapshots; + } + } + + protected static abstract class DBContainer + implements AutoCloseable { + protected final WriteOptions writeOptions; + protected final List columnFamilyHandles; + protected final ColumnFamilyOptions columnFamilyOptions; + protected final DBOptions options; + + public DBContainer(final WriteOptions writeOptions, + final List columnFamilyHandles, + final ColumnFamilyOptions columnFamilyOptions, + final DBOptions options) { + this.writeOptions = writeOptions; + this.columnFamilyHandles = columnFamilyHandles; + this.columnFamilyOptions = columnFamilyOptions; + this.options = options; + } + + public abstract Transaction beginTransaction(); + + public abstract Transaction beginTransaction( + final WriteOptions writeOptions); + + public ColumnFamilyHandle getTestColumnFamily() { + return columnFamilyHandles.get(1); + } + + @Override + public abstract void close(); + } +} diff --git a/java/src/test/java/org/rocksdb/ColumnFamilyTest.java b/java/src/test/java/org/rocksdb/ColumnFamilyTest.java index 19fe332df..3df63c65f 100644 --- a/java/src/test/java/org/rocksdb/ColumnFamilyTest.java +++ b/java/src/test/java/org/rocksdb/ColumnFamilyTest.java @@ -12,6 +12,7 @@ import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.assertj.core.api.Assertions.assertThat; public class ColumnFamilyTest { @@ -23,6 +24,31 @@ public class ColumnFamilyTest { @Rule public TemporaryFolder dbFolder = new TemporaryFolder(); + @Test + public void columnFamilyDescriptorName() throws RocksDBException { + final byte[] cfName = 
"some_name".getBytes(UTF_8); + + try(final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions()) { + final ColumnFamilyDescriptor cfDescriptor = + new ColumnFamilyDescriptor(cfName, cfOptions); + assertThat(cfDescriptor.getName()).isEqualTo(cfName); + } + } + + @Test + public void columnFamilyDescriptorOptions() throws RocksDBException { + final byte[] cfName = "some_name".getBytes(UTF_8); + + try(final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions() + .setCompressionType(CompressionType.BZLIB2_COMPRESSION)) { + final ColumnFamilyDescriptor cfDescriptor = + new ColumnFamilyDescriptor(cfName, cfOptions); + + assertThat(cfDescriptor.getOptions().compressionType()) + .isEqualTo(CompressionType.BZLIB2_COMPRESSION); + } + } + @Test public void listColumnFamilies() throws RocksDBException { try (final Options options = new Options().setCreateIfMissing(true); @@ -47,6 +73,9 @@ public class ColumnFamilyTest { try { assertThat(cfh).isNotNull(); + assertThat(cfh.getName()).isEqualTo("default".getBytes(UTF_8)); + assertThat(cfh.getID()).isEqualTo(0); + final byte[] key = "key".getBytes(); final byte[] value = "value".getBytes(); @@ -64,15 +93,25 @@ public class ColumnFamilyTest { @Test public void createColumnFamily() throws RocksDBException { + final byte[] cfName = "new_cf".getBytes(UTF_8); + final ColumnFamilyDescriptor cfDescriptor = new ColumnFamilyDescriptor(cfName, + new ColumnFamilyOptions()); + try (final Options options = new Options().setCreateIfMissing(true); final RocksDB db = RocksDB.open(options, - dbFolder.getRoot().getAbsolutePath())) { - final ColumnFamilyHandle columnFamilyHandle = db.createColumnFamily( - new ColumnFamilyDescriptor("new_cf".getBytes(), - new ColumnFamilyOptions())); + dbFolder.getRoot().getAbsolutePath())) { + + final ColumnFamilyHandle columnFamilyHandle = db.createColumnFamily(cfDescriptor); + try { + assertThat(columnFamilyHandle.getName()).isEqualTo(cfName); + assertThat(columnFamilyHandle.getID()).isEqualTo(1); + + final ColumnFamilyDescriptor latestDescriptor = columnFamilyHandle.getDescriptor(); + assertThat(latestDescriptor.getName()).isEqualTo(cfName); + final List columnFamilyNames = RocksDB.listColumnFamilies( - options, dbFolder.getRoot().getAbsolutePath()); + options, dbFolder.getRoot().getAbsolutePath()); assertThat(columnFamilyNames).isNotNull(); assertThat(columnFamilyNames.size()).isGreaterThan(0); assertThat(columnFamilyNames.size()).isEqualTo(2); diff --git a/java/src/test/java/org/rocksdb/OptimisticTransactionDBTest.java b/java/src/test/java/org/rocksdb/OptimisticTransactionDBTest.java new file mode 100644 index 000000000..519b70b1d --- /dev/null +++ b/java/src/test/java/org/rocksdb/OptimisticTransactionDBTest.java @@ -0,0 +1,131 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
+ +package org.rocksdb; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static org.assertj.core.api.Assertions.assertThat; + +public class OptimisticTransactionDBTest { + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Test + public void open() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final OptimisticTransactionDB otdb = OptimisticTransactionDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + assertThat(otdb).isNotNull(); + } + } + + @Test + public void open_columnFamilies() throws RocksDBException { + try(final DBOptions dbOptions = new DBOptions().setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true); + final ColumnFamilyOptions myCfOpts = new ColumnFamilyOptions()) { + + final List columnFamilyDescriptors = + Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("myCf".getBytes(), myCfOpts)); + + final List columnFamilyHandles = new ArrayList<>(); + + try (final OptimisticTransactionDB otdb = OptimisticTransactionDB.open(dbOptions, + dbFolder.getRoot().getAbsolutePath(), + columnFamilyDescriptors, columnFamilyHandles)) { + try { + assertThat(otdb).isNotNull(); + } finally { + for (final ColumnFamilyHandle handle : columnFamilyHandles) { + handle.close(); + } + } + } + } + } + + @Test + public void beginTransaction() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final OptimisticTransactionDB otdb = OptimisticTransactionDB.open( + options, dbFolder.getRoot().getAbsolutePath()); + final WriteOptions writeOptions = new WriteOptions()) { + + try(final Transaction txn = otdb.beginTransaction(writeOptions)) { + assertThat(txn).isNotNull(); + } + } + } + + @Test + public void beginTransaction_transactionOptions() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final OptimisticTransactionDB otdb = OptimisticTransactionDB.open( + options, dbFolder.getRoot().getAbsolutePath()); + final WriteOptions writeOptions = new WriteOptions(); + final OptimisticTransactionOptions optimisticTxnOptions = + new OptimisticTransactionOptions()) { + + try(final Transaction txn = otdb.beginTransaction(writeOptions, + optimisticTxnOptions)) { + assertThat(txn).isNotNull(); + } + } + } + + @Test + public void beginTransaction_withOld() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final OptimisticTransactionDB otdb = OptimisticTransactionDB.open( + options, dbFolder.getRoot().getAbsolutePath()); + final WriteOptions writeOptions = new WriteOptions()) { + + try(final Transaction txn = otdb.beginTransaction(writeOptions)) { + final Transaction txnReused = otdb.beginTransaction(writeOptions, txn); + assertThat(txnReused).isSameAs(txn); + } + } + } + + @Test + public void beginTransaction_withOld_transactionOptions() + throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final OptimisticTransactionDB otdb = OptimisticTransactionDB.open( + options, dbFolder.getRoot().getAbsolutePath()); + final WriteOptions writeOptions = new WriteOptions(); + final OptimisticTransactionOptions optimisticTxnOptions = + new OptimisticTransactionOptions()) { + + try(final Transaction txn = otdb.beginTransaction(writeOptions)) { + final Transaction txnReused = 
otdb.beginTransaction(writeOptions, + optimisticTxnOptions, txn); + assertThat(txnReused).isSameAs(txn); + } + } + } + + @Test + public void baseDB() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final OptimisticTransactionDB otdb = OptimisticTransactionDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + assertThat(otdb).isNotNull(); + final RocksDB db = otdb.getBaseDB(); + assertThat(db).isNotNull(); + assertThat(db.isOwningHandle()).isFalse(); + } + } +} diff --git a/java/src/test/java/org/rocksdb/OptimisticTransactionOptionsTest.java b/java/src/test/java/org/rocksdb/OptimisticTransactionOptionsTest.java new file mode 100644 index 000000000..4a57e3356 --- /dev/null +++ b/java/src/test/java/org/rocksdb/OptimisticTransactionOptionsTest.java @@ -0,0 +1,37 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import org.junit.Test; +import org.rocksdb.util.DirectBytewiseComparator; + +import java.util.Random; + +import static org.assertj.core.api.Assertions.assertThat; + +public class OptimisticTransactionOptionsTest { + + private static final Random rand = PlatformRandomHelper. + getPlatformSpecificRandomFactory(); + + @Test + public void setSnapshot() { + try (final OptimisticTransactionOptions opt = new OptimisticTransactionOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setSetSnapshot(boolValue); + assertThat(opt.isSetSnapshot()).isEqualTo(boolValue); + } + } + + @Test + public void comparator() { + try (final OptimisticTransactionOptions opt = new OptimisticTransactionOptions(); + final ComparatorOptions copt = new ComparatorOptions(); + final DirectComparator comparator = new DirectBytewiseComparator(copt)) { + opt.setComparator(comparator); + } + } +} diff --git a/java/src/test/java/org/rocksdb/OptimisticTransactionTest.java b/java/src/test/java/org/rocksdb/OptimisticTransactionTest.java new file mode 100644 index 000000000..f44816e64 --- /dev/null +++ b/java/src/test/java/org/rocksdb/OptimisticTransactionTest.java @@ -0,0 +1,350 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
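Pulling together the pieces exercised by the two tests above, a typical optimistic write path looks roughly like the sketch below (not part of the diff itself; path is hypothetical).

import org.rocksdb.*;

public class OptimisticTransactionSketch {
  public static void main(final String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final Options options = new Options().setCreateIfMissing(true);
         final OptimisticTransactionDB otdb = OptimisticTransactionDB.open(
             options, "/tmp/rocksdbjni-optimistic-sketch");  // hypothetical path
         final WriteOptions writeOptions = new WriteOptions();
         final ReadOptions readOptions = new ReadOptions();
         final OptimisticTransactionOptions txnOptions =
             new OptimisticTransactionOptions().setSetSnapshot(true)) {

      try (final Transaction txn = otdb.beginTransaction(writeOptions, txnOptions)) {
        final byte[] key = "key1".getBytes();
        txn.put(key, "value1".getBytes());
        // reads inside the transaction see the uncommitted write
        System.out.println(new String(txn.get(readOptions, key)));
        // for optimistic transactions, conflicts are only detected at commit time
        txn.commit();
      }
    }
  }
}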
+ +package org.rocksdb; + +import org.junit.Test; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; + +public class OptimisticTransactionTest extends AbstractTransactionTest { + + @Test + public void getForUpdate_cf_conflict() throws RocksDBException { + final byte k1[] = "key1".getBytes(UTF_8); + final byte v1[] = "value1".getBytes(UTF_8); + final byte v12[] = "value12".getBytes(UTF_8); + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions()) { + final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily(); + + try(final Transaction txn = dbContainer.beginTransaction()) { + txn.put(testCf, k1, v1); + assertThat(txn.get(testCf, readOptions, k1)).isEqualTo(v1); + txn.commit(); + } + + try(final Transaction txn2 = dbContainer.beginTransaction()) { + try(final Transaction txn3 = dbContainer.beginTransaction()) { + assertThat(txn3.getForUpdate(readOptions, testCf, k1, true)).isEqualTo(v1); + + // NOTE: txn2 updates k1, during txn3 + txn2.put(testCf, k1, v12); + assertThat(txn2.get(testCf, readOptions, k1)).isEqualTo(v12); + txn2.commit(); + + try { + txn3.commit(); // should cause an exception! + } catch(final RocksDBException e) { + assertThat(e.getStatus().getCode()).isSameAs(Status.Code.Busy); + return; + } + } + } + + fail("Expected an exception for put after getForUpdate from conflicting" + + "transactions"); + } + } + + @Test + public void getForUpdate_conflict() throws RocksDBException { + final byte k1[] = "key1".getBytes(UTF_8); + final byte v1[] = "value1".getBytes(UTF_8); + final byte v12[] = "value12".getBytes(UTF_8); + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions()) { + + try(final Transaction txn = dbContainer.beginTransaction()) { + txn.put(k1, v1); + assertThat(txn.get(readOptions, k1)).isEqualTo(v1); + txn.commit(); + } + + try(final Transaction txn2 = dbContainer.beginTransaction()) { + try(final Transaction txn3 = dbContainer.beginTransaction()) { + assertThat(txn3.getForUpdate(readOptions, k1, true)).isEqualTo(v1); + + // NOTE: txn2 updates k1, during txn3 + txn2.put(k1, v12); + assertThat(txn2.get(readOptions, k1)).isEqualTo(v12); + txn2.commit(); + + try { + txn3.commit(); // should cause an exception! 
+ } catch(final RocksDBException e) { + assertThat(e.getStatus().getCode()).isSameAs(Status.Code.Busy); + return; + } + } + } + + fail("Expected an exception for put after getForUpdate from conflicting" + + "transactions"); + } + } + + @Test + public void multiGetForUpdate_cf_conflict() throws RocksDBException { + final byte keys[][] = new byte[][] { + "key1".getBytes(UTF_8), + "key2".getBytes(UTF_8)}; + final byte values[][] = new byte[][] { + "value1".getBytes(UTF_8), + "value2".getBytes(UTF_8)}; + final byte[] otherValue = "otherValue".getBytes(UTF_8); + + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions()) { + final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily(); + final List cfList = Arrays.asList(testCf, testCf); + + try(final Transaction txn = dbContainer.beginTransaction()) { + txn.put(testCf, keys[0], values[0]); + txn.put(testCf, keys[1], values[1]); + assertThat(txn.multiGet(readOptions, cfList, keys)).isEqualTo(values); + txn.commit(); + } + + try(final Transaction txn2 = dbContainer.beginTransaction()) { + try(final Transaction txn3 = dbContainer.beginTransaction()) { + assertThat(txn3.multiGetForUpdate(readOptions, cfList, keys)) + .isEqualTo(values); + + // NOTE: txn2 updates k1, during txn3 + txn2.put(testCf, keys[0], otherValue); + assertThat(txn2.get(testCf, readOptions, keys[0])) + .isEqualTo(otherValue); + txn2.commit(); + + try { + txn3.commit(); // should cause an exception! + } catch(final RocksDBException e) { + assertThat(e.getStatus().getCode()).isSameAs(Status.Code.Busy); + return; + } + } + } + + fail("Expected an exception for put after getForUpdate from conflicting" + + "transactions"); + } + } + + @Test + public void multiGetForUpdate_conflict() throws RocksDBException { + final byte keys[][] = new byte[][] { + "key1".getBytes(UTF_8), + "key2".getBytes(UTF_8)}; + final byte values[][] = new byte[][] { + "value1".getBytes(UTF_8), + "value2".getBytes(UTF_8)}; + final byte[] otherValue = "otherValue".getBytes(UTF_8); + + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions()) { + try(final Transaction txn = dbContainer.beginTransaction()) { + txn.put(keys[0], values[0]); + txn.put(keys[1], values[1]); + assertThat(txn.multiGet(readOptions, keys)).isEqualTo(values); + txn.commit(); + } + + try(final Transaction txn2 = dbContainer.beginTransaction()) { + try(final Transaction txn3 = dbContainer.beginTransaction()) { + assertThat(txn3.multiGetForUpdate(readOptions, keys)) + .isEqualTo(values); + + // NOTE: txn2 updates k1, during txn3 + txn2.put(keys[0], otherValue); + assertThat(txn2.get(readOptions, keys[0])) + .isEqualTo(otherValue); + txn2.commit(); + + try { + txn3.commit(); // should cause an exception! 
+ } catch(final RocksDBException e) { + assertThat(e.getStatus().getCode()).isSameAs(Status.Code.Busy); + return; + } + } + } + + fail("Expected an exception for put after getForUpdate from conflicting" + + "transactions"); + } + } + + @Test + public void undoGetForUpdate_cf_conflict() throws RocksDBException { + final byte k1[] = "key1".getBytes(UTF_8); + final byte v1[] = "value1".getBytes(UTF_8); + final byte v12[] = "value12".getBytes(UTF_8); + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions()) { + final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily(); + + try(final Transaction txn = dbContainer.beginTransaction()) { + txn.put(testCf, k1, v1); + assertThat(txn.get(testCf, readOptions, k1)).isEqualTo(v1); + txn.commit(); + } + + try(final Transaction txn2 = dbContainer.beginTransaction()) { + try(final Transaction txn3 = dbContainer.beginTransaction()) { + assertThat(txn3.getForUpdate(readOptions, testCf, k1, true)).isEqualTo(v1); + + // undo the getForUpdate + txn3.undoGetForUpdate(testCf, k1); + + // NOTE: txn2 updates k1, during txn3 + txn2.put(testCf, k1, v12); + assertThat(txn2.get(testCf, readOptions, k1)).isEqualTo(v12); + txn2.commit(); + + // should not cause an exception + // because we undid the getForUpdate above! + txn3.commit(); + } + } + } + } + + @Test + public void undoGetForUpdate_conflict() throws RocksDBException { + final byte k1[] = "key1".getBytes(UTF_8); + final byte v1[] = "value1".getBytes(UTF_8); + final byte v12[] = "value12".getBytes(UTF_8); + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions()) { + + try(final Transaction txn = dbContainer.beginTransaction()) { + txn.put(k1, v1); + assertThat(txn.get(readOptions, k1)).isEqualTo(v1); + txn.commit(); + } + + try(final Transaction txn2 = dbContainer.beginTransaction()) { + try(final Transaction txn3 = dbContainer.beginTransaction()) { + assertThat(txn3.getForUpdate(readOptions, k1, true)).isEqualTo(v1); + + // undo the getForUpdate + txn3.undoGetForUpdate(k1); + + // NOTE: txn2 updates k1, during txn3 + txn2.put(k1, v12); + assertThat(txn2.get(readOptions, k1)).isEqualTo(v12); + txn2.commit(); + + // should not cause an exception + // because we undid the getForUpdate above! 
+          txn3.commit();
+        }
+      }
+    }
+  }
+
+  @Test
+  public void name() throws RocksDBException {
+    try(final DBContainer dbContainer = startDb();
+        final Transaction txn = dbContainer.beginTransaction()) {
+      assertThat(txn.getName()).isEmpty();
+      final String name = "my-transaction-" + rand.nextLong();
+
+      try {
+        txn.setName(name);
+      } catch(final RocksDBException e) {
+        assertThat(e.getStatus().getCode())
+            .isSameAs(Status.Code.InvalidArgument);
+        return;
+      }
+
+      fail("Optimistic transactions cannot be named.");
+    }
+  }
+
+  @Override
+  public OptimisticTransactionDBContainer startDb()
+      throws RocksDBException {
+    final DBOptions options = new DBOptions()
+        .setCreateIfMissing(true)
+        .setCreateMissingColumnFamilies(true);
+
+    final ColumnFamilyOptions columnFamilyOptions = new ColumnFamilyOptions();
+    final List<ColumnFamilyDescriptor> columnFamilyDescriptors =
+        Arrays.asList(
+            new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+            new ColumnFamilyDescriptor(TXN_TEST_COLUMN_FAMILY,
+                columnFamilyOptions));
+    final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
+
+    final OptimisticTransactionDB optimisticTxnDb;
+    try {
+      optimisticTxnDb = OptimisticTransactionDB.open(
+          options, dbFolder.getRoot().getAbsolutePath(),
+          columnFamilyDescriptors, columnFamilyHandles);
+    } catch(final RocksDBException e) {
+      columnFamilyOptions.close();
+      options.close();
+      throw e;
+    }
+
+    final WriteOptions writeOptions = new WriteOptions();
+    final OptimisticTransactionOptions optimisticTxnOptions =
+        new OptimisticTransactionOptions();
+
+    return new OptimisticTransactionDBContainer(optimisticTxnOptions,
+        writeOptions, columnFamilyHandles, optimisticTxnDb, columnFamilyOptions,
+        options);
+  }
+
+  private static class OptimisticTransactionDBContainer
+      extends DBContainer {
+
+    private final OptimisticTransactionOptions optimisticTxnOptions;
+    private final OptimisticTransactionDB optimisticTxnDb;
+
+    public OptimisticTransactionDBContainer(
+        final OptimisticTransactionOptions optimisticTxnOptions,
+        final WriteOptions writeOptions,
+        final List<ColumnFamilyHandle> columnFamilyHandles,
+        final OptimisticTransactionDB optimisticTxnDb,
+        final ColumnFamilyOptions columnFamilyOptions,
+        final DBOptions options) {
+      super(writeOptions, columnFamilyHandles, columnFamilyOptions,
+          options);
+      this.optimisticTxnOptions = optimisticTxnOptions;
+      this.optimisticTxnDb = optimisticTxnDb;
+    }
+
+    @Override
+    public Transaction beginTransaction() {
+      return optimisticTxnDb.beginTransaction(writeOptions,
+          optimisticTxnOptions);
+    }
+
+    @Override
+    public Transaction beginTransaction(final WriteOptions writeOptions) {
+      return optimisticTxnDb.beginTransaction(writeOptions,
+          optimisticTxnOptions);
+    }
+
+    @Override
+    public void close() {
+      optimisticTxnOptions.close();
+      writeOptions.close();
+      for(final ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles) {
+        columnFamilyHandle.close();
+      }
+      optimisticTxnDb.close();
+      options.close();
+    }
+  }
+}
diff --git a/java/src/test/java/org/rocksdb/TransactionDBOptionsTest.java b/java/src/test/java/org/rocksdb/TransactionDBOptionsTest.java
new file mode 100644
index 000000000..7eaa6b16c
--- /dev/null
+++ b/java/src/test/java/org/rocksdb/TransactionDBOptionsTest.java
@@ -0,0 +1,64 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+ +package org.rocksdb; + +import org.junit.Test; + +import java.util.Random; + +import static org.assertj.core.api.Assertions.assertThat; + +public class TransactionDBOptionsTest { + + private static final Random rand = PlatformRandomHelper. + getPlatformSpecificRandomFactory(); + + @Test + public void maxNumLocks() { + try (final TransactionDBOptions opt = new TransactionDBOptions()) { + final long longValue = rand.nextLong(); + opt.setMaxNumLocks(longValue); + assertThat(opt.getMaxNumLocks()).isEqualTo(longValue); + } + } + + @Test + public void maxNumStripes() { + try (final TransactionDBOptions opt = new TransactionDBOptions()) { + final long longValue = rand.nextLong(); + opt.setNumStripes(longValue); + assertThat(opt.getNumStripes()).isEqualTo(longValue); + } + } + + @Test + public void transactionLockTimeout() { + try (final TransactionDBOptions opt = new TransactionDBOptions()) { + final long longValue = rand.nextLong(); + opt.setTransactionLockTimeout(longValue); + assertThat(opt.getTransactionLockTimeout()).isEqualTo(longValue); + } + } + + @Test + public void defaultLockTimeout() { + try (final TransactionDBOptions opt = new TransactionDBOptions()) { + final long longValue = rand.nextLong(); + opt.setDefaultLockTimeout(longValue); + assertThat(opt.getDefaultLockTimeout()).isEqualTo(longValue); + } + } + + @Test + public void writePolicy() { + try (final TransactionDBOptions opt = new TransactionDBOptions()) { + final TxnDBWritePolicy writePolicy = TxnDBWritePolicy.WRITE_UNPREPARED; // non-default + opt.setWritePolicy(writePolicy); + assertThat(opt.getWritePolicy()).isEqualTo(writePolicy); + } + } + +} diff --git a/java/src/test/java/org/rocksdb/TransactionDBTest.java b/java/src/test/java/org/rocksdb/TransactionDBTest.java new file mode 100644 index 000000000..b0ea813ff --- /dev/null +++ b/java/src/test/java/org/rocksdb/TransactionDBTest.java @@ -0,0 +1,178 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
+
+package org.rocksdb;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.util.*;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+public class TransactionDBTest {
+
+  @Rule
+  public TemporaryFolder dbFolder = new TemporaryFolder();
+
+  @Test
+  public void open() throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true);
+         final TransactionDBOptions txnDbOptions = new TransactionDBOptions();
+         final TransactionDB tdb = TransactionDB.open(options, txnDbOptions,
+             dbFolder.getRoot().getAbsolutePath())) {
+      assertThat(tdb).isNotNull();
+    }
+  }
+
+  @Test
+  public void open_columnFamilies() throws RocksDBException {
+    try(final DBOptions dbOptions = new DBOptions().setCreateIfMissing(true)
+        .setCreateMissingColumnFamilies(true);
+        final ColumnFamilyOptions myCfOpts = new ColumnFamilyOptions()) {
+
+      final List<ColumnFamilyDescriptor> columnFamilyDescriptors =
+          Arrays.asList(
+              new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+              new ColumnFamilyDescriptor("myCf".getBytes(), myCfOpts));
+
+      final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
+
+      try (final TransactionDBOptions txnDbOptions = new TransactionDBOptions();
+           final TransactionDB tdb = TransactionDB.open(dbOptions, txnDbOptions,
+               dbFolder.getRoot().getAbsolutePath(),
+               columnFamilyDescriptors, columnFamilyHandles)) {
+        try {
+          assertThat(tdb).isNotNull();
+        } finally {
+          for (final ColumnFamilyHandle handle : columnFamilyHandles) {
+            handle.close();
+          }
+        }
+      }
+    }
+  }
+
+  @Test
+  public void beginTransaction() throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true);
+         final TransactionDBOptions txnDbOptions = new TransactionDBOptions();
+         final TransactionDB tdb = TransactionDB.open(options, txnDbOptions,
+             dbFolder.getRoot().getAbsolutePath());
+         final WriteOptions writeOptions = new WriteOptions()) {
+
+      try(final Transaction txn = tdb.beginTransaction(writeOptions)) {
+        assertThat(txn).isNotNull();
+      }
+    }
+  }
+
+  @Test
+  public void beginTransaction_transactionOptions() throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true);
+         final TransactionDBOptions txnDbOptions = new TransactionDBOptions();
+         final TransactionDB tdb = TransactionDB.open(options, txnDbOptions,
+             dbFolder.getRoot().getAbsolutePath());
+         final WriteOptions writeOptions = new WriteOptions();
+         final TransactionOptions txnOptions = new TransactionOptions()) {
+
+      try(final Transaction txn = tdb.beginTransaction(writeOptions,
+          txnOptions)) {
+        assertThat(txn).isNotNull();
+      }
+    }
+  }
+
+  @Test
+  public void beginTransaction_withOld() throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true);
+         final TransactionDBOptions txnDbOptions = new TransactionDBOptions();
+         final TransactionDB tdb = TransactionDB.open(options, txnDbOptions,
+             dbFolder.getRoot().getAbsolutePath());
+         final WriteOptions writeOptions = new WriteOptions()) {
+
+      try(final Transaction txn = tdb.beginTransaction(writeOptions)) {
+        final Transaction txnReused = tdb.beginTransaction(writeOptions, txn);
+        assertThat(txnReused).isSameAs(txn);
+      }
+    }
+  }
+
+  @Test
+  public void beginTransaction_withOld_transactionOptions()
+      throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true);
+         final TransactionDBOptions txnDbOptions = new TransactionDBOptions();
+         final TransactionDB tdb = TransactionDB.open(options, txnDbOptions,
+             dbFolder.getRoot().getAbsolutePath());
+         final WriteOptions writeOptions = new WriteOptions();
+         final TransactionOptions txnOptions = new TransactionOptions()) {
+
+      try(final Transaction txn = tdb.beginTransaction(writeOptions)) {
+        final Transaction txnReused = tdb.beginTransaction(writeOptions,
+            txnOptions, txn);
+        assertThat(txnReused).isSameAs(txn);
+      }
+    }
+  }
+
+  @Test
+  public void lockStatusData() throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true);
+         final TransactionDBOptions txnDbOptions = new TransactionDBOptions();
+         final TransactionDB tdb = TransactionDB.open(options, txnDbOptions,
+             dbFolder.getRoot().getAbsolutePath());
+         final WriteOptions writeOptions = new WriteOptions();
+         final ReadOptions readOptions = new ReadOptions()) {
+
+      try (final Transaction txn = tdb.beginTransaction(writeOptions)) {
+
+        final byte key[] = "key".getBytes(UTF_8);
+        final byte value[] = "value".getBytes(UTF_8);
+
+        txn.put(key, value);
+        assertThat(txn.getForUpdate(readOptions, key, true)).isEqualTo(value);
+
+        final Map<Long, TransactionDB.KeyLockInfo> lockStatus =
+            tdb.getLockStatusData();
+
+        assertThat(lockStatus.size()).isEqualTo(1);
+        final Set<Map.Entry<Long, TransactionDB.KeyLockInfo>> entrySet =
+            lockStatus.entrySet();
+        final Map.Entry<Long, TransactionDB.KeyLockInfo> entry =
+            entrySet.iterator().next();
+        final long columnFamilyId = entry.getKey();
+        assertThat(columnFamilyId).isEqualTo(0);
+        final TransactionDB.KeyLockInfo keyLockInfo = entry.getValue();
+        assertThat(keyLockInfo.getKey()).isEqualTo(new String(key, UTF_8));
+        assertThat(keyLockInfo.getTransactionIDs().length).isEqualTo(1);
+        assertThat(keyLockInfo.getTransactionIDs()[0]).isEqualTo(txn.getId());
+        assertThat(keyLockInfo.isExclusive()).isTrue();
+      }
+    }
+  }
+
+  @Test
+  public void deadlockInfoBuffer() throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true);
+         final TransactionDBOptions txnDbOptions = new TransactionDBOptions();
+         final TransactionDB tdb = TransactionDB.open(options, txnDbOptions,
+             dbFolder.getRoot().getAbsolutePath())) {
+
+      // TODO(AR) can we cause a deadlock so that we can test the output here?
+      assertThat(tdb.getDeadlockInfoBuffer()).isEmpty();
+    }
+  }
+
+  @Test
+  public void setDeadlockInfoBufferSize() throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true);
+         final TransactionDBOptions txnDbOptions = new TransactionDBOptions();
+         final TransactionDB tdb = TransactionDB.open(options, txnDbOptions,
+             dbFolder.getRoot().getAbsolutePath())) {
+      tdb.setDeadlockInfoBufferSize(123);
+    }
+  }
+}
diff --git a/java/src/test/java/org/rocksdb/TransactionOptionsTest.java b/java/src/test/java/org/rocksdb/TransactionOptionsTest.java
new file mode 100644
index 000000000..add0439e0
--- /dev/null
+++ b/java/src/test/java/org/rocksdb/TransactionOptionsTest.java
@@ -0,0 +1,72 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.Test;
+
+import java.util.Random;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class TransactionOptionsTest {
+
+  private static final Random rand = PlatformRandomHelper.
+ getPlatformSpecificRandomFactory(); + + @Test + public void snapshot() { + try (final TransactionOptions opt = new TransactionOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setSetSnapshot(boolValue); + assertThat(opt.isSetSnapshot()).isEqualTo(boolValue); + } + } + + @Test + public void deadlockDetect() { + try (final TransactionOptions opt = new TransactionOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setDeadlockDetect(boolValue); + assertThat(opt.isDeadlockDetect()).isEqualTo(boolValue); + } + } + + @Test + public void lockTimeout() { + try (final TransactionOptions opt = new TransactionOptions()) { + final long longValue = rand.nextLong(); + opt.setLockTimeout(longValue); + assertThat(opt.getLockTimeout()).isEqualTo(longValue); + } + } + + @Test + public void expiration() { + try (final TransactionOptions opt = new TransactionOptions()) { + final long longValue = rand.nextLong(); + opt.setExpiration(longValue); + assertThat(opt.getExpiration()).isEqualTo(longValue); + } + } + + @Test + public void deadlockDetectDepth() { + try (final TransactionOptions opt = new TransactionOptions()) { + final long longValue = rand.nextLong(); + opt.setDeadlockDetectDepth(longValue); + assertThat(opt.getDeadlockDetectDepth()).isEqualTo(longValue); + } + } + + @Test + public void maxWriteBatchSize() { + try (final TransactionOptions opt = new TransactionOptions()) { + final long longValue = rand.nextLong(); + opt.setMaxWriteBatchSize(longValue); + assertThat(opt.getMaxWriteBatchSize()).isEqualTo(longValue); + } + } +} diff --git a/java/src/test/java/org/rocksdb/TransactionTest.java b/java/src/test/java/org/rocksdb/TransactionTest.java new file mode 100644 index 000000000..57a05c9e3 --- /dev/null +++ b/java/src/test/java/org/rocksdb/TransactionTest.java @@ -0,0 +1,308 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import org.junit.Test; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; + +public class TransactionTest extends AbstractTransactionTest { + + @Test + public void getForUpdate_cf_conflict() throws RocksDBException { + final byte k1[] = "key1".getBytes(UTF_8); + final byte v1[] = "value1".getBytes(UTF_8); + final byte v12[] = "value12".getBytes(UTF_8); + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions()) { + final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily(); + + try(final Transaction txn = dbContainer.beginTransaction()) { + txn.put(testCf, k1, v1); + assertThat(txn.getForUpdate(readOptions, testCf, k1, true)).isEqualTo(v1); + txn.commit(); + } + + try(final Transaction txn2 = dbContainer.beginTransaction()) { + try(final Transaction txn3 = dbContainer.beginTransaction()) { + assertThat(txn3.getForUpdate(readOptions, testCf, k1, true)).isEqualTo(v1); + + // NOTE: txn2 updates k1, during txn3 + try { + txn2.put(testCf, k1, v12); // should cause an exception! 
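+            // Pessimistic TransactionDB: txn3's getForUpdate() holds a lock
+            // on k1, so txn2's conflicting put() fails with
+            // Status.Code.TimedOut once the lock timeout expires (contrast
+            // with OptimisticTransactionTest, where the conflict is only
+            // reported as Busy at commit time).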
+          } catch(final RocksDBException e) {
+            assertThat(e.getStatus().getCode()).isSameAs(Status.Code.TimedOut);
+            return;
+          }
+        }
+      }
+
+      fail("Expected an exception for put after getForUpdate from conflicting" +
+          "transactions");
+    }
+  }
+
+  @Test
+  public void getForUpdate_conflict() throws RocksDBException {
+    final byte k1[] = "key1".getBytes(UTF_8);
+    final byte v1[] = "value1".getBytes(UTF_8);
+    final byte v12[] = "value12".getBytes(UTF_8);
+    try(final DBContainer dbContainer = startDb();
+        final ReadOptions readOptions = new ReadOptions()) {
+
+      try(final Transaction txn = dbContainer.beginTransaction()) {
+        txn.put(k1, v1);
+        assertThat(txn.getForUpdate(readOptions, k1, true)).isEqualTo(v1);
+        txn.commit();
+      }
+
+      try(final Transaction txn2 = dbContainer.beginTransaction()) {
+        try(final Transaction txn3 = dbContainer.beginTransaction()) {
+          assertThat(txn3.getForUpdate(readOptions, k1, true)).isEqualTo(v1);
+
+          // NOTE: txn2 updates k1, during txn3
+          try {
+            txn2.put(k1, v12); // should cause an exception!
+          } catch(final RocksDBException e) {
+            assertThat(e.getStatus().getCode()).isSameAs(Status.Code.TimedOut);
+            return;
+          }
+        }
+      }
+
+      fail("Expected an exception for put after getForUpdate from conflicting" +
+          "transactions");
+    }
+  }
+
+  @Test
+  public void multiGetForUpdate_cf_conflict() throws RocksDBException {
+    final byte keys[][] = new byte[][] {
+        "key1".getBytes(UTF_8),
+        "key2".getBytes(UTF_8)};
+    final byte values[][] = new byte[][] {
+        "value1".getBytes(UTF_8),
+        "value2".getBytes(UTF_8)};
+    final byte[] otherValue = "otherValue".getBytes(UTF_8);
+
+    try(final DBContainer dbContainer = startDb();
+        final ReadOptions readOptions = new ReadOptions()) {
+      final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily();
+      final List<ColumnFamilyHandle> cfList = Arrays.asList(testCf, testCf);
+
+      try(final Transaction txn = dbContainer.beginTransaction()) {
+        txn.put(testCf, keys[0], values[0]);
+        txn.put(testCf, keys[1], values[1]);
+        assertThat(txn.multiGet(readOptions, cfList, keys)).isEqualTo(values);
+        txn.commit();
+      }
+
+      try(final Transaction txn2 = dbContainer.beginTransaction()) {
+        try(final Transaction txn3 = dbContainer.beginTransaction()) {
+          assertThat(txn3.multiGetForUpdate(readOptions, cfList, keys))
+              .isEqualTo(values);
+
+          // NOTE: txn2 updates k1, during txn3
+          try {
+            txn2.put(testCf, keys[0], otherValue); // should cause an exception!
+ } catch(final RocksDBException e) { + assertThat(e.getStatus().getCode()).isSameAs(Status.Code.TimedOut); + return; + } + } + } + + fail("Expected an exception for put after getForUpdate from conflicting" + + "transactions"); + } + } + + @Test + public void multiGetForUpdate_conflict() throws RocksDBException { + final byte keys[][] = new byte[][] { + "key1".getBytes(UTF_8), + "key2".getBytes(UTF_8)}; + final byte values[][] = new byte[][] { + "value1".getBytes(UTF_8), + "value2".getBytes(UTF_8)}; + final byte[] otherValue = "otherValue".getBytes(UTF_8); + + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions()) { + try(final Transaction txn = dbContainer.beginTransaction()) { + txn.put(keys[0], values[0]); + txn.put(keys[1], values[1]); + assertThat(txn.multiGet(readOptions, keys)).isEqualTo(values); + txn.commit(); + } + + try(final Transaction txn2 = dbContainer.beginTransaction()) { + try(final Transaction txn3 = dbContainer.beginTransaction()) { + assertThat(txn3.multiGetForUpdate(readOptions, keys)) + .isEqualTo(values); + + // NOTE: txn2 updates k1, during txn3 + try { + txn2.put(keys[0], otherValue); // should cause an exception! + } catch(final RocksDBException e) { + assertThat(e.getStatus().getCode()).isSameAs(Status.Code.TimedOut); + return; + } + } + } + + fail("Expected an exception for put after getForUpdate from conflicting" + + "transactions"); + } + } + + @Test + public void name() throws RocksDBException { + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + assertThat(txn.getName()).isEmpty(); + final String name = "my-transaction-" + rand.nextLong(); + txn.setName(name); + assertThat(txn.getName()).isEqualTo(name); + } + } + + @Test + public void ID() throws RocksDBException { + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + assertThat(txn.getID()).isGreaterThan(0); + } + } + + @Test + public void deadlockDetect() throws RocksDBException { + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + assertThat(txn.isDeadlockDetect()).isFalse(); + } + } + + @Test + public void waitingTxns() throws RocksDBException { + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + assertThat(txn.getWaitingTxns().getTransactionIds().length).isEqualTo(0); + } + } + + @Test + public void state() throws RocksDBException { + try(final DBContainer dbContainer = startDb()) { + + try(final Transaction txn = dbContainer.beginTransaction()) { + assertThat(txn.getState()) + .isSameAs(Transaction.TransactionState.STARTED); + txn.commit(); + assertThat(txn.getState()) + .isSameAs(Transaction.TransactionState.COMMITED); + } + + try(final Transaction txn = dbContainer.beginTransaction()) { + assertThat(txn.getState()) + .isSameAs(Transaction.TransactionState.STARTED); + txn.rollback(); + assertThat(txn.getState()) + .isSameAs(Transaction.TransactionState.STARTED); + } + } + } + + @Test + public void Id() throws RocksDBException { + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + assertThat(txn.getId()).isNotNull(); + } + } + + @Override + public TransactionDBContainer startDb() throws RocksDBException { + final DBOptions options = new DBOptions() + .setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true); + final TransactionDBOptions txnDbOptions = new 
TransactionDBOptions();
+    final ColumnFamilyOptions columnFamilyOptions = new ColumnFamilyOptions();
+    final List<ColumnFamilyDescriptor> columnFamilyDescriptors =
+        Arrays.asList(
+            new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+            new ColumnFamilyDescriptor(TXN_TEST_COLUMN_FAMILY,
+                columnFamilyOptions));
+    final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
+
+    final TransactionDB txnDb;
+    try {
+      txnDb = TransactionDB.open(options, txnDbOptions,
+          dbFolder.getRoot().getAbsolutePath(), columnFamilyDescriptors,
+          columnFamilyHandles);
+    } catch(final RocksDBException e) {
+      columnFamilyOptions.close();
+      txnDbOptions.close();
+      options.close();
+      throw e;
+    }
+
+    final WriteOptions writeOptions = new WriteOptions();
+    final TransactionOptions txnOptions = new TransactionOptions();
+
+    return new TransactionDBContainer(txnOptions, writeOptions,
+        columnFamilyHandles, txnDb, txnDbOptions, columnFamilyOptions, options);
+  }
+
+  private static class TransactionDBContainer
+      extends DBContainer {
+    private final TransactionOptions txnOptions;
+    private final TransactionDB txnDb;
+    private final TransactionDBOptions txnDbOptions;
+
+    public TransactionDBContainer(
+        final TransactionOptions txnOptions, final WriteOptions writeOptions,
+        final List<ColumnFamilyHandle> columnFamilyHandles,
+        final TransactionDB txnDb, final TransactionDBOptions txnDbOptions,
+        final ColumnFamilyOptions columnFamilyOptions,
+        final DBOptions options) {
+      super(writeOptions, columnFamilyHandles, columnFamilyOptions,
+          options);
+      this.txnOptions = txnOptions;
+      this.txnDb = txnDb;
+      this.txnDbOptions = txnDbOptions;
+    }
+
+    @Override
+    public Transaction beginTransaction() {
+      return txnDb.beginTransaction(writeOptions, txnOptions);
+    }
+
+    @Override
+    public Transaction beginTransaction(final WriteOptions writeOptions) {
+      return txnDb.beginTransaction(writeOptions, txnOptions);
+    }
+
+    @Override
+    public void close() {
+      txnOptions.close();
+      writeOptions.close();
+      for(final ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles) {
+        columnFamilyHandle.close();
+      }
+      txnDb.close();
+      txnDbOptions.close();
+      options.close();
+    }
+  }
+
+}
diff --git a/src.mk b/src.mk
index 1182af889..17bf94d76 100644
--- a/src.mk
+++ b/src.mk
@@ -396,6 +396,8 @@ JNI_NATIVE_SOURCES = \
  java/rocksjni/lru_cache.cc \
  java/rocksjni/memtablejni.cc \
  java/rocksjni/merge_operator.cc \
+  java/rocksjni/optimistic_transaction_db.cc \
+  java/rocksjni/optimistic_transaction_options.cc \
  java/rocksjni/options.cc \
  java/rocksjni/options_util.cc \
  java/rocksjni/ratelimiterjni.cc \
@@ -412,7 +414,13 @@ JNI_NATIVE_SOURCES = \
  java/rocksjni/statistics.cc \
  java/rocksjni/statisticsjni.cc \
  java/rocksjni/table.cc \
+  java/rocksjni/transaction.cc \
+  java/rocksjni/transaction_db.cc \
+  java/rocksjni/transaction_options.cc \
+  java/rocksjni/transaction_db_options.cc \
  java/rocksjni/transaction_log.cc \
+  java/rocksjni/transaction_notifier.cc \
+  java/rocksjni/transaction_notifier_jnicallback.cc \
  java/rocksjni/ttl.cc \
  java/rocksjni/write_batch.cc \
  java/rocksjni/writebatchhandlerjnicallback.cc \